/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2010 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
			      "Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "3.0.12-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
	[board_82599] = &ixgbe_82599_info,
	[board_X540] = &ixgbe_X540_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T),
	 board_82599 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gcr;
	u32 gpie;
	u32 vmdctl;

#ifdef CONFIG_PCI_IOV
	/* disable iov and allow time for transactions to clear */
	pci_disable_sriov(adapter->pdev);
#endif

	/* turn off device IOV mode */
	gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr &= ~(IXGBE_GCR_EXT_SRIOV);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* set default pool back to 0 */
	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);

	/* take a breather then clean up driver data */
	msleep(100);

	kfree(adapter->vfinfo);
	adapter->vfinfo = NULL;

	adapter->num_vfs = 0;
	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}

struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{}
};

/*
 * ixgbe_regdump - register printout routine
 */
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
	int i = 0, j = 0;
	char rname[16];
	u32 regs[64];

	switch (reginfo->ofs) {
	case IXGBE_SRRCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		break;
	case IXGBE_DCA_RXCTRL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		break;
	case IXGBE_RDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
		break;
	case IXGBE_RDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
		break;
	case IXGBE_RDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
		break;
	case IXGBE_RXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		break;
	case IXGBE_RDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
		break;
	case IXGBE_RDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
		break;
	case IXGBE_TDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
		break;
	case IXGBE_TDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
		break;
	case IXGBE_TDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
		break;
	case IXGBE_TDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
		break;
	case IXGBE_TDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
		break;
	case IXGBE_TXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name,
			IXGBE_READ_REG(hw, reginfo->ofs));
		return;
	}

	for (i = 0; i < 8; i++) {
		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
		pr_err("%-15s", rname);
		for (j = 0; j < 8; j++)
			pr_cont(" %08x", regs[i*8+j]);
		pr_cont("\n");
	}
}
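
/*
 * Illustrative note (added for clarity, not in the original source): for a
 * per-queue register such as SRRCTL the loop above emits eight rows of
 * eight values, one entry per queue, e.g.:
 *   SRRCTL[0-7]     02000200 02000200 ... (values here are hypothetical)
 */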

/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_reg_info *reginfo;
	int n = 0;
	struct ixgbe_ring *tx_ring;
	struct ixgbe_tx_buffer *tx_buffer_info;
	union ixgbe_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct ixgbe_ring *rx_ring;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            "
			"trans_start      last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
	     reginfo->name; reginfo++) {
		ixgbe_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		tx_buffer_info =
			&tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)tx_buffer_info->dma,
			tx_buffer_info->length,
			tx_buffer_info->next_to_watch,
			(u64)tx_buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 |  PAYLEN  | PORTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN  |
	 *   +--------------------------------------------------------------+
	 *   63       46 45    40 39 36 35 32 31   24 23 20 19             0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] "
			"[PlPOIdStDDt Ln] [bi->dma       ] "
			"leng ntw timestamp bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X %3X %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)tx_buffer_info->dma,
				tx_buffer_info->length,
				tx_buffer_info->next_to_watch,
				(u64)tx_buffer_info->time_stamp,
				tx_buffer_info->skb);
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				pr_cont(" NTC/U\n");
			else if (i == tx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == tx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

			if (netif_msg_pktdata(adapter) &&
			    tx_buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS, 16, 1,
					phys_to_virt(tx_buffer_info->dma),
					tx_buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("%5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 16 15   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS  |
	 *   | Checksum   Ident  |   |           |    | Type | Type  |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19              0
	 */
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] "
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			rx_buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & IXGBE_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					rx_buffer_info->skb);
			} else {
				pr_info("R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)rx_buffer_info->dma,
					rx_buffer_info->skb);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
					   DUMP_PREFIX_ADDRESS, 16, 1,
					   phys_to_virt(rx_buffer_info->dma),
					   rx_ring->rx_buf_len, true);

					if (rx_ring->rx_buf_len
						< IXGBE_RXBUFFER_2048)
						print_hex_dump(KERN_INFO, "",
						  DUMP_PREFIX_ADDRESS, 16, 1,
						  phys_to_virt(
						    rx_buffer_info->page_dma +
						    rx_buffer_info->page_offset
						  ),
						  PAGE_SIZE/2, true);
				}
			}

			if (i == rx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == rx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");
		}
	}

exit:
	return;
}

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/*
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
			   u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}
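
/*
 * Worked example (added for illustration, not in the original source): on
 * 82599-class hardware each 32-bit IVAR register holds four 8-bit vector
 * entries for two queues (Rx in bits 7:0/23:16, Tx in bits 15:8/31:24).
 * Mapping Rx queue 5 to MSI-X vector 3 therefore touches IVAR(2):
 * index = 16 * (5 & 1) + 8 * 0 = 16, so bits 23:16 are written with
 * (3 | IXGBE_IVAR_ALLOC_VAL).
 */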

static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
					  u64 qmask)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
}

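/*
 * Illustrative example (added for clarity, not in the original source): on
 * 82599/X540 the 64-bit qmask is split across the two EICS_EX registers.
 * Rearming queue vector 40 means qmask = 1ULL << 40, so EICS_EX(0) is
 * written with 0 and EICS_EX(1) with (1 << 8).
 */
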
void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

/**
 * ixgbe_dcb_txq_to_tc - convert a reg index to a traffic class
 * @adapter: driver private struct
 * @reg_idx: reg idx of queue to query (0-127)
 *
 * Helper function to determine the traffic class for a particular
 * register index.
 *
 * Returns: a TC index for use in range 0-7, or 0-3
 */
u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
{
	int tc = -1;
	int dcb_i = adapter->ring_feature[RING_F_DCB].indices;

	/* if DCB is not enabled the queues have no TC */
	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return tc;

	/* check valid range */
	if (reg_idx >= adapter->hw.mac.max_tx_queues)
		return tc;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		tc = reg_idx >> 2;
		break;
	default:
		if (dcb_i != 4 && dcb_i != 8)
			break;

		/* if VMDq is enabled the lowest order bits determine TC */
		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
				      IXGBE_FLAG_VMDQ_ENABLED)) {
			tc = reg_idx & (dcb_i - 1);
			break;
		}

		/*
		 * Convert the reg_idx into the correct TC. This bitmask
		 * targets the last full 32 ring traffic class and assigns
		 * it a value of 1. From there the rest of the rings are
		 * based on shifting the mask further up to include the
		 * reg_idx / 16 and then reg_idx / 8. It assumes dcb_i
		 * will only ever be 8 or 4 and that reg_idx will never
		 * be greater than 128. The code without the power of 2
		 * optimizations would be:
		 *   (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32)
		 */
		tc = ((reg_idx & 0x1F) + 0x20) * dcb_i;
		tc >>= 9 - (reg_idx >> 5);
	}

	return tc;
}
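
/*
 * Worked example (added for illustration, not in the original source): with
 * dcb_i == 8 on 82599, reg_idx = 96 gives
 *   tc = ((96 & 0x1F) + 0x20) * 8 = 0x100, then
 *   tc >>= 9 - (96 >> 5) = 6, so tc = 4,
 * matching the 8-TC layout in which queues 96-103 belong to TC4.
 */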

static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u32 data = 0;
	u32 xoff[8] = {0};
	int i;

	if ((hw->fc.current_mode == ixgbe_fc_full) ||
	    (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
			break;
		default:
			data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
		}
		hwstats->lxoffrxc += data;

		/* refill credits (no tx hang) if we received xoff */
		if (!data)
			return;

		for (i = 0; i < adapter->num_tx_queues; i++)
			clear_bit(__IXGBE_HANG_CHECK_ARMED,
				  &adapter->tx_ring[i]->state);
		return;
	} else if (!(adapter->dcb_cfg.pfc_mode_enable))
		return;

	/* update stats for each tc, only valid with PFC enabled */
	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
			break;
		default:
			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		}
		hwstats->pxoffrxc[i] += xoff[i];
	}

	/* disarm tx queues that have received xoff frames */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
		u32 tc = ixgbe_dcb_txq_to_tc(adapter, tx_ring->reg_idx);

		if (xoff[tc])
			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}
}

static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
	return ring->tx_stats.completed;
}

static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
	bool ret = false;

	clear_check_for_tx_hang(tx_ring);

	/*
	 * Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * pfc clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
				       &tx_ring->state);
	} else {
		/* update completed stats and continue */
		tx_ring->tx_stats.tx_done_old = tx_done;
		/* reset the countdown */
		clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}

	return ret;
}
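
/*
 * Illustrative note (added for clarity, not in the original source): the
 * ARMED bit gives a queue two strikes. The first failed check (no new
 * completions while work is pending) sets the bit and returns false; only
 * a second consecutive failed check finds the bit already set and reports
 * a hang, so a pause frame or late completion in between resets the
 * countdown.
 */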

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */

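/*
 * Worked example (added for illustration, not in the original source):
 * TXD_USE_COUNT(S) is simply ceil(S / 16384). With 4 KiB pages each page
 * fragment fits in one descriptor, so assuming MAX_SKB_FRAGS == 18 the
 * worst case is 1 (skb->data) + 18 (frags) + 1 (context) = 20 descriptors
 * per packet.
 */
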
static void ixgbe_tx_timeout(struct net_device *netdev);

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *tx_ring)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int total_bytes = 0, total_packets = 0;
	u16 i, eop, count = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		for ( ; !cleaned; count++) {
			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];

			tx_desc->wb.status = 0;
			cleaned = (i == eop);

			i++;
			if (i == tx_ring->count)
				i = 0;

			if (cleaned && tx_buffer_info->skb) {
				total_bytes += tx_buffer_info->bytecount;
				total_packets += tx_buffer_info->gso_segs;
			}

			ixgbe_unmap_and_free_tx_resource(tx_ring,
							 tx_buffer_info);
		}

		tx_ring->tx_stats.completed++;
		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
	}

	tx_ring->next_to_clean = i;
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.packets += total_packets;
	tx_ring->stats.bytes += total_bytes;
	u64_stats_update_end(&tx_ring->syncp);

	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		struct ixgbe_hw *hw = &adapter->hw;
		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
		e_err(drv, "Detected Tx Unit Hang\n"
			"  Tx Queue             <%d>\n"
			"  TDH, TDT             <%x>, <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  jiffies              <%lx>\n",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
			tx_ring->next_to_use, eop,
			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		e_info(probe,
		       "tx hang %d detected on queue %d, resetting adapter\n",
		       adapter->tx_timeout_count + 1, tx_ring->queue_index);

		ixgbe_tx_timeout(adapter->netdev);

		/* the adapter is about to reset, no point in enabling stuff */
		return true;
	}

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return count < tx_ring->work_limit;
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl;
	u8 reg_idx = rx_ring->reg_idx;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
		rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
		rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			   IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
		break;
	default:
		break;
	}
	rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
	rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
	rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
	rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
		    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txctrl;
	u8 reg_idx = tx_ring->reg_idx;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
		txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
		txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			   IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
		break;
	default:
		break;
	}
}

static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	int cpu = get_cpu();
	long r_idx;
	int i;

	if (q_vector->cpu == cpu)
		goto out_no_update;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int num_q_vectors;
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	/* always use CB2 mode, difference is masked in the CB driver */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	else
		num_q_vectors = 1;

	for (i = 0; i < num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		ixgbe_update_dca(adapter->q_vector[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
	unsigned long event = *(unsigned long *)data;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return 0;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
			      struct sk_buff *skb, u8 status,
			      struct ixgbe_ring *ring,
			      union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct napi_struct *napi = &q_vector->napi;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && (tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(napi, skb);
	else
		netif_rx(skb);
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @rx_desc: rx descriptor carrying the hardware status of the receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);

	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
		if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
		    (adapter->hw.mac.type == ixgbe_mac_82599EB))
			return;

		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	struct sk_buff *skb;
	u16 i = rx_ring->next_to_use;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev)
		return;

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
		bi = &rx_ring->rx_buffer_info[i];
		skb = bi->skb;

		if (!skb) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				rx_ring->rx_stats.alloc_rx_buff_failed++;
				goto no_buffers;
			}
			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			bi->skb = skb;
		}

		if (!bi->dma) {
			bi->dma = dma_map_single(rx_ring->dev,
						 skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				rx_ring->rx_stats.alloc_rx_buff_failed++;
				bi->dma = 0;
				goto no_buffers;
			}
		}

		if (ring_is_ps_enabled(rx_ring)) {
			if (!bi->page) {
				bi->page = netdev_alloc_page(rx_ring->netdev);
				if (!bi->page) {
					rx_ring->rx_stats.alloc_rx_page_failed++;
					goto no_buffers;
				}
			}

			if (!bi->page_dma) {
				/* use a half page if we're re-using */
				bi->page_offset ^= PAGE_SIZE / 2;
				bi->page_dma = dma_map_page(rx_ring->dev,
							    bi->page,
							    bi->page_offset,
							    PAGE_SIZE / 2,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(rx_ring->dev,
						      bi->page_dma)) {
					rx_ring->rx_stats.alloc_rx_page_failed++;
					bi->page_dma = 0;
					goto no_buffers;
				}
			}

			/* Refresh the desc even if buffer_addrs didn't change
			 * because each write-back erases this info. */
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		ixgbe_release_rx_desc(rx_ring, i);
	}
}

static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger.  In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
	u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
		   IXGBE_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > IXGBE_RX_HDR_SIZE)
		hlen = IXGBE_RX_HDR_SIZE;
	return hlen;
}

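/*
 * Illustrative example (added for clarity, not in the original source): a
 * descriptor reporting a 66-byte TCP/IP header yields hlen = 66, with the
 * payload landing in the half-page buffer; an oversized report is clamped
 * to IXGBE_RX_HDR_SIZE, since the hardware never writes more than the
 * header buffer can hold.
 */
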
/**
 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
 * @skb: pointer to the last skb in the rsc queue
 *
 * This function changes a queue full of hw rsc buffers into a completed
 * packet.  It uses the ->prev pointers to find the first packet and then
 * turns it into the frag list owner.
 **/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
{
	unsigned int frag_list_size = 0;
	unsigned int skb_cnt = 1;

	while (skb->prev) {
		struct sk_buff *prev = skb->prev;
		frag_list_size += skb->len;
		skb->prev = NULL;
		skb = prev;
		skb_cnt++;
	}

	skb_shinfo(skb)->frag_list = skb->next;
	skb->next = NULL;
	skb->len += frag_list_size;
	skb->data_len += frag_list_size;
	skb->truesize += frag_list_size;
	IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;

	return skb;
}
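
/*
 * Illustrative example (added for clarity, not in the original source): for
 * an RSC chain of three buffers A <- B <- C (C is the newest; ->prev links
 * point back), the walk above starts at C and ends at A. A->next already
 * points at B, so A becomes the frag_list owner, its length fields grow by
 * len(B) + len(C), and skb_cnt records 3 coalesced frames.
 */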

static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
{
	return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
		  IXGBE_RXDADV_RSCCNT_MASK);
}

static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	const int current_node = numa_node_id();
#ifdef IXGBE_FCOE
	int ddp_bytes = 0;
#endif /* IXGBE_FCOE */
	u32 staterr;
	u16 i;
	u16 cleaned_count = 0;
	bool pkt_is_rsc = false;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;

		rmb(); /* read descriptor and rx_buffer_info after status DD */

		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		skb = rx_buffer_info->skb;
		rx_buffer_info->skb = NULL;
		prefetch(skb->data);

		if (ring_is_rsc_enabled(rx_ring))
			pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);

		/* if this is a skb from previous receive DMA will be 0 */
		if (rx_buffer_info->dma) {
			u16 hlen;
			if (pkt_is_rsc &&
			    !(staterr & IXGBE_RXD_STAT_EOP) &&
			    !skb->prev) {
				/*
				 * When HWRSC is enabled, delay unmapping
				 * of the first packet. It carries the
				 * header information, HW may still
				 * access the header after the writeback.
				 * Only unmap it when EOP is reached
				 */
				IXGBE_RSC_CB(skb)->delay_unmap = true;
				IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
			} else {
				dma_unmap_single(rx_ring->dev,
						 rx_buffer_info->dma,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			}
			rx_buffer_info->dma = 0;

			if (ring_is_ps_enabled(rx_ring)) {
				hlen = ixgbe_get_hlen(rx_desc);
				upper_len = le16_to_cpu(rx_desc->wb.upper.length);
			} else {
				hlen = le16_to_cpu(rx_desc->wb.upper.length);
			}

			skb_put(skb, hlen);
		} else {
			/* assume packet split since header is unmapped */
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		}

		if (upper_len) {
			dma_unmap_page(rx_ring->dev,
				       rx_buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buffer_info->page,
					   rx_buffer_info->page_offset,
					   upper_len);

			if ((page_count(rx_buffer_info->page) == 1) &&
			    (page_to_nid(rx_buffer_info->page) == current_node))
				get_page(rx_buffer_info->page);
			else
				rx_buffer_info->page = NULL;

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		if (pkt_is_rsc) {
			u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
				    IXGBE_RXDADV_NEXTP_SHIFT;
			next_buffer = &rx_ring->rx_buffer_info[nextp];
		} else {
			next_buffer = &rx_ring->rx_buffer_info[i];
		}

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			if (ring_is_ps_enabled(rx_ring)) {
				rx_buffer_info->skb = next_buffer->skb;
				rx_buffer_info->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			} else {
				skb->next = next_buffer->skb;
				skb->next->prev = skb;
			}
			rx_ring->rx_stats.non_eop_descs++;
			goto next_desc;
		}

		if (skb->prev) {
			skb = ixgbe_transform_rsc_queue(skb);
			/* if we got here without RSC the packet is invalid */
			if (!pkt_is_rsc) {
				__pskb_trim(skb, 0);
				rx_buffer_info->skb = skb;
				goto next_desc;
			}
		}

		if (ring_is_rsc_enabled(rx_ring)) {
			if (IXGBE_RSC_CB(skb)->delay_unmap) {
				dma_unmap_single(rx_ring->dev,
						 IXGBE_RSC_CB(skb)->dma,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
				IXGBE_RSC_CB(skb)->dma = 0;
				IXGBE_RSC_CB(skb)->delay_unmap = false;
			}
		}
		if (pkt_is_rsc) {
			if (ring_is_ps_enabled(rx_ring))
				rx_ring->rx_stats.rsc_count +=
					skb_shinfo(skb)->nr_frags;
			else
				rx_ring->rx_stats.rsc_count +=
					IXGBE_RSC_CB(skb)->skb_cnt;
			rx_ring->rx_stats.rsc_flush++;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
			/* trim packet back to size 0 and recycle it */
			__pskb_trim(skb, 0);
			rx_buffer_info->skb = skb;
			goto next_desc;
		}

		ixgbe_rx_checksum(adapter, rx_desc, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
#ifdef IXGBE_FCOE
		/* if ddp, not passing to ULD unless for FCP_RSP or error */
		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
			if (!ddp_bytes)
				goto next_desc;
		}
#endif /* IXGBE_FCOE */
		ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		(*work_done)++;
		if (*work_done >= work_to_do)
			break;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);

#ifdef IXGBE_FCOE
	/* include DDPed FCoE data */
	if (ddp_bytes > 0) {
		unsigned int mss;

		mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
			sizeof(struct fc_frame_header) -
			sizeof(struct fcoe_crc_eof);
		if (mss > 512)
			mss &= ~511;
		total_rx_bytes += ddp_bytes;
		total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
	}
#endif /* IXGBE_FCOE */

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
}

static int ixgbe_clean_rxonly(struct napi_struct *, int);
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int i, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		/* XXX for_each_set_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
				       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
			ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
					      adapter->num_rx_queues,
					      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
				       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
			ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
					      adapter->num_tx_queues,
					      r_idx + 1);
		}

		if (q_vector->txr_count && !q_vector->rxr_count)
			/* tx only */
			q_vector->eitr = adapter->tx_eitr_param;
		else if (q_vector->rxr_count)
			/* rx or mixed */
			q_vector->eitr = adapter->rx_eitr_param;

		ixgbe_write_eitr(q_vector);
		/* If Flow Director is enabled, set interrupt affinity */
		if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
		    (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
			/*
			 * Allocate the affinity_hint cpumask, assign the mask
			 * for this vector, and set our affinity_hint for
			 * this irq.
			 */
			if (!alloc_cpumask_var(&q_vector->affinity_mask,
					       GFP_KERNEL))
				return;
			cpumask_set_cpu(v_idx, q_vector->affinity_mask);
			irq_set_affinity_hint(adapter->msix_entries[v_idx].vector,
					      q_vector->affinity_mask);
		}
	}

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
			       v_idx);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		ixgbe_set_ivar(adapter, -1, 1, v_idx);
		break;

	default:
		break;
	}
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	if (adapter->num_vfs)
		mask &= ~(IXGBE_EIMS_OTHER |
			  IXGBE_EIMS_MAILBOX |
			  IXGBE_EIMS_LSC);
	else
		mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * this functionality is controlled by the InterruptThrottleRate module
 * parameter (see ixgbe_param.c)
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
			   u32 eitr, u8 itr_setting,
			   int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000/eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}
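
/*
 * Worked example (added for illustration, not in the original source): with
 * eitr = 20000 ints/s, each timeslice is 1000000/20000 = 50 us. If 75000
 * bytes arrived in that interval, bytes_perint = 1500 bytes/us (a ~12 Gb/s
 * burst), which is above the eitr_high watermark, so a vector in the
 * low_latency state steps down to bulk_latency (8000 ints/s).
 */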
1676
509ee935
JB
1677/**
1678 * ixgbe_write_eitr - write EITR register in hardware specific way
fe49f04a 1679 * @q_vector: structure containing interrupt and ring information
509ee935
JB
1680 *
1681 * This function is meant to be called by ethtool and by the driver
1682 * when it needs to update EITR registers at runtime. Hardware
1683 * specific quirks/differences are taken care of here.
1684 */
fe49f04a 1685void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
509ee935 1686{
fe49f04a 1687 struct ixgbe_adapter *adapter = q_vector->adapter;
509ee935 1688 struct ixgbe_hw *hw = &adapter->hw;
fe49f04a
AD
1689 int v_idx = q_vector->v_idx;
1690 u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
1691
bd508178
AD
1692 switch (adapter->hw.mac.type) {
1693 case ixgbe_mac_82598EB:
509ee935
JB
1694 /* must write high and low 16 bits to reset counter */
1695 itr_reg |= (itr_reg << 16);
bd508178
AD
1696 break;
1697 case ixgbe_mac_82599EB:
b93a2226 1698 case ixgbe_mac_X540:
f8d1dcaf 1699 /*
b93a2226 1700 * 82599 and X540 can support a value of zero, so allow it for
f8d1dcaf
JB
1701 * max interrupt rate, but there is an erratum where it cannot
1702 * be zero with RSC
1703 */
1704 if (itr_reg == 8 &&
1705 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
1706 itr_reg = 0;
1707
509ee935
JB
1708 /*
1709 * set the WDIS bit to not clear the timer bits and cause an
1710 * immediate assertion of the interrupt
1711 */
1712 itr_reg |= IXGBE_EITR_CNT_WDIS;
bd508178
AD
1713 break;
1714 default:
1715 break;
509ee935
JB
1716 }
1717 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
1718}
1719
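The EITR write above differs per MAC: 82598 must see both 16-bit halves to reset its counter, while 82599/X540 set WDIS so the write itself does not trigger an interrupt. A standalone sketch of the encoding, assuming the EITR_INTS_PER_SEC_TO_REG conversion from ixgbe.h (interval in 256 ns units) and the usual WDIS bit position:

/* Sketch of the register value computed by ixgbe_write_eitr(). */
#include <stdio.h>
#include <stdint.h>

/* assumed from ixgbe.h: ints/sec -> interval in 256 ns units */
#define EITR_INTS_PER_SEC_TO_REG(e)	((e) ? (1000000000u / ((e) * 256u)) : 0)
#define IXGBE_EITR_CNT_WDIS		0x80000000u	/* assumed bit */

int main(void)
{
	uint32_t itr_reg = EITR_INTS_PER_SEC_TO_REG(20000);	/* ~195 */

	/* 82598: the counter resets only when both halves are written */
	uint32_t v_82598 = itr_reg | (itr_reg << 16);

	/* 82599/X540: set WDIS so the write does not assert an interrupt */
	uint32_t v_82599 = itr_reg | IXGBE_EITR_CNT_WDIS;

	printf("82598: 0x%08x  82599: 0x%08x\n", v_82598, v_82599);
	return 0;
}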
f494e8fa
AV
1720static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1721{
1722 struct ixgbe_adapter *adapter = q_vector->adapter;
125601bf 1723 int i, r_idx;
f494e8fa
AV
1724 u32 new_itr;
1725 u8 current_itr, ret_itr;
f494e8fa
AV
1726
1727 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1728 for (i = 0; i < q_vector->txr_count; i++) {
125601bf 1729 struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx];
f494e8fa 1730 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
e8e9f696
JP
1731 q_vector->tx_itr,
1732 tx_ring->total_packets,
1733 tx_ring->total_bytes);
f494e8fa
AV
1734 /* if the result for this queue would decrease interrupt
1735 * rate for this vector then use that result */
30efa5a3 1736 q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
e8e9f696 1737 q_vector->tx_itr - 1 : ret_itr);
f494e8fa 1738 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
e8e9f696 1739 r_idx + 1);
f494e8fa
AV
1740 }
1741
1742 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1743 for (i = 0; i < q_vector->rxr_count; i++) {
125601bf 1744 struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx];
f494e8fa 1745 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
e8e9f696
JP
1746 q_vector->rx_itr,
1747 rx_ring->total_packets,
1748 rx_ring->total_bytes);
f494e8fa
AV
1749 /* if the result for this queue would decrease interrupt
1750 * rate for this vector then use that result */
30efa5a3 1751 q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
e8e9f696 1752 q_vector->rx_itr - 1 : ret_itr);
f494e8fa 1753 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
e8e9f696 1754 r_idx + 1);
f494e8fa
AV
1755 }
1756
30efa5a3 1757 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
f494e8fa
AV
1758
1759 switch (current_itr) {
1760 /* counts and packets in update_itr are dependent on these numbers */
1761 case lowest_latency:
1762 new_itr = 100000;
1763 break;
1764 case low_latency:
1765 new_itr = 20000; /* aka hwitr = ~200 */
1766 break;
1767 case bulk_latency:
1768 default:
1769 new_itr = 8000;
1770 break;
1771 }
1772
1773 if (new_itr != q_vector->eitr) {
fe49f04a 1774 /* do an exponential smoothing */
125601bf 1775 new_itr = ((q_vector->eitr * 9) + new_itr)/10;
509ee935
JB
1776
1777 /* save the algorithm value here, not the smoothed one */
1778 q_vector->eitr = new_itr;
fe49f04a
AD
1779
1780 ixgbe_write_eitr(q_vector);
f494e8fa 1781 }
f494e8fa
AV
1782}
1783
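The smoothing step above is a 90/10 exponential moving average, so a vector never jumps straight to the new target rate. A standalone sketch showing the convergence over a few interrupts:

/* Demonstrates the (old * 9 + new) / 10 smoothing used when the ITR
 * target changes, e.g. a vector at 8000 ints/s retargeted to 100000.
 */
#include <stdio.h>

int main(void)
{
	unsigned int eitr = 8000, target = 100000;

	for (int i = 0; i < 10; i++) {
		eitr = (eitr * 9 + target) / 10;
		printf("step %d: %u ints/s\n", i, eitr);
	}
	return 0;
}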
119fc60a
MC
1784/**
1785 * ixgbe_check_overtemp_task - worker thread to check for an over temperature event
1786 * @work: pointer to work_struct containing our data
1787 **/
1788static void ixgbe_check_overtemp_task(struct work_struct *work)
1789{
1790 struct ixgbe_adapter *adapter = container_of(work,
e8e9f696
JP
1791 struct ixgbe_adapter,
1792 check_overtemp_task);
119fc60a
MC
1793 struct ixgbe_hw *hw = &adapter->hw;
1794 u32 eicr = adapter->interrupt_event;
1795
7ca647bd
JP
1796 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
1797 return;
1798
1799 switch (hw->device_id) {
1800 case IXGBE_DEV_ID_82599_T3_LOM: {
1801 u32 autoneg;
1802 bool link_up = false;
1803
1804 if (hw->mac.ops.check_link)
1805 hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
1806
1807 if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
1808 (eicr & IXGBE_EICR_LSC))
1809 /* Check if this is due to overtemp */
1810 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP)
1811 break;
1812 return;
1813 }
1814 default:
1815 if (!(eicr & IXGBE_EICR_GPI_SDP0))
119fc60a 1816 return;
7ca647bd 1817 break;
119fc60a 1818 }
7ca647bd
JP
1819 e_crit(drv,
1820 "Network adapter has been stopped because it has over heated. "
1821 "Restart the computer. If the problem persists, "
1822 "power off the system and replace the adapter\n");
1823 /* write to clear the interrupt */
1824 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
119fc60a
MC
1825}
1826
0befdb3e
JB
1827static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
1828{
1829 struct ixgbe_hw *hw = &adapter->hw;
1830
1831 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
1832 (eicr & IXGBE_EICR_GPI_SDP1)) {
396e799c 1833 e_crit(probe, "Fan has stopped, replace the adapter\n");
0befdb3e
JB
1834 /* write to clear the interrupt */
1835 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1836 }
1837}
cf8280ee 1838
e8e26350
PW
1839static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
1840{
1841 struct ixgbe_hw *hw = &adapter->hw;
1842
73c4b7cd
AD
1843 if (eicr & IXGBE_EICR_GPI_SDP2) {
1844 /* Clear the interrupt */
1845 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1846 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1847 schedule_work(&adapter->sfp_config_module_task);
1848 }
1849
e8e26350
PW
1850 if (eicr & IXGBE_EICR_GPI_SDP1) {
1851 /* Clear the interrupt */
1852 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
73c4b7cd
AD
1853 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1854 schedule_work(&adapter->multispeed_fiber_task);
e8e26350
PW
1855 }
1856}
1857
cf8280ee
JB
1858static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
1859{
1860 struct ixgbe_hw *hw = &adapter->hw;
1861
1862 adapter->lsc_int++;
1863 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1864 adapter->link_check_timeout = jiffies;
1865 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1866 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
8a0717f3 1867 IXGBE_WRITE_FLUSH(hw);
cf8280ee
JB
1868 schedule_work(&adapter->watchdog_task);
1869 }
1870}
1871
9a799d71
AK
1872static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1873{
1874 struct net_device *netdev = data;
1875 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1876 struct ixgbe_hw *hw = &adapter->hw;
54037505
DS
1877 u32 eicr;
1878
1879 /*
1880 * Workaround for silicon errata. Use clear-by-write instead
1881 * of clear-by-read. Reading with EICS will return the
1882 * interrupt causes without clearing, which will later be done
1883 * with the write to EICR.
1884 */
1885 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1886 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
9a799d71 1887
cf8280ee
JB
1888 if (eicr & IXGBE_EICR_LSC)
1889 ixgbe_check_lsc(adapter);
d4f80882 1890
1cdd1ec8
GR
1891 if (eicr & IXGBE_EICR_MAILBOX)
1892 ixgbe_msg_task(adapter);
1893
bd508178
AD
1894 switch (hw->mac.type) {
1895 case ixgbe_mac_82599EB:
b93a2226 1896 case ixgbe_mac_X540:
c4cf55e5
PWJ
1897 /* Handle Flow Director Full threshold interrupt */
1898 if (eicr & IXGBE_EICR_FLOW_DIR) {
1899 int i;
1900 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
1901 /* Disable transmits before FDIR Re-initialization */
1902 netif_tx_stop_all_queues(netdev);
1903 for (i = 0; i < adapter->num_tx_queues; i++) {
1904 struct ixgbe_ring *tx_ring =
e8e9f696 1905 adapter->tx_ring[i];
7d637bcc
AD
1906 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
1907 &tx_ring->state))
c4cf55e5
PWJ
1908 schedule_work(&adapter->fdir_reinit_task);
1909 }
1910 }
bd508178
AD
1911 ixgbe_check_sfp_event(adapter, eicr);
1912 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1913 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
1914 adapter->interrupt_event = eicr;
1915 schedule_work(&adapter->check_overtemp_task);
1916 }
1917 break;
1918 default:
1919 break;
c4cf55e5 1920 }
bd508178
AD
1921
1922 ixgbe_check_fan_failure(adapter, eicr);
1923
d4f80882
AV
1924 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1925 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
9a799d71
AK
1926
1927 return IRQ_HANDLED;
1928}
1929
fe49f04a
AD
1930static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
1931 u64 qmask)
1932{
1933 u32 mask;
bd508178 1934 struct ixgbe_hw *hw = &adapter->hw;
fe49f04a 1935
bd508178
AD
1936 switch (hw->mac.type) {
1937 case ixgbe_mac_82598EB:
fe49f04a 1938 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
bd508178
AD
1939 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1940 break;
1941 case ixgbe_mac_82599EB:
b93a2226 1942 case ixgbe_mac_X540:
fe49f04a 1943 mask = (qmask & 0xFFFFFFFF);
bd508178
AD
1944 if (mask)
1945 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
fe49f04a 1946 mask = (qmask >> 32);
bd508178
AD
1947 if (mask)
1948 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1949 break;
1950 default:
1951 break;
fe49f04a
AD
1952 }
1953 /* skip the flush */
1954}
1955
1956static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
e8e9f696 1957 u64 qmask)
fe49f04a
AD
1958{
1959 u32 mask;
bd508178 1960 struct ixgbe_hw *hw = &adapter->hw;
fe49f04a 1961
bd508178
AD
1962 switch (hw->mac.type) {
1963 case ixgbe_mac_82598EB:
fe49f04a 1964 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
bd508178
AD
1965 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1966 break;
1967 case ixgbe_mac_82599EB:
b93a2226 1968 case ixgbe_mac_X540:
fe49f04a 1969 mask = (qmask & 0xFFFFFFFF);
bd508178
AD
1970 if (mask)
1971 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
fe49f04a 1972 mask = (qmask >> 32);
bd508178
AD
1973 if (mask)
1974 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1975 break;
1976 default:
1977 break;
fe49f04a
AD
1978 }
1979 /* skip the flush */
1980}
1981
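On 82599/X540 the per-queue interrupt mask is 64 bits wide, so the helpers above split it across the two 32-bit EIMS_EX/EIMC_EX registers. A minimal standalone sketch of that split (register names as used above):

/* Splits a 64-bit queue mask into the two 32-bit halves written to
 * EIMS_EX(0)/EIMS_EX(1), skipping a write when a half is empty.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t qmask = (1ULL << 3) | (1ULL << 40);	/* queues 3 and 40 */
	uint32_t lo = qmask & 0xFFFFFFFF;
	uint32_t hi = qmask >> 32;

	if (lo)
		printf("EIMS_EX(0) <- 0x%08x\n", lo);
	if (hi)
		printf("EIMS_EX(1) <- 0x%08x\n", hi);
	return 0;
}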
9a799d71
AK
1982static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
1983{
021230d4
AV
1984 struct ixgbe_q_vector *q_vector = data;
1985 struct ixgbe_adapter *adapter = q_vector->adapter;
3a581073 1986 struct ixgbe_ring *tx_ring;
021230d4
AV
1987 int i, r_idx;
1988
1989 if (!q_vector->txr_count)
1990 return IRQ_HANDLED;
1991
1992 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1993 for (i = 0; i < q_vector->txr_count; i++) {
4a0b9ca0 1994 tx_ring = adapter->tx_ring[r_idx];
3a581073
JB
1995 tx_ring->total_bytes = 0;
1996 tx_ring->total_packets = 0;
021230d4 1997 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
e8e9f696 1998 r_idx + 1);
021230d4 1999 }
9a799d71 2000
9b471446 2001 /* EIAM disabled interrupts (on this vector) for us */
91281fd3
AD
2002 napi_schedule(&q_vector->napi);
2003
9a799d71
AK
2004 return IRQ_HANDLED;
2005}
2006
021230d4
AV
2007/**
2008 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
2009 * @irq: unused
2010 * @data: pointer to our q_vector struct for this interrupt vector
2011 **/
9a799d71
AK
2012static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
2013{
021230d4
AV
2014 struct ixgbe_q_vector *q_vector = data;
2015 struct ixgbe_adapter *adapter = q_vector->adapter;
3a581073 2016 struct ixgbe_ring *rx_ring;
021230d4 2017 int r_idx;
30efa5a3 2018 int i;
021230d4 2019
33cf09c9
AD
2020#ifdef CONFIG_IXGBE_DCA
2021 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2022 ixgbe_update_dca(q_vector);
2023#endif
2024
021230d4 2025 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
33cf09c9 2026 for (i = 0; i < q_vector->rxr_count; i++) {
4a0b9ca0 2027 rx_ring = adapter->rx_ring[r_idx];
30efa5a3
JB
2028 rx_ring->total_bytes = 0;
2029 rx_ring->total_packets = 0;
2030 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
e8e9f696 2031 r_idx + 1);
30efa5a3
JB
2032 }
2033
021230d4
AV
2034 if (!q_vector->rxr_count)
2035 return IRQ_HANDLED;
2036
9b471446 2037 /* EIAM disabled interrupts (on this vector) for us */
288379f0 2038 napi_schedule(&q_vector->napi);
021230d4
AV
2039
2040 return IRQ_HANDLED;
2041}
2042
2043static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
2044{
91281fd3
AD
2045 struct ixgbe_q_vector *q_vector = data;
2046 struct ixgbe_adapter *adapter = q_vector->adapter;
2047 struct ixgbe_ring *ring;
2048 int r_idx;
2049 int i;
2050
2051 if (!q_vector->txr_count && !q_vector->rxr_count)
2052 return IRQ_HANDLED;
2053
2054 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
2055 for (i = 0; i < q_vector->txr_count; i++) {
4a0b9ca0 2056 ring = adapter->tx_ring[r_idx];
91281fd3
AD
2057 ring->total_bytes = 0;
2058 ring->total_packets = 0;
2059 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
e8e9f696 2060 r_idx + 1);
91281fd3
AD
2061 }
2062
2063 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
2064 for (i = 0; i < q_vector->rxr_count; i++) {
4a0b9ca0 2065 ring = adapter->rx_ring[r_idx];
91281fd3
AD
2066 ring->total_bytes = 0;
2067 ring->total_packets = 0;
2068 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
e8e9f696 2069 r_idx + 1);
91281fd3
AD
2070 }
2071
9b471446 2072 /* EIAM disabled interrupts (on this vector) for us */
91281fd3 2073 napi_schedule(&q_vector->napi);
9a799d71 2074
9a799d71
AK
2075 return IRQ_HANDLED;
2076}
2077
021230d4
AV
2078/**
2079 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
2080 * @napi: napi struct with our devices info in it
2081 * @budget: amount of work driver is allowed to do this pass, in packets
2082 *
f0848276
JB
2083 * This function is optimized for cleaning one queue only on a single
2084 * q_vector!!!
021230d4 2085 **/
9a799d71
AK
2086static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
2087{
021230d4 2088 struct ixgbe_q_vector *q_vector =
e8e9f696 2089 container_of(napi, struct ixgbe_q_vector, napi);
021230d4 2090 struct ixgbe_adapter *adapter = q_vector->adapter;
f0848276 2091 struct ixgbe_ring *rx_ring = NULL;
9a799d71 2092 int work_done = 0;
021230d4 2093 long r_idx;
9a799d71 2094
5dd2d332 2095#ifdef CONFIG_IXGBE_DCA
bd0362dd 2096 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
33cf09c9 2097 ixgbe_update_dca(q_vector);
bd0362dd 2098#endif
9a799d71 2099
33cf09c9
AD
2100 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
2101 rx_ring = adapter->rx_ring[r_idx];
2102
78b6f4ce 2103 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
9a799d71 2104
021230d4
AV
2105 /* If all Rx work done, exit the polling mode */
2106 if (work_done < budget) {
288379f0 2107 napi_complete(napi);
f7554a2b 2108 if (adapter->rx_itr_setting & 1)
f494e8fa 2109 ixgbe_set_itr_msix(q_vector);
9a799d71 2110 if (!test_bit(__IXGBE_DOWN, &adapter->state))
fe49f04a 2111 ixgbe_irq_enable_queues(adapter,
e8e9f696 2112 ((u64)1 << q_vector->v_idx));
9a799d71
AK
2113 }
2114
2115 return work_done;
2116}
2117
f0848276 2118/**
91281fd3 2119 * ixgbe_clean_rxtx_many - msix (aka one shot) rx/tx clean routine
f0848276
JB
2120 * @napi: napi struct with our devices info in it
2121 * @budget: amount of work driver is allowed to do this pass, in packets
2122 *
2123 * This function cleans all of the rx and tx queues associated with a
2124 * q_vector.
2125 **/
91281fd3 2126static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
f0848276
JB
2127{
2128 struct ixgbe_q_vector *q_vector =
e8e9f696 2129 container_of(napi, struct ixgbe_q_vector, napi);
f0848276 2130 struct ixgbe_adapter *adapter = q_vector->adapter;
91281fd3 2131 struct ixgbe_ring *ring = NULL;
f0848276
JB
2132 int work_done = 0, i;
2133 long r_idx;
91281fd3
AD
2134 bool tx_clean_complete = true;
2135
33cf09c9
AD
2136#ifdef CONFIG_IXGBE_DCA
2137 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2138 ixgbe_update_dca(q_vector);
2139#endif
2140
91281fd3
AD
2141 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
2142 for (i = 0; i < q_vector->txr_count; i++) {
4a0b9ca0 2143 ring = adapter->tx_ring[r_idx];
91281fd3
AD
2144 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
2145 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
e8e9f696 2146 r_idx + 1);
91281fd3 2147 }
f0848276
JB
2148
2149 /* attempt to distribute budget to each queue fairly, but don't allow
2150 * the budget to go below 1 because we'll exit polling */
2151 budget /= (q_vector->rxr_count ?: 1);
2152 budget = max(budget, 1);
2153 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
2154 for (i = 0; i < q_vector->rxr_count; i++) {
4a0b9ca0 2155 ring = adapter->rx_ring[r_idx];
91281fd3 2156 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
f0848276 2157 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
e8e9f696 2158 r_idx + 1);
f0848276
JB
2159 }
2160
2161 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
4a0b9ca0 2162 ring = adapter->rx_ring[r_idx];
f0848276 2163 /* If all Rx work done, exit the polling mode */
7f821875 2164 if (work_done < budget) {
288379f0 2165 napi_complete(napi);
f7554a2b 2166 if (adapter->rx_itr_setting & 1)
f0848276
JB
2167 ixgbe_set_itr_msix(q_vector);
2168 if (!test_bit(__IXGBE_DOWN, &adapter->state))
fe49f04a 2169 ixgbe_irq_enable_queues(adapter,
e8e9f696 2170 ((u64)1 << q_vector->v_idx));
f0848276
JB
2171 return 0;
2172 }
2173
2174 return work_done;
2175}
91281fd3
AD
2176
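The budget handling above gives each rx ring on the vector an equal share of the NAPI budget, with a floor of one packet so polling always makes progress (the `?:` in the driver is the GCC shorthand for the same test). A standalone illustration:

/* NAPI budget split as in ixgbe_clean_rxtx_many(): each rx ring on
 * the vector gets an equal share, never less than 1 packet.
 */
#include <stdio.h>

int main(void)
{
	int budget = 64;
	int rxr_count = 3;	/* rx rings hung off this q_vector */

	int share = budget / (rxr_count ? rxr_count : 1);
	if (share < 1)
		share = 1;

	printf("%d rings x %d packets each\n", rxr_count, share);
	return 0;
}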
2177/**
2178 * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
2179 * @napi: napi struct with our devices info in it
2180 * @budget: amount of work driver is allowed to do this pass, in packets
2181 *
2182 * This function is optimized for cleaning one queue only on a single
2183 * q_vector!!!
2184 **/
2185static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
2186{
2187 struct ixgbe_q_vector *q_vector =
e8e9f696 2188 container_of(napi, struct ixgbe_q_vector, napi);
91281fd3
AD
2189 struct ixgbe_adapter *adapter = q_vector->adapter;
2190 struct ixgbe_ring *tx_ring = NULL;
2191 int work_done = 0;
2192 long r_idx;
2193
91281fd3
AD
2194#ifdef CONFIG_IXGBE_DCA
2195 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
33cf09c9 2196 ixgbe_update_dca(q_vector);
91281fd3
AD
2197#endif
2198
33cf09c9
AD
2199 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
2200 tx_ring = adapter->tx_ring[r_idx];
2201
91281fd3
AD
2202 if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
2203 work_done = budget;
2204
f7554a2b 2205 /* If all Tx work done, exit the polling mode */
91281fd3
AD
2206 if (work_done < budget) {
2207 napi_complete(napi);
f7554a2b 2208 if (adapter->tx_itr_setting & 1)
91281fd3
AD
2209 ixgbe_set_itr_msix(q_vector);
2210 if (!test_bit(__IXGBE_DOWN, &adapter->state))
e8e9f696
JP
2211 ixgbe_irq_enable_queues(adapter,
2212 ((u64)1 << q_vector->v_idx));
91281fd3
AD
2213 }
2214
2215 return work_done;
2216}
2217
021230d4 2218static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
e8e9f696 2219 int r_idx)
021230d4 2220{
7a921c93 2221 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2274543f 2222 struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
7a921c93
AD
2223
2224 set_bit(r_idx, q_vector->rxr_idx);
2225 q_vector->rxr_count++;
2274543f 2226 rx_ring->q_vector = q_vector;
021230d4
AV
2227}
2228
2229static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
e8e9f696 2230 int t_idx)
021230d4 2231{
7a921c93 2232 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2274543f 2233 struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
7a921c93
AD
2234
2235 set_bit(t_idx, q_vector->txr_idx);
2236 q_vector->txr_count++;
2274543f 2237 tx_ring->q_vector = q_vector;
021230d4
AV
2238}
2239
9a799d71 2240/**
021230d4
AV
2241 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
2242 * @adapter: board private structure to initialize
9a799d71 2243 *
021230d4
AV
2244 * This function maps descriptor rings to the queue-specific vectors
2245 * we were allotted through the MSI-X enabling code. Ideally, we'd have
2246 * one vector per ring/queue, but on a constrained vector budget, we
2247 * group the rings as "efficiently" as possible. You would add new
2248 * mapping configurations in here.
9a799d71 2249 **/
d0759ebb 2250static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
021230d4 2251{
d0759ebb 2252 int q_vectors;
021230d4
AV
2253 int v_start = 0;
2254 int rxr_idx = 0, txr_idx = 0;
2255 int rxr_remaining = adapter->num_rx_queues;
2256 int txr_remaining = adapter->num_tx_queues;
2257 int i, j;
2258 int rqpv, tqpv;
2259 int err = 0;
2260
2261 /* No mapping required if MSI-X is disabled. */
2262 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2263 goto out;
9a799d71 2264
d0759ebb
AD
2265 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2266
021230d4
AV
2267 /*
2268 * The ideal configuration...
2269 * We have enough vectors to map one per queue.
2270 */
d0759ebb 2271 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
021230d4
AV
2272 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
2273 map_vector_to_rxq(adapter, v_start, rxr_idx);
9a799d71 2274
021230d4
AV
2275 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
2276 map_vector_to_txq(adapter, v_start, txr_idx);
9a799d71 2277
9a799d71 2278 goto out;
021230d4 2279 }
9a799d71 2280
021230d4
AV
2281 /*
2282 * If we don't have enough vectors for a 1-to-1
2283 * mapping, we'll have to group them so there are
2284 * multiple queues per vector.
2285 */
2286 /* Re-adjusting *qpv takes care of the remainder. */
d0759ebb
AD
2287 for (i = v_start; i < q_vectors; i++) {
2288 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
021230d4
AV
2289 for (j = 0; j < rqpv; j++) {
2290 map_vector_to_rxq(adapter, i, rxr_idx);
2291 rxr_idx++;
2292 rxr_remaining--;
2293 }
d0759ebb 2294 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
021230d4
AV
2295 for (j = 0; j < tqpv; j++) {
2296 map_vector_to_txq(adapter, i, txr_idx);
2297 txr_idx++;
2298 txr_remaining--;
9a799d71 2299 }
9a799d71 2300 }
021230d4
AV
2301out:
2302 return err;
2303}
2304
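When vectors are scarce, the fallback path above spreads the remaining rings with DIV_ROUND_UP so the earlier vectors absorb the remainder. A standalone sketch of the same arithmetic:

/* Ring-to-vector grouping as in ixgbe_map_rings_to_vectors():
 * e.g. 10 rx rings onto 4 vectors -> 3, 3, 2, 2.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int q_vectors = 4, rxr_remaining = 10, rxr_idx = 0;

	for (int i = 0; i < q_vectors; i++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		printf("vector %d: rings %d..%d\n", i, rxr_idx,
		       rxr_idx + rqpv - 1);
		rxr_idx += rqpv;
		rxr_remaining -= rqpv;
	}
	return 0;
}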
2305/**
2306 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
2307 * @adapter: board private structure
2308 *
2309 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
2310 * interrupts from the kernel.
2311 **/
2312static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2313{
2314 struct net_device *netdev = adapter->netdev;
2315 irqreturn_t (*handler)(int, void *);
2316 int i, vector, q_vectors, err;
e8e9f696 2317 int ri = 0, ti = 0;
021230d4
AV
2318
2319 /* Decrement for Other and TCP Timer vectors */
2320 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2321
d0759ebb 2322 err = ixgbe_map_rings_to_vectors(adapter);
021230d4 2323 if (err)
d0759ebb 2324 return err;
021230d4 2325
d0759ebb
AD
2326#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
2327 ? &ixgbe_msix_clean_many : \
2328 (_v)->rxr_count ? &ixgbe_msix_clean_rx : \
2329 (_v)->txr_count ? &ixgbe_msix_clean_tx : \
2330 NULL)
021230d4 2331 for (vector = 0; vector < q_vectors; vector++) {
d0759ebb
AD
2332 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2333 handler = SET_HANDLER(q_vector);
cb13fc20 2334
e8e9f696 2335 if (handler == &ixgbe_msix_clean_rx) {
d0759ebb 2336 sprintf(q_vector->name, "%s-%s-%d",
cb13fc20 2337 netdev->name, "rx", ri++);
e8e9f696 2338 } else if (handler == &ixgbe_msix_clean_tx) {
d0759ebb 2339 sprintf(q_vector->name, "%s-%s-%d",
cb13fc20 2340 netdev->name, "tx", ti++);
d0759ebb
AD
2341 } else if (handler == &ixgbe_msix_clean_many) {
2342 sprintf(q_vector->name, "%s-%s-%d",
32aa77a4
AD
2343 netdev->name, "TxRx", ri++);
2344 ti++;
d0759ebb
AD
2345 } else {
2346 /* skip this unused q_vector */
2347 continue;
32aa77a4 2348 }
021230d4 2349 err = request_irq(adapter->msix_entries[vector].vector,
d0759ebb
AD
2350 handler, 0, q_vector->name,
2351 q_vector);
9a799d71 2352 if (err) {
396e799c 2353 e_err(probe, "request_irq failed for MSIX interrupt "
849c4542 2354 "Error: %d\n", err);
021230d4 2355 goto free_queue_irqs;
9a799d71 2356 }
9a799d71
AK
2357 }
2358
d0759ebb 2359 sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
021230d4 2360 err = request_irq(adapter->msix_entries[vector].vector,
d0759ebb 2361 ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
9a799d71 2362 if (err) {
396e799c 2363 e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
021230d4 2364 goto free_queue_irqs;
9a799d71
AK
2365 }
2366
9a799d71
AK
2367 return 0;
2368
021230d4
AV
2369free_queue_irqs:
2370 for (i = vector - 1; i >= 0; i--)
2371 free_irq(adapter->msix_entries[--vector].vector,
e8e9f696 2372 adapter->q_vector[i]);
021230d4
AV
2373 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2374 pci_disable_msix(adapter->pdev);
9a799d71
AK
2375 kfree(adapter->msix_entries);
2376 adapter->msix_entries = NULL;
9a799d71
AK
2377 return err;
2378}
2379
f494e8fa
AV
2380static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
2381{
7a921c93 2382 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
4a0b9ca0
PW
2383 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
2384 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
125601bf
AD
2385 u32 new_itr = q_vector->eitr;
2386 u8 current_itr;
f494e8fa 2387
30efa5a3 2388 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
e8e9f696
JP
2389 q_vector->tx_itr,
2390 tx_ring->total_packets,
2391 tx_ring->total_bytes);
30efa5a3 2392 q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
e8e9f696
JP
2393 q_vector->rx_itr,
2394 rx_ring->total_packets,
2395 rx_ring->total_bytes);
f494e8fa 2396
30efa5a3 2397 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
f494e8fa
AV
2398
2399 switch (current_itr) {
2400 /* counts and packets in update_itr are dependent on these numbers */
2401 case lowest_latency:
2402 new_itr = 100000;
2403 break;
2404 case low_latency:
2405 new_itr = 20000; /* aka hwitr = ~200 */
2406 break;
2407 case bulk_latency:
2408 new_itr = 8000;
2409 break;
2410 default:
2411 break;
2412 }
2413
2414 if (new_itr != q_vector->eitr) {
fe49f04a 2415 /* do an exponential smoothing */
125601bf 2416 new_itr = ((q_vector->eitr * 9) + new_itr)/10;
509ee935 2417
125601bf 2418 /* save the algorithm value here */
509ee935 2419 q_vector->eitr = new_itr;
fe49f04a
AD
2420
2421 ixgbe_write_eitr(q_vector);
f494e8fa 2422 }
f494e8fa
AV
2423}
2424
79aefa45
AD
2425/**
2426 * ixgbe_irq_enable - Enable default interrupt generation settings
2427 * @adapter: board private structure
2428 **/
6af3b9eb
ET
2429static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2430 bool flush)
79aefa45
AD
2431{
2432 u32 mask;
835462fc
NS
2433
2434 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
119fc60a
MC
2435 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2436 mask |= IXGBE_EIMS_GPI_SDP0;
6ab33d51
DM
2437 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2438 mask |= IXGBE_EIMS_GPI_SDP1;
bd508178
AD
2439 switch (adapter->hw.mac.type) {
2440 case ixgbe_mac_82599EB:
b93a2226 2441 case ixgbe_mac_X540:
2a41ff81 2442 mask |= IXGBE_EIMS_ECC;
e8e26350
PW
2443 mask |= IXGBE_EIMS_GPI_SDP1;
2444 mask |= IXGBE_EIMS_GPI_SDP2;
1cdd1ec8
GR
2445 if (adapter->num_vfs)
2446 mask |= IXGBE_EIMS_MAILBOX;
bd508178
AD
2447 break;
2448 default:
2449 break;
e8e26350 2450 }
c4cf55e5
PWJ
2451 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
2452 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
2453 mask |= IXGBE_EIMS_FLOW_DIR;
e8e26350 2454
79aefa45 2455 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
6af3b9eb
ET
2456 if (queues)
2457 ixgbe_irq_enable_queues(adapter, ~0);
2458 if (flush)
2459 IXGBE_WRITE_FLUSH(&adapter->hw);
1cdd1ec8
GR
2460
2461 if (adapter->num_vfs > 32) {
2462 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
2463 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2464 }
79aefa45 2465}
021230d4 2466
9a799d71 2467/**
021230d4 2468 * ixgbe_intr - legacy mode Interrupt Handler
9a799d71
AK
2469 * @irq: interrupt number
2470 * @data: pointer to a network interface device structure
9a799d71
AK
2471 **/
2472static irqreturn_t ixgbe_intr(int irq, void *data)
2473{
2474 struct net_device *netdev = data;
2475 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2476 struct ixgbe_hw *hw = &adapter->hw;
7a921c93 2477 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
9a799d71
AK
2478 u32 eicr;
2479
54037505 2480 /*
6af3b9eb 2481 * Workaround for silicon errata on 82598. Mask the interrupts
54037505
DS
2482 * before the read of EICR.
2483 */
2484 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
2485
021230d4
AV
2486 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read,
2487 * therefore no explicit interrupt disable is necessary */
2488 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
f47cf66e 2489 if (!eicr) {
6af3b9eb
ET
2490 /*
2491 * shared interrupt alert!
f47cf66e 2492 * make sure interrupts are enabled because the read will
6af3b9eb
ET
2493 * have disabled interrupts due to EIAM
2494 * finish the workaround of silicon errata on 82598. Unmask
2495 * the interrupt that we masked before the EICR read.
2496 */
2497 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2498 ixgbe_irq_enable(adapter, true, true);
9a799d71 2499 return IRQ_NONE; /* Not our interrupt */
f47cf66e 2500 }
9a799d71 2501
cf8280ee
JB
2502 if (eicr & IXGBE_EICR_LSC)
2503 ixgbe_check_lsc(adapter);
021230d4 2504
bd508178
AD
2505 switch (hw->mac.type) {
2506 case ixgbe_mac_82599EB:
b93a2226 2507 case ixgbe_mac_X540:
e8e26350 2508 ixgbe_check_sfp_event(adapter, eicr);
bd508178
AD
2509 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2510 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
2511 adapter->interrupt_event = eicr;
2512 schedule_work(&adapter->check_overtemp_task);
2513 }
2514 break;
2515 default:
2516 break;
2517 }
e8e26350 2518
0befdb3e
JB
2519 ixgbe_check_fan_failure(adapter, eicr);
2520
7a921c93 2521 if (napi_schedule_prep(&(q_vector->napi))) {
4a0b9ca0
PW
2522 adapter->tx_ring[0]->total_packets = 0;
2523 adapter->tx_ring[0]->total_bytes = 0;
2524 adapter->rx_ring[0]->total_packets = 0;
2525 adapter->rx_ring[0]->total_bytes = 0;
021230d4 2526 /* would disable interrupts here but EIAM disabled it */
7a921c93 2527 __napi_schedule(&(q_vector->napi));
9a799d71
AK
2528 }
2529
6af3b9eb
ET
2530 /*
2531 * re-enable link(maybe) and non-queue interrupts, no flush.
2532 * ixgbe_poll will re-enable the queue interrupts
2533 */
2534
2535 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2536 ixgbe_irq_enable(adapter, false, false);
2537
9a799d71
AK
2538 return IRQ_HANDLED;
2539}
2540
021230d4
AV
2541static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
2542{
2543 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2544
2545 for (i = 0; i < q_vectors; i++) {
7a921c93 2546 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
021230d4
AV
2547 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
2548 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
2549 q_vector->rxr_count = 0;
2550 q_vector->txr_count = 0;
2551 }
2552}
2553
9a799d71
AK
2554/**
2555 * ixgbe_request_irq - initialize interrupts
2556 * @adapter: board private structure
2557 *
2558 * Attempts to configure interrupts using the best available
2559 * capabilities of the hardware and kernel.
2560 **/
021230d4 2561static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
9a799d71
AK
2562{
2563 struct net_device *netdev = adapter->netdev;
021230d4 2564 int err;
9a799d71 2565
021230d4
AV
2566 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2567 err = ixgbe_request_msix_irqs(adapter);
2568 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
a0607fd3 2569 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
e8e9f696 2570 netdev->name, netdev);
021230d4 2571 } else {
a0607fd3 2572 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
e8e9f696 2573 netdev->name, netdev);
9a799d71
AK
2574 }
2575
9a799d71 2576 if (err)
396e799c 2577 e_err(probe, "request_irq failed, Error %d\n", err);
9a799d71 2578
9a799d71
AK
2579 return err;
2580}
2581
2582static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2583{
2584 struct net_device *netdev = adapter->netdev;
2585
2586 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
021230d4 2587 int i, q_vectors;
9a799d71 2588
021230d4
AV
2589 q_vectors = adapter->num_msix_vectors;
2590
2591 i = q_vectors - 1;
9a799d71 2592 free_irq(adapter->msix_entries[i].vector, netdev);
9a799d71 2593
021230d4
AV
2594 i--;
2595 for (; i >= 0; i--) {
2596 free_irq(adapter->msix_entries[i].vector,
e8e9f696 2597 adapter->q_vector[i]);
021230d4
AV
2598 }
2599
2600 ixgbe_reset_q_vectors(adapter);
2601 } else {
2602 free_irq(adapter->pdev->irq, netdev);
9a799d71
AK
2603 }
2604}
2605
22d5a71b
JB
2606/**
2607 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
2608 * @adapter: board private structure
2609 **/
2610static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
2611{
bd508178
AD
2612 switch (adapter->hw.mac.type) {
2613 case ixgbe_mac_82598EB:
835462fc 2614 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
bd508178
AD
2615 break;
2616 case ixgbe_mac_82599EB:
b93a2226 2617 case ixgbe_mac_X540:
835462fc
NS
2618 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
2619 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
22d5a71b 2620 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
1cdd1ec8
GR
2621 if (adapter->num_vfs > 32)
2622 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
bd508178
AD
2623 break;
2624 default:
2625 break;
22d5a71b
JB
2626 }
2627 IXGBE_WRITE_FLUSH(&adapter->hw);
2628 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2629 int i;
2630 for (i = 0; i < adapter->num_msix_vectors; i++)
2631 synchronize_irq(adapter->msix_entries[i].vector);
2632 } else {
2633 synchronize_irq(adapter->pdev->irq);
2634 }
2635}
2636
9a799d71
AK
2637/**
2638 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
2639 *
2640 **/
2641static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2642{
9a799d71
AK
2643 struct ixgbe_hw *hw = &adapter->hw;
2644
021230d4 2645 IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
e8e9f696 2646 EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
9a799d71 2647
e8e26350
PW
2648 ixgbe_set_ivar(adapter, 0, 0, 0);
2649 ixgbe_set_ivar(adapter, 1, 0, 0);
021230d4
AV
2650
2651 map_vector_to_rxq(adapter, 0, 0);
2652 map_vector_to_txq(adapter, 0, 0);
2653
396e799c 2654 e_info(hw, "Legacy interrupt IVAR setup done\n");
9a799d71
AK
2655}
2656
43e69bf0
AD
2657/**
2658 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
2659 * @adapter: board private structure
2660 * @ring: structure containing ring specific data
2661 *
2662 * Configure the Tx descriptor ring after a reset.
2663 **/
84418e3b
AD
2664void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2665 struct ixgbe_ring *ring)
43e69bf0
AD
2666{
2667 struct ixgbe_hw *hw = &adapter->hw;
2668 u64 tdba = ring->dma;
2f1860b8
AD
2669 int wait_loop = 10;
2670 u32 txdctl;
bf29ee6c 2671 u8 reg_idx = ring->reg_idx;
43e69bf0 2672
2f1860b8
AD
2673 /* disable queue to avoid issues while updating state */
2674 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2675 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
2676 txdctl & ~IXGBE_TXDCTL_ENABLE);
2677 IXGBE_WRITE_FLUSH(hw);
2678
43e69bf0 2679 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
e8e9f696 2680 (tdba & DMA_BIT_MASK(32)));
43e69bf0
AD
2681 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
2682 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
2683 ring->count * sizeof(union ixgbe_adv_tx_desc));
2684 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
2685 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
84ea2591 2686 ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
43e69bf0 2687
2f1860b8
AD
2688 /* configure fetching thresholds */
2689 if (adapter->rx_itr_setting == 0) {
2690 /* cannot set wthresh when itr==0 */
2691 txdctl &= ~0x007F0000;
2692 } else {
2693 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
2694 txdctl |= (8 << 16);
2695 }
2696 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
2697 /* PThresh workaround for Tx hang with DFP enabled. */
2698 txdctl |= 32;
2699 }
2700
2701 /* reinitialize flowdirector state */
ee9e0f0b
AD
2702 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
2703 adapter->atr_sample_rate) {
2704 ring->atr_sample_rate = adapter->atr_sample_rate;
2705 ring->atr_count = 0;
2706 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
2707 } else {
2708 ring->atr_sample_rate = 0;
2709 }
2f1860b8 2710
c84d324c
JF
2711 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
2712
2f1860b8
AD
2713 /* enable queue */
2714 txdctl |= IXGBE_TXDCTL_ENABLE;
2715 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
2716
2717 /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2718 if (hw->mac.type == ixgbe_mac_82598EB &&
2719 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
2720 return;
2721
2722 /* poll to verify queue is enabled */
2723 do {
2724 msleep(1);
2725 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2726 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
2727 if (!wait_loop)
2728 e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
43e69bf0
AD
2729}
2730
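TXDCTL packs the prefetch and write-back thresholds into one register; the `8 << 16` above is WTHRESH=8 and the bare `|= 32` is PTHRESH=32 (field positions inferred from the 0x007F0000 mask, so treat them as an assumption). A standalone sketch:

/* TXDCTL threshold packing as in ixgbe_configure_tx_ring():
 * PTHRESH in the low bits, WTHRESH at bit 16 (assumed layout).
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t txdctl = 0;

	txdctl &= ~0x007F0000;	/* clear WTHRESH */
	txdctl |= 8 << 16;	/* WTHRESH = 8: burst writeback */
	txdctl |= 32;		/* PTHRESH = 32: DCB workaround value */

	printf("TXDCTL = 0x%08x (WTHRESH=%u PTHRESH=%u)\n", txdctl,
	       (txdctl >> 16) & 0x7F, txdctl & 0x7F);
	return 0;
}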
120ff942
AD
2731static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
2732{
2733 struct ixgbe_hw *hw = &adapter->hw;
2734 u32 rttdcs;
2735 u32 mask;
2736
2737 if (hw->mac.type == ixgbe_mac_82598EB)
2738 return;
2739
2740 /* disable the arbiter while setting MTQC */
2741 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2742 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2743 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2744
2745 /* set transmit pool layout */
2746 mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
2747 switch (adapter->flags & mask) {
2748
2749 case (IXGBE_FLAG_SRIOV_ENABLED):
2750 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2751 (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
2752 break;
2753
2754 case (IXGBE_FLAG_DCB_ENABLED):
2755 /* We enable 8 traffic classes, DCB only */
2756 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2757 (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
2758 break;
2759
2760 default:
2761 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2762 break;
2763 }
2764
2765 /* re-enable the arbiter */
2766 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2767 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2768}
2769
9a799d71 2770/**
3a581073 2771 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
9a799d71
AK
2772 * @adapter: board private structure
2773 *
2774 * Configure the Tx unit of the MAC after a reset.
2775 **/
2776static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
2777{
2f1860b8
AD
2778 struct ixgbe_hw *hw = &adapter->hw;
2779 u32 dmatxctl;
43e69bf0 2780 u32 i;
9a799d71 2781
2f1860b8
AD
2782 ixgbe_setup_mtqc(adapter);
2783
2784 if (hw->mac.type != ixgbe_mac_82598EB) {
2785 /* DMATXCTL.EN must be before Tx queues are enabled */
2786 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2787 dmatxctl |= IXGBE_DMATXCTL_TE;
2788 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2789 }
2790
9a799d71 2791 /* Setup the HW Tx Head and Tail descriptor pointers */
43e69bf0
AD
2792 for (i = 0; i < adapter->num_tx_queues; i++)
2793 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
9a799d71
AK
2794}
2795
e8e26350 2796#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
cc41ac7c 2797
a6616b42 2798static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
e8e9f696 2799 struct ixgbe_ring *rx_ring)
cc41ac7c 2800{
cc41ac7c 2801 u32 srrctl;
bf29ee6c 2802 u8 reg_idx = rx_ring->reg_idx;
3be1adfb 2803
bd508178
AD
2804 switch (adapter->hw.mac.type) {
2805 case ixgbe_mac_82598EB: {
2806 struct ixgbe_ring_feature *feature = adapter->ring_feature;
2807 const int mask = feature[RING_F_RSS].mask;
bf29ee6c 2808 reg_idx = reg_idx & mask;
cc41ac7c 2809 }
bd508178
AD
2810 break;
2811 case ixgbe_mac_82599EB:
b93a2226 2812 case ixgbe_mac_X540:
bd508178
AD
2813 default:
2814 break;
2815 }
2816
bf29ee6c 2817 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
cc41ac7c
JB
2818
2819 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2820 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
9e10e045
AD
2821 if (adapter->num_vfs)
2822 srrctl |= IXGBE_SRRCTL_DROP_EN;
cc41ac7c 2823
afafd5b0
AD
2824 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
2825 IXGBE_SRRCTL_BSIZEHDR_MASK;
2826
7d637bcc 2827 if (ring_is_ps_enabled(rx_ring)) {
afafd5b0
AD
2828#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
2829 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2830#else
2831 srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2832#endif
cc41ac7c 2833 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
cc41ac7c 2834 } else {
afafd5b0
AD
2835 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
2836 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
cc41ac7c 2837 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
cc41ac7c 2838 }
e8e26350 2839
bf29ee6c 2840 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
cc41ac7c 2841}
9a799d71 2842
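In one-buffer mode the SRRCTL packet-buffer size is expressed in 1 KB units, which is what the ALIGN(..., 1024) and BSIZEPKT shift above produce (the shift value of 10 is assumed from the 1 KB granularity). A standalone sketch:

/* SRRCTL buffer sizing as in ixgbe_configure_srrctl(), one-buffer
 * mode: buffer length rounded up to 1 KB, then expressed in 1 KB units.
 */
#include <stdio.h>
#include <stdint.h>

#define ALIGN_1K(x)		(((x) + 1023u) & ~1023u)
#define BSIZEPKT_SHIFT		10	/* assumed: field is in 1 KB units */

int main(void)
{
	uint32_t rx_buf_len = 1522;	/* typical VLAN frame size */
	uint32_t field = ALIGN_1K(rx_buf_len) >> BSIZEPKT_SHIFT;

	printf("rx_buf_len %u -> SRRCTL BSIZEPKT field %u (= %u KB)\n",
	       rx_buf_len, field, field);
	return 0;
}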
05abb126 2843static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
0cefafad 2844{
05abb126
AD
2845 struct ixgbe_hw *hw = &adapter->hw;
2846 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
e8e9f696
JP
2847 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
2848 0x6A3E67EA, 0x14364D17, 0x3BED200D};
05abb126
AD
2849 u32 mrqc = 0, reta = 0;
2850 u32 rxcsum;
2851 int i, j;
0cefafad
JB
2852 int mask;
2853
05abb126
AD
2854 /* Fill out hash function seeds */
2855 for (i = 0; i < 10; i++)
2856 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
2857
2858 /* Fill out redirection table */
2859 for (i = 0, j = 0; i < 128; i++, j++) {
2860 if (j == adapter->ring_feature[RING_F_RSS].indices)
2861 j = 0;
2862 /* reta = 4-byte sliding window of
2863 * 0x00..(indices-1)(indices-1)00..etc. */
2864 reta = (reta << 8) | (j * 0x11);
2865 if ((i & 3) == 3)
2866 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2867 }
0cefafad 2868
05abb126
AD
2869 /* Disable indicating checksum in descriptor, enables RSS hash */
2870 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2871 rxcsum |= IXGBE_RXCSUM_PCSD;
2872 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2873
2874 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2875 mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED;
2876 else
2877 mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
0cefafad 2878#ifdef CONFIG_IXGBE_DCB
05abb126 2879 | IXGBE_FLAG_DCB_ENABLED
0cefafad 2880#endif
05abb126
AD
2881 | IXGBE_FLAG_SRIOV_ENABLED
2882 );
0cefafad
JB
2883
2884 switch (mask) {
2885 case (IXGBE_FLAG_RSS_ENABLED):
2886 mrqc = IXGBE_MRQC_RSSEN;
2887 break;
1cdd1ec8
GR
2888 case (IXGBE_FLAG_SRIOV_ENABLED):
2889 mrqc = IXGBE_MRQC_VMDQEN;
2890 break;
0cefafad
JB
2891#ifdef CONFIG_IXGBE_DCB
2892 case (IXGBE_FLAG_DCB_ENABLED):
2893 mrqc = IXGBE_MRQC_RT8TCEN;
2894 break;
2895#endif /* CONFIG_IXGBE_DCB */
2896 default:
2897 break;
2898 }
2899
05abb126
AD
2900 /* Perform hash on these packet types */
2901 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2902 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2903 | IXGBE_MRQC_RSS_FIELD_IPV6
2904 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2905
2906 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
0cefafad
JB
2907}
2908
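The redirection-table fill above packs four byte-wide entries per 32-bit RETA register and cycles queue indices 0..indices-1; multiplying by 0x11 repeats the 4-bit queue index in both nibbles of each byte. A standalone sketch:

/* RSS redirection table fill as in ixgbe_setup_mrqc(): 128 entries,
 * four packed per 32-bit RETA register, cycling over the RSS queues.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int indices = 4;	/* active RSS queues */
	uint32_t reta = 0;

	for (int i = 0, j = 0; i < 128; i++, j++) {
		if (j == indices)
			j = 0;
		/* j * 0x11 repeats the queue index in both nibbles */
		reta = (reta << 8) | (j * 0x11);
		if ((i & 3) == 3 && i < 16)	/* show the first regs */
			printf("RETA(%d) = 0x%08x\n", i >> 2, reta);
	}
	return 0;
}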
b93a2226
DS
2909/**
2910 * ixgbe_clear_rscctl - disable RSC for the indicated ring
2911 * @adapter: address of board private structure
2912 * @ring: structure containing ring specific data
2913 **/
2914void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
2915 struct ixgbe_ring *ring)
2916{
2917 struct ixgbe_hw *hw = &adapter->hw;
2918 u32 rscctrl;
2919 u8 reg_idx = ring->reg_idx;
2920
2921 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
2922 rscctrl &= ~IXGBE_RSCCTL_RSCEN;
2923 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
2924}
2925
bb5a9ad2
NS
2926/**
2927 * ixgbe_configure_rscctl - enable RSC for the indicated ring
2928 * @adapter: address of board private structure
2929 * @index: index of ring to set
bb5a9ad2 2930 **/
b93a2226 2931void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
7367096a 2932 struct ixgbe_ring *ring)
bb5a9ad2 2933{
bb5a9ad2 2934 struct ixgbe_hw *hw = &adapter->hw;
bb5a9ad2 2935 u32 rscctrl;
edd2ea55 2936 int rx_buf_len;
bf29ee6c 2937 u8 reg_idx = ring->reg_idx;
7367096a 2938
7d637bcc 2939 if (!ring_is_rsc_enabled(ring))
7367096a 2940 return;
bb5a9ad2 2941
7367096a
AD
2942 rx_buf_len = ring->rx_buf_len;
2943 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
bb5a9ad2
NS
2944 rscctrl |= IXGBE_RSCCTL_RSCEN;
2945 /*
2946 * we must limit the number of descriptors so that the
2947 * total size of max desc * buf_len is not greater
2948 * than 65535
2949 */
7d637bcc 2950 if (ring_is_ps_enabled(ring)) {
bb5a9ad2
NS
2951#if (MAX_SKB_FRAGS > 16)
2952 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2953#elif (MAX_SKB_FRAGS > 8)
2954 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2955#elif (MAX_SKB_FRAGS > 4)
2956 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2957#else
2958 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
2959#endif
2960 } else {
2961 if (rx_buf_len < IXGBE_RXBUFFER_4096)
2962 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2963 else if (rx_buf_len < IXGBE_RXBUFFER_8192)
2964 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2965 else
2966 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2967 }
7367096a 2968 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
bb5a9ad2
NS
2969}
2970
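The MAXDESC selection above keeps descriptors-per-RSC times buffer size within the 65535-byte limit noted in the comment. A quick standalone check of the non-packet-split thresholds:

/* RSC descriptor cap: max_desc * buf_len must not exceed 65535,
 * which is why 2 KB buffers allow 16 descriptors but 8 KB only 4.
 */
#include <stdio.h>

int main(void)
{
	int buf_lens[] = { 2048, 4096, 8192 };

	for (int i = 0; i < 3; i++) {
		int buf_len = buf_lens[i];
		int max_desc = buf_len < 4096 ? 16 :
			       buf_len < 8192 ? 8 : 4;
		printf("buf_len %4d -> MAXDESC %2d (total %d bytes)\n",
		       buf_len, max_desc, max_desc * buf_len);
	}
	return 0;
}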
9e10e045
AD
2971/**
2972 * ixgbe_set_uta - Set unicast filter table address
2973 * @adapter: board private structure
2974 *
2975 * The unicast table address is a register array of 32-bit registers.
2976 * The table is meant to be used in a way similar to how the MTA is used
2977 * however due to certain limitations in the hardware it is necessary to
2978 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
2979 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
2980 **/
2981static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
2982{
2983 struct ixgbe_hw *hw = &adapter->hw;
2984 int i;
2985
2986 /* The UTA table only exists on 82599 hardware and newer */
2987 if (hw->mac.type < ixgbe_mac_82599EB)
2988 return;
2989
2990 /* we only need to do this if VMDq is enabled */
2991 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2992 return;
2993
2994 for (i = 0; i < 128; i++)
2995 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
2996}
2997
2998#define IXGBE_MAX_RX_DESC_POLL 10
2999static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3000 struct ixgbe_ring *ring)
3001{
3002 struct ixgbe_hw *hw = &adapter->hw;
9e10e045
AD
3003 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3004 u32 rxdctl;
bf29ee6c 3005 u8 reg_idx = ring->reg_idx;
9e10e045
AD
3006
3007 /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
3008 if (hw->mac.type == ixgbe_mac_82598EB &&
3009 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3010 return;
3011
3012 do {
3013 msleep(1);
3014 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3015 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
3016
3017 if (!wait_loop) {
3018 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
3019 "the polling period\n", reg_idx);
3020 }
3021}
3022
84418e3b
AD
3023void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3024 struct ixgbe_ring *ring)
acd37177
AD
3025{
3026 struct ixgbe_hw *hw = &adapter->hw;
3027 u64 rdba = ring->dma;
9e10e045 3028 u32 rxdctl;
bf29ee6c 3029 u8 reg_idx = ring->reg_idx;
acd37177 3030
9e10e045
AD
3031 /* disable queue to avoid issues while updating state */
3032 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3033 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
3034 rxdctl & ~IXGBE_RXDCTL_ENABLE);
3035 IXGBE_WRITE_FLUSH(hw);
3036
acd37177
AD
3037 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
3038 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
3039 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
3040 ring->count * sizeof(union ixgbe_adv_rx_desc));
3041 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
3042 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
84ea2591 3043 ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
9e10e045
AD
3044
3045 ixgbe_configure_srrctl(adapter, ring);
3046 ixgbe_configure_rscctl(adapter, ring);
3047
3048 if (hw->mac.type == ixgbe_mac_82598EB) {
3049 /*
3050 * enable cache line friendly hardware writes:
3051 * PTHRESH=32 descriptors (half the internal cache),
3052 * this also removes ugly rx_no_buffer_count increment
3053 * HTHRESH=4 descriptors (to minimize latency on fetch)
3054 * WTHRESH=8 burst writeback up to two cache lines
3055 */
3056 rxdctl &= ~0x3FFFFF;
3057 rxdctl |= 0x080420;
3058 }
3059
3060 /* enable receive descriptor ring */
3061 rxdctl |= IXGBE_RXDCTL_ENABLE;
3062 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3063
3064 ixgbe_rx_desc_queue_enable(adapter, ring);
fc77dc3c 3065 ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
acd37177
AD
3066}
3067
48654521
AD
3068static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
3069{
3070 struct ixgbe_hw *hw = &adapter->hw;
3071 int p;
3072
3073 /* PSRTYPE must be initialized in non 82598 adapters */
3074 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
e8e9f696
JP
3075 IXGBE_PSRTYPE_UDPHDR |
3076 IXGBE_PSRTYPE_IPV4HDR |
48654521 3077 IXGBE_PSRTYPE_L2HDR |
e8e9f696 3078 IXGBE_PSRTYPE_IPV6HDR;
48654521
AD
3079
3080 if (hw->mac.type == ixgbe_mac_82598EB)
3081 return;
3082
3083 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
3084 psrtype |= (adapter->num_rx_queues_per_pool << 29);
3085
3086 for (p = 0; p < adapter->num_rx_pools; p++)
3087 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
3088 psrtype);
3089}
3090
f5b4a52e
AD
3091static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3092{
3093 struct ixgbe_hw *hw = &adapter->hw;
3094 u32 gcr_ext;
3095 u32 vt_reg_bits;
3096 u32 reg_offset, vf_shift;
3097 u32 vmdctl;
3098
3099 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3100 return;
3101
3102 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3103 vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
3104 vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
3105 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
3106
3107 vf_shift = adapter->num_vfs % 32;
3108 reg_offset = (adapter->num_vfs > 32) ? 1 : 0;
3109
3110 /* Enable only the PF's pool for Tx/Rx */
3111 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
3112 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
3113 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
3114 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
3115 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3116
3117 /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
3118 hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
3119
3120 /*
3121 * Set up VF register offsets for selected VT Mode,
3122 * i.e. 32 or 64 VFs for SR-IOV
3123 */
3124 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
3125 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
3126 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
3127 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3128
3129 /* enable Tx loopback for VF/PF communication */
3130 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3131}
3132
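Because the PF's pool comes immediately after the VFs, its enable bit can land in either of the two 32-bit VFRE/VFTE registers; the modulo and offset math above picks the register and bit. A standalone sketch:

/* Locating the PF's pool-enable bit as in
 * ixgbe_configure_virtualization(): pools 0..num_vfs-1 are the VFs,
 * pool num_vfs is the PF, spread over two 32-bit VFRE registers.
 */
#include <stdio.h>

int main(void)
{
	int counts[] = { 16, 40 };

	for (int i = 0; i < 2; i++) {
		int num_vfs = counts[i];
		int vf_shift = num_vfs % 32;
		int reg_offset = num_vfs > 32 ? 1 : 0;
		printf("%2d VFs: PF bit = VFRE(%d) bit %d\n",
		       num_vfs, reg_offset, vf_shift);
	}
	return 0;
}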
477de6ed 3133static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
9a799d71 3134{
9a799d71
AK
3135 struct ixgbe_hw *hw = &adapter->hw;
3136 struct net_device *netdev = adapter->netdev;
3137 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
7c6e0a43 3138 int rx_buf_len;
477de6ed
AD
3139 struct ixgbe_ring *rx_ring;
3140 int i;
3141 u32 mhadd, hlreg0;
48654521 3142
9a799d71 3143 /* Decide whether to use packet split mode or not */
1cdd1ec8
GR
3144 /* Do not use packet split if we're in SR-IOV Mode */
3145 if (!adapter->num_vfs)
3146 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
9a799d71
AK
3147
3148 /* Set the RX buffer length according to the mode */
3149 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
7c6e0a43 3150 rx_buf_len = IXGBE_RX_HDR_SIZE;
9a799d71 3151 } else {
0c19d6af 3152 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
f8212f97 3153 (netdev->mtu <= ETH_DATA_LEN))
7c6e0a43 3154 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
9a799d71 3155 else
477de6ed 3156 rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024);
9a799d71
AK
3157 }
3158
63f39bd1 3159#ifdef IXGBE_FCOE
477de6ed
AD
3160 /* adjust max frame to be able to do baby jumbo for FCoE */
3161 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
3162 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
3163 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
9a799d71 3164
477de6ed
AD
3165#endif /* IXGBE_FCOE */
3166 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3167 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
3168 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3169 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
3170
3171 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3172 }
3173
3174 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3175 /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
3176 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
3177 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
9a799d71 3178
0cefafad
JB
3179 /*
3180 * Setup the HW Rx Head and Tail Descriptor Pointers and
3181 * the Base and Length of the Rx Descriptor Ring
3182 */
9a799d71 3183 for (i = 0; i < adapter->num_rx_queues; i++) {
4a0b9ca0 3184 rx_ring = adapter->rx_ring[i];
a6616b42 3185 rx_ring->rx_buf_len = rx_buf_len;
cc41ac7c 3186
6e455b89 3187 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
7d637bcc
AD
3188 set_ring_ps_enabled(rx_ring);
3189 else
3190 clear_ring_ps_enabled(rx_ring);
3191
3192 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
3193 set_ring_rsc_enabled(rx_ring);
1b3ff02e 3194 else
7d637bcc 3195 clear_ring_rsc_enabled(rx_ring);
cc41ac7c 3196
63f39bd1 3197#ifdef IXGBE_FCOE
e8e9f696 3198 if (netdev->features & NETIF_F_FCOE_MTU) {
63f39bd1
YZ
3199 struct ixgbe_ring_feature *f;
3200 f = &adapter->ring_feature[RING_F_FCOE];
6e455b89 3201 if ((i >= f->mask) && (i < f->mask + f->indices)) {
7d637bcc 3202 clear_ring_ps_enabled(rx_ring);
6e455b89
YZ
3203 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
3204 rx_ring->rx_buf_len =
e8e9f696 3205 IXGBE_FCOE_JUMBO_FRAME_SIZE;
7d637bcc
AD
3206 } else if (!ring_is_rsc_enabled(rx_ring) &&
3207 !ring_is_ps_enabled(rx_ring)) {
3208 rx_ring->rx_buf_len =
3209 IXGBE_FCOE_JUMBO_FRAME_SIZE;
6e455b89 3210 }
63f39bd1 3211 }
63f39bd1 3212#endif /* IXGBE_FCOE */
477de6ed 3213 }
477de6ed
AD
3214}
3215
7367096a
AD
3216static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
3217{
3218 struct ixgbe_hw *hw = &adapter->hw;
3219 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3220
3221 switch (hw->mac.type) {
3222 case ixgbe_mac_82598EB:
3223 /*
3224 * For VMDq support of different descriptor types or
3225 * buffer sizes through the use of multiple SRRCTL
3226 * registers, RDRXCTL.MVMEN must be set to 1
3227 *
3228 * also, the manual doesn't mention it clearly but DCA hints
3229 * will only use queue 0's tags unless this bit is set. Side
3230 * effects of setting this bit are only that SRRCTL must be
3231 * fully programmed [0..15]
3232 */
3233 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
3234 break;
3235 case ixgbe_mac_82599EB:
b93a2226 3236 case ixgbe_mac_X540:
7367096a
AD
3237 /* Disable RSC for ACK packets */
3238 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3239 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
3240 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3241 /* hardware requires some bits to be set by default */
3242 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
3243 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3244 break;
3245 default:
3246 /* We should do nothing since we don't know this hardware */
3247 return;
3248 }
3249
3250 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3251}
3252
477de6ed
AD
3253/**
3254 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
3255 * @adapter: board private structure
3256 *
3257 * Configure the Rx unit of the MAC after a reset.
3258 **/
3259static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3260{
3261 struct ixgbe_hw *hw = &adapter->hw;
477de6ed
AD
3262 int i;
3263 u32 rxctrl;
477de6ed
AD
3264
3265 /* disable receives while setting up the descriptors */
3266 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3267 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3268
3269 ixgbe_setup_psrtype(adapter);
7367096a 3270 ixgbe_setup_rdrxctl(adapter);
477de6ed 3271
9e10e045 3272 /* Program registers for the distribution of queues */
f5b4a52e 3273 ixgbe_setup_mrqc(adapter);
f5b4a52e 3274
9e10e045
AD
3275 ixgbe_set_uta(adapter);
3276
477de6ed
AD
3277 /* set_rx_buffer_len must be called before ring initialization */
3278 ixgbe_set_rx_buffer_len(adapter);
3279
3280 /*
3281 * Setup the HW Rx Head and Tail Descriptor Pointers and
3282 * the Base and Length of the Rx Descriptor Ring
3283 */
9e10e045
AD
3284 for (i = 0; i < adapter->num_rx_queues; i++)
3285 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
177db6ff 3286
9e10e045
AD
3287 /* disable drop enable for 82598 parts */
3288 if (hw->mac.type == ixgbe_mac_82598EB)
3289 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3290
3291 /* enable all receives */
3292 rxctrl |= IXGBE_RXCTRL_RXEN;
3293 hw->mac.ops.enable_rx_dma(hw, rxctrl);
9a799d71
AK
3294}
3295
068c89b0
DS
3296static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
3297{
3298 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3299 struct ixgbe_hw *hw = &adapter->hw;
1ada1b1b 3300 int pool_ndx = adapter->num_vfs;
068c89b0
DS
3301
3302 /* add VID to filter table */
1ada1b1b 3303 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
f62bbb5e 3304 set_bit(vid, adapter->active_vlans);
068c89b0
DS
3305}
3306
3307static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
3308{
3309 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3310 struct ixgbe_hw *hw = &adapter->hw;
1ada1b1b 3311 int pool_ndx = adapter->num_vfs;
068c89b0 3312
068c89b0 3313 /* remove VID from filter table */
1ada1b1b 3314 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
f62bbb5e 3315 clear_bit(vid, adapter->active_vlans);
068c89b0
DS
3316}
3317
5f6c0181
JB
3318/**
3319 * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
3320 * @adapter: driver data
3321 */
3322static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
3323{
3324 struct ixgbe_hw *hw = &adapter->hw;
f62bbb5e
JG
3325 u32 vlnctrl;
3326
3327 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3328 vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
3329 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3330}
3331
3332/**
3333 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
3334 * @adapter: driver data
3335 */
3336static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
3337{
3338 struct ixgbe_hw *hw = &adapter->hw;
3339 u32 vlnctrl;
3340
3341 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3342 vlnctrl |= IXGBE_VLNCTRL_VFE;
3343 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3344 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3345}
3346
3347/**
3348 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
3349 * @adapter: driver data
3350 */
3351static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3352{
3353 struct ixgbe_hw *hw = &adapter->hw;
3354 u32 vlnctrl;
5f6c0181
JB
3355 int i, j;
3356
3357 switch (hw->mac.type) {
3358 case ixgbe_mac_82598EB:
f62bbb5e
JG
3359 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3360 vlnctrl &= ~IXGBE_VLNCTRL_VME;
5f6c0181
JB
3361 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3362 break;
3363 case ixgbe_mac_82599EB:
b93a2226 3364 case ixgbe_mac_X540:
5f6c0181
JB
3365 for (i = 0; i < adapter->num_rx_queues; i++) {
3366 j = adapter->rx_ring[i]->reg_idx;
3367 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3368 vlnctrl &= ~IXGBE_RXDCTL_VME;
3369 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3370 }
3371 break;
3372 default:
3373 break;
3374 }
3375}
3376
3377/**
f62bbb5e 3378 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
5f6c0181
JB
3379 * @adapter: driver data
3380 */
f62bbb5e 3381static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
5f6c0181
JB
3382{
3383 struct ixgbe_hw *hw = &adapter->hw;
f62bbb5e 3384 u32 vlnctrl;
5f6c0181
JB
3385 int i, j;
3386
3387 switch (hw->mac.type) {
3388 case ixgbe_mac_82598EB:
f62bbb5e
JG
3389 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3390 vlnctrl |= IXGBE_VLNCTRL_VME;
5f6c0181
JB
3391 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3392 break;
3393 case ixgbe_mac_82599EB:
b93a2226 3394 case ixgbe_mac_X540:
5f6c0181
JB
3395 for (i = 0; i < adapter->num_rx_queues; i++) {
3396 j = adapter->rx_ring[i]->reg_idx;
3397 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3398 vlnctrl |= IXGBE_RXDCTL_VME;
3399 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3400 }
3401 break;
3402 default:
3403 break;
3404 }
3405}
3406
9a799d71
AK
3407static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
3408{
f62bbb5e 3409 u16 vid;
9a799d71 3410
f62bbb5e
JG
3411 ixgbe_vlan_rx_add_vid(adapter->netdev, 0);
3412
3413 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
3414 ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
9a799d71
AK
3415}
3416
2850062a
AD
3417/**
3418 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
3419 * @netdev: network interface device structure
3420 *
3421 * Writes unicast address list to the RAR table.
3422 * Returns: -ENOMEM on failure/insufficient address space
3423 * 0 on no addresses written
3424 * X on writing X addresses to the RAR table
3425 **/
3426static int ixgbe_write_uc_addr_list(struct net_device *netdev)
3427{
3428 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3429 struct ixgbe_hw *hw = &adapter->hw;
3430 unsigned int vfn = adapter->num_vfs;
3431 unsigned int rar_entries = hw->mac.num_rar_entries - (vfn + 1);
3432 int count = 0;
3433
3434 /* return ENOMEM indicating insufficient memory for addresses */
3435 if (netdev_uc_count(netdev) > rar_entries)
3436 return -ENOMEM;
3437
3438 if (!netdev_uc_empty(netdev) && rar_entries) {
3439 struct netdev_hw_addr *ha;
3440 /* return error if we do not support writing to RAR table */
3441 if (!hw->mac.ops.set_rar)
3442 return -ENOMEM;
3443
3444 netdev_for_each_uc_addr(ha, netdev) {
3445 if (!rar_entries)
3446 break;
3447 hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
3448 vfn, IXGBE_RAH_AV);
3449 count++;
3450 }
3451 }
3452 /* write the addresses in reverse order to avoid write combining */
3453 for (; rar_entries > 0 ; rar_entries--)
3454 hw->mac.ops.clear_rar(hw, rar_entries);
3455
3456 return count;
3457}
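
/*
 * Illustrative sketch, not part of this driver: the RAR budget computed
 * above, with hypothetical numbers. Slot 0 is assumed to carry the PF
 * MAC and one slot is assumed to be set aside per VF, so a 128-entry
 * table with 7 VFs leaves 120 entries for extra unicast addresses.
 */
#include <stdio.h>

int main(void)
{
	unsigned int num_rar_entries = 128;	/* hypothetical table size */
	unsigned int num_vfs = 7;		/* hypothetical VF count */
	unsigned int rar_entries = num_rar_entries - (num_vfs + 1);

	printf("unicast filters available to the PF: %u\n", rar_entries);
	return 0;	/* prints 120 */
}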
3458
9a799d71 3459/**
2c5645cf 3460 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
9a799d71
AK
3461 * @netdev: network interface device structure
3462 *
2c5645cf
CL
3463 * The set_rx_mode entry point is called whenever the unicast/multicast
3464 * address list or the network interface flags are updated. This routine is
3465 * responsible for configuring the hardware for proper unicast, multicast and
3466 * promiscuous mode.
9a799d71 3467 **/
7f870475 3468void ixgbe_set_rx_mode(struct net_device *netdev)
9a799d71
AK
3469{
3470 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3471 struct ixgbe_hw *hw = &adapter->hw;
2850062a
AD
3472 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
3473 int count;
9a799d71
AK
3474
3475 /* Check for Promiscuous and All Multicast modes */
3476
3477 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3478
f5dc442b
AD
3479 /* set all bits that we expect to always be set */
3480 fctrl |= IXGBE_FCTRL_BAM;
3481 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
3482 fctrl |= IXGBE_FCTRL_PMCF;
3483
2850062a
AD
3484 /* clear the bits we are changing the status of */
3485 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3486
9a799d71 3487 if (netdev->flags & IFF_PROMISC) {
e433ea1f 3488 hw->addr_ctrl.user_set_promisc = true;
9a799d71 3489 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2850062a 3490 vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
5f6c0181
JB
3491 /* don't hardware filter vlans in promisc mode */
3492 ixgbe_vlan_filter_disable(adapter);
9a799d71 3493 } else {
746b9f02
PM
3494 if (netdev->flags & IFF_ALLMULTI) {
3495 fctrl |= IXGBE_FCTRL_MPE;
2850062a
AD
3496 vmolr |= IXGBE_VMOLR_MPE;
3497 } else {
3498 /*
3499 * Write addresses to the MTA, if the attempt fails
3500 * then we should just turn on promiscuous mode so
3501 * that we can at least receive multicast traffic
3502 */
3503 hw->mac.ops.update_mc_addr_list(hw, netdev);
3504 vmolr |= IXGBE_VMOLR_ROMPE;
746b9f02 3505 }
5f6c0181 3506 ixgbe_vlan_filter_enable(adapter);
e433ea1f 3507 hw->addr_ctrl.user_set_promisc = false;
2850062a
AD
3508 /*
3509 * Write addresses to available RAR registers, if there is not
3510 * sufficient space to store all the addresses then enable
3511 * unicast promiscuous mode
3512 */
3513 count = ixgbe_write_uc_addr_list(netdev);
3514 if (count < 0) {
3515 fctrl |= IXGBE_FCTRL_UPE;
3516 vmolr |= IXGBE_VMOLR_ROPE;
3517 }
9a799d71
AK
3518 }
3519
2850062a 3520 if (adapter->num_vfs) {
1cdd1ec8 3521 ixgbe_restore_vf_multicasts(adapter);
2850062a
AD
3522 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
3523 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
3524 IXGBE_VMOLR_ROPE);
3525 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
3526 }
3527
3528 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
f62bbb5e
JG
3529
3530 if (netdev->features & NETIF_F_HW_VLAN_RX)
3531 ixgbe_vlan_strip_enable(adapter);
3532 else
3533 ixgbe_vlan_strip_disable(adapter);
9a799d71
AK
3534}
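
/*
 * Illustrative sketch, not part of this driver: the rx-mode logic above
 * reduces to a small decision tree over the netdev flags. Standalone
 * model with hypothetical filter-control bit names:
 */
#include <stdbool.h>
#include <stdint.h>

#define SK_UPE 0x1u	/* unicast promiscuous (hypothetical) */
#define SK_MPE 0x2u	/* multicast promiscuous (hypothetical) */

static uint32_t sketch_rx_mode(bool promisc, bool allmulti, bool uc_overflow)
{
	uint32_t fctrl = 0;

	if (promisc) {
		fctrl |= SK_UPE | SK_MPE;	/* accept everything */
	} else {
		if (allmulti)
			fctrl |= SK_MPE;	/* all multicast, exact unicast */
		if (uc_overflow)
			fctrl |= SK_UPE;	/* RAR table full: open unicast */
	}
	return fctrl;
}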
3535
021230d4
AV
3536static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
3537{
3538 int q_idx;
3539 struct ixgbe_q_vector *q_vector;
3540 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3541
3542 /* legacy and MSI only use one vector */
3543 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
3544 q_vectors = 1;
3545
3546 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
f0848276 3547 struct napi_struct *napi;
7a921c93 3548 q_vector = adapter->q_vector[q_idx];
f0848276 3549 napi = &q_vector->napi;
91281fd3
AD
3550 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3551 if (!q_vector->rxr_count || !q_vector->txr_count) {
3552 if (q_vector->txr_count == 1)
3553 napi->poll = &ixgbe_clean_txonly;
3554 else if (q_vector->rxr_count == 1)
3555 napi->poll = &ixgbe_clean_rxonly;
3556 }
3557 }
f0848276
JB
3558
3559 napi_enable(napi);
021230d4
AV
3560 }
3561}
3562
3563static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
3564{
3565 int q_idx;
3566 struct ixgbe_q_vector *q_vector;
3567 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3568
3569 /* legacy and MSI only use one vector */
3570 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
3571 q_vectors = 1;
3572
3573 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
7a921c93 3574 q_vector = adapter->q_vector[q_idx];
021230d4
AV
3575 napi_disable(&q_vector->napi);
3576 }
3577}
3578
7a6b6f51 3579#ifdef CONFIG_IXGBE_DCB
2f90b865
AD
3580/*
3581 * ixgbe_configure_dcb - Configure DCB hardware
3582 * @adapter: ixgbe adapter struct
3583 *
3584 * This is called by the driver on open to configure the DCB hardware.
3585 * This is also called by the gennetlink interface when reconfiguring
3586 * the DCB state.
3587 */
3588static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3589{
3590 struct ixgbe_hw *hw = &adapter->hw;
9806307a 3591 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2f90b865 3592
67ebd791
AD
3593 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
3594 if (hw->mac.type == ixgbe_mac_82598EB)
3595 netif_set_gso_max_size(adapter->netdev, 65536);
3596 return;
3597 }
3598
3599 if (hw->mac.type == ixgbe_mac_82598EB)
3600 netif_set_gso_max_size(adapter->netdev, 32768);
3601
9806307a
JF
3602#ifdef CONFIG_FCOE
3603 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
3604 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
3605#endif
3606
80ab193d 3607 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
9806307a 3608 DCB_TX_CONFIG);
80ab193d 3609 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
9806307a 3610 DCB_RX_CONFIG);
2f90b865 3611
2f90b865 3612 /* Enable VLAN tag insert/strip */
f62bbb5e 3613 adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
5f6c0181 3614
2f90b865 3615 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
01fa7d90
AD
3616
3617 /* reconfigure the hardware */
3618 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
2f90b865
AD
3619}
3620
3621#endif
9a799d71
AK
3622static void ixgbe_configure(struct ixgbe_adapter *adapter)
3623{
3624 struct net_device *netdev = adapter->netdev;
c4cf55e5 3625 struct ixgbe_hw *hw = &adapter->hw;
9a799d71
AK
3626 int i;
3627
7a6b6f51 3628#ifdef CONFIG_IXGBE_DCB
67ebd791 3629 ixgbe_configure_dcb(adapter);
2f90b865 3630#endif
9a799d71 3631
f62bbb5e
JG
3632 ixgbe_set_rx_mode(netdev);
3633 ixgbe_restore_vlan(adapter);
3634
eacd73f7
YZ
3635#ifdef IXGBE_FCOE
3636 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
3637 ixgbe_configure_fcoe(adapter);
3638
3639#endif /* IXGBE_FCOE */
c4cf55e5
PWJ
3640 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3641 for (i = 0; i < adapter->num_tx_queues; i++)
4a0b9ca0 3642 adapter->tx_ring[i]->atr_sample_rate =
e8e9f696 3643 adapter->atr_sample_rate;
c4cf55e5
PWJ
3644 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
3645 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
3646 ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
3647 }
933d41f1 3648 ixgbe_configure_virtualization(adapter);
c4cf55e5 3649
9a799d71
AK
3650 ixgbe_configure_tx(adapter);
3651 ixgbe_configure_rx(adapter);
9a799d71
AK
3652}
3653
e8e26350
PW
3654static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
3655{
3656 switch (hw->phy.type) {
3657 case ixgbe_phy_sfp_avago:
3658 case ixgbe_phy_sfp_ftl:
3659 case ixgbe_phy_sfp_intel:
3660 case ixgbe_phy_sfp_unknown:
ea0a04df
DS
3661 case ixgbe_phy_sfp_passive_tyco:
3662 case ixgbe_phy_sfp_passive_unknown:
3663 case ixgbe_phy_sfp_active_unknown:
3664 case ixgbe_phy_sfp_ftl_active:
e8e26350
PW
3665 return true;
3666 default:
3667 return false;
3668 }
3669}
3670
0ecc061d 3671/**
e8e26350
PW
3672 * ixgbe_sfp_link_config - set up SFP+ link
3673 * @adapter: pointer to private adapter struct
3674 **/
3675static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
3676{
3677 struct ixgbe_hw *hw = &adapter->hw;
3678
3679 if (hw->phy.multispeed_fiber) {
3680 /*
3681 * In multispeed fiber setups, the device may not have
3682 * had a physical connection when the driver loaded.
3683 * If that's the case, the initial link configuration
3684 * couldn't get the MAC into 10G or 1G mode, so we'll
3685 * never have a link status change interrupt fire.
3686 * We need to try and force an autonegotiation
3687 * session, then bring up link.
3688 */
3689 hw->mac.ops.setup_sfp(hw);
3690 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
3691 schedule_work(&adapter->multispeed_fiber_task);
3692 } else {
3693 /*
3694 * Direct Attach Cu and non-multispeed fiber modules
3695 * still need to be configured properly prior to
3696 * attempting link.
3697 */
3698 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
3699 schedule_work(&adapter->sfp_config_module_task);
3700 }
3701}
3702
3703/**
3704 * ixgbe_non_sfp_link_config - set up non-SFP+ link
0ecc061d
PWJ
3705 * @hw: pointer to private hardware struct
3706 *
3707 * Returns 0 on success, negative on failure
3708 **/
e8e26350 3709static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
0ecc061d
PWJ
3710{
3711 u32 autoneg;
8620a103 3712 bool negotiation, link_up = false;
0ecc061d
PWJ
3713 u32 ret = IXGBE_ERR_LINK_SETUP;
3714
3715 if (hw->mac.ops.check_link)
3716 ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
3717
3718 if (ret)
3719 goto link_cfg_out;
3720
3721 if (hw->mac.ops.get_link_capabilities)
e8e9f696
JP
3722 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
3723 &negotiation);
0ecc061d
PWJ
3724 if (ret)
3725 goto link_cfg_out;
3726
8620a103
MC
3727 if (hw->mac.ops.setup_link)
3728 ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
0ecc061d
PWJ
3729link_cfg_out:
3730 return ret;
3731}
3732
a34bcfff 3733static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
9a799d71 3734{
9a799d71 3735 struct ixgbe_hw *hw = &adapter->hw;
a34bcfff 3736 u32 gpie = 0;
9a799d71 3737
9b471446 3738 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
a34bcfff
AD
3739 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
3740 IXGBE_GPIE_OCD;
3741 gpie |= IXGBE_GPIE_EIAME;
9b471446
JB
3742 /*
3743 * use EIAM to auto-mask when MSI-X interrupt is asserted
3744 * this saves a register write for every interrupt
3745 */
3746 switch (hw->mac.type) {
3747 case ixgbe_mac_82598EB:
3748 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3749 break;
9b471446 3750 case ixgbe_mac_82599EB:
b93a2226
DS
3751 case ixgbe_mac_X540:
3752 default:
9b471446
JB
3753 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3754 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3755 break;
3756 }
3757 } else {
021230d4
AV
3758 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
3759 * specifically only auto mask tx and rx interrupts */
3760 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3761 }
9a799d71 3762
a34bcfff
AD
3763 /* XXX: to interrupt immediately for EICS writes, enable this */
3764 /* gpie |= IXGBE_GPIE_EIMEN; */
3765
3766 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3767 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
3768 gpie |= IXGBE_GPIE_VTMODE_64;
119fc60a
MC
3769 }
3770
a34bcfff
AD
3771 /* Enable fan failure interrupt */
3772 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
0befdb3e 3773 gpie |= IXGBE_SDP1_GPIEN;
0befdb3e 3774
a34bcfff 3775 if (hw->mac.type == ixgbe_mac_82599EB) {
e8e26350
PW
3776 gpie |= IXGBE_SDP1_GPIEN;
3777 gpie |= IXGBE_SDP2_GPIEN;
a34bcfff
AD
3778 }
3779 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3780}
3781
3782static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3783{
3784 struct ixgbe_hw *hw = &adapter->hw;
a34bcfff 3785 int err;
a34bcfff
AD
3786 u32 ctrl_ext;
3787
3788 ixgbe_get_hw_control(adapter);
3789 ixgbe_setup_gpie(adapter);
e8e26350 3790
9a799d71
AK
3791 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3792 ixgbe_configure_msix(adapter);
3793 else
3794 ixgbe_configure_msi_and_legacy(adapter);
3795
61fac744 3796 /* enable the optics */
e3de4b7b 3797 if (hw->phy.multispeed_fiber && hw->mac.ops.enable_tx_laser)
61fac744
PW
3798 hw->mac.ops.enable_tx_laser(hw);
3799
9a799d71 3800 clear_bit(__IXGBE_DOWN, &adapter->state);
021230d4
AV
3801 ixgbe_napi_enable_all(adapter);
3802
73c4b7cd
AD
3803 if (ixgbe_is_sfp(hw)) {
3804 ixgbe_sfp_link_config(adapter);
3805 } else {
3806 err = ixgbe_non_sfp_link_config(hw);
3807 if (err)
3808 e_err(probe, "link_config FAILED %d\n", err);
3809 }
3810
021230d4
AV
3811 /* clear any pending interrupts, may auto mask */
3812 IXGBE_READ_REG(hw, IXGBE_EICR);
6af3b9eb 3813 ixgbe_irq_enable(adapter, true, true);
9a799d71 3814
bf069c97
DS
3815 /*
3816 * If this adapter has a fan, check to see if we had a failure
3817 * before we enabled the interrupt.
3818 */
3819 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
3820 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
3821 if (esdp & IXGBE_ESDP_SDP1)
396e799c 3822 e_crit(drv, "Fan has stopped, replace the adapter\n");
bf069c97
DS
3823 }
3824
e8e26350
PW
3825 /*
3826 * For hot-pluggable SFP+ devices, a new SFP+ module may have
19343de2
DS
3827 * arrived before interrupts were enabled but after probe. Such
3828 * devices wouldn't have their type identified yet. We need to
3829 * kick off the SFP+ module setup first, then try to bring up link.
e8e26350
PW
3830 * If we're not hot-pluggable SFP+, we just need to configure link
3831 * and bring it up.
3832 */
73c4b7cd
AD
3833 if (hw->phy.type == ixgbe_phy_unknown)
3834 schedule_work(&adapter->sfp_config_module_task);
0ecc061d 3835
1da100bb 3836 /* enable transmits */
477de6ed 3837 netif_tx_start_all_queues(adapter->netdev);
1da100bb 3838
9a799d71
AK
3839 /* bring the link up in the watchdog, this could race with our first
3840 * link up interrupt but shouldn't be a problem */
cf8280ee
JB
3841 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
3842 adapter->link_check_timeout = jiffies;
9a799d71 3843 mod_timer(&adapter->watchdog_timer, jiffies);
c9205697
GR
3844
3845 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
3846 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3847 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3848 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3849
9a799d71
AK
3850 return 0;
3851}
3852
d4f80882
AV
3853void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
3854{
3855 WARN_ON(in_interrupt());
3856 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
3857 msleep(1);
3858 ixgbe_down(adapter);
5809a1ae
GR
3859 /*
3860 * If SR-IOV enabled then wait a bit before bringing the adapter
3861 * back up to give the VFs time to respond to the reset. The
3862 * two second wait is based upon the watchdog timer cycle in
3863 * the VF driver.
3864 */
3865 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3866 msleep(2000);
d4f80882
AV
3867 ixgbe_up(adapter);
3868 clear_bit(__IXGBE_RESETTING, &adapter->state);
3869}
3870
9a799d71
AK
3871int ixgbe_up(struct ixgbe_adapter *adapter)
3872{
3873 /* hardware has been reset, we need to reload some things */
3874 ixgbe_configure(adapter);
3875
3876 return ixgbe_up_complete(adapter);
3877}
3878
3879void ixgbe_reset(struct ixgbe_adapter *adapter)
3880{
c44ade9e 3881 struct ixgbe_hw *hw = &adapter->hw;
8ca783ab
DS
3882 int err;
3883
3884 err = hw->mac.ops.init_hw(hw);
da4dd0f7
PWJ
3885 switch (err) {
3886 case 0:
3887 case IXGBE_ERR_SFP_NOT_PRESENT:
3888 break;
3889 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
849c4542 3890 e_dev_err("master disable timed out\n");
da4dd0f7 3891 break;
794caeb2
PWJ
3892 case IXGBE_ERR_EEPROM_VERSION:
3893 /* We are running on a pre-production device, log a warning */
849c4542
ET
3894 e_dev_warn("This device is a pre-production adapter/LOM. "
3895 "Please be aware there may be issuesassociated with "
3896 "your hardware. If you are experiencing problems "
3897 "please contact your Intel or hardware "
3898 "representative who provided you with this "
3899 "hardware.\n");
794caeb2 3900 break;
da4dd0f7 3901 default:
849c4542 3902 e_dev_err("Hardware Error: %d\n", err);
da4dd0f7 3903 }
9a799d71
AK
3904
3905 /* reprogram the RAR[0] in case user changed it. */
1cdd1ec8
GR
3906 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
3907 IXGBE_RAH_AV);
9a799d71
AK
3908}
3909
9a799d71
AK
3910/**
3911 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
9a799d71
AK
3912 * @rx_ring: ring to free buffers from
3913 **/
b6ec895e 3914static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
9a799d71 3915{
b6ec895e 3916 struct device *dev = rx_ring->dev;
9a799d71 3917 unsigned long size;
b6ec895e 3918 u16 i;
9a799d71 3919
84418e3b
AD
3920 /* ring already cleared, nothing to do */
3921 if (!rx_ring->rx_buffer_info)
3922 return;
9a799d71 3923
84418e3b 3924 /* Free all the Rx ring sk_buffs */
9a799d71
AK
3925 for (i = 0; i < rx_ring->count; i++) {
3926 struct ixgbe_rx_buffer *rx_buffer_info;
3927
3928 rx_buffer_info = &rx_ring->rx_buffer_info[i];
3929 if (rx_buffer_info->dma) {
b6ec895e 3930 dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
e8e9f696 3931 rx_ring->rx_buf_len,
1b507730 3932 DMA_FROM_DEVICE);
9a799d71
AK
3933 rx_buffer_info->dma = 0;
3934 }
3935 if (rx_buffer_info->skb) {
f8212f97 3936 struct sk_buff *skb = rx_buffer_info->skb;
9a799d71 3937 rx_buffer_info->skb = NULL;
f8212f97
AD
3938 do {
3939 struct sk_buff *this = skb;
e8171aaa 3940 if (IXGBE_RSC_CB(this)->delay_unmap) {
b6ec895e 3941 dma_unmap_single(dev,
1b507730 3942 IXGBE_RSC_CB(this)->dma,
e8e9f696 3943 rx_ring->rx_buf_len,
1b507730 3944 DMA_FROM_DEVICE);
fd3686a8 3945 IXGBE_RSC_CB(this)->dma = 0;
e8171aaa 3946 IXGBE_RSC_CB(skb)->delay_unmap = false;
fd3686a8 3947 }
f8212f97
AD
3948 skb = skb->prev;
3949 dev_kfree_skb(this);
3950 } while (skb);
9a799d71
AK
3951 }
3952 if (!rx_buffer_info->page)
3953 continue;
4f57ca6e 3954 if (rx_buffer_info->page_dma) {
b6ec895e 3955 dma_unmap_page(dev, rx_buffer_info->page_dma,
1b507730 3956 PAGE_SIZE / 2, DMA_FROM_DEVICE);
4f57ca6e
JB
3957 rx_buffer_info->page_dma = 0;
3958 }
9a799d71
AK
3959 put_page(rx_buffer_info->page);
3960 rx_buffer_info->page = NULL;
762f4c57 3961 rx_buffer_info->page_offset = 0;
9a799d71
AK
3962 }
3963
3964 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
3965 memset(rx_ring->rx_buffer_info, 0, size);
3966
3967 /* Zero out the descriptor ring */
3968 memset(rx_ring->desc, 0, rx_ring->size);
3969
3970 rx_ring->next_to_clean = 0;
3971 rx_ring->next_to_use = 0;
9a799d71
AK
3972}
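
/*
 * Illustrative sketch, not part of this driver: the RSC cleanup above
 * walks a chain of buffers linked through skb->prev and frees each node
 * as it goes, always advancing before releasing. The generic shape, as
 * a standalone linked-list walk:
 */
#include <stdlib.h>

struct sk_node {
	struct sk_node *prev;	/* link to the next chained buffer */
};

static void sketch_free_chain(struct sk_node *skb)
{
	while (skb) {
		struct sk_node *this = skb;	/* node to release */

		skb = skb->prev;		/* advance before freeing */
		free(this);			/* ~dev_kfree_skb(this) */
	}
}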
3973
3974/**
3975 * ixgbe_clean_tx_ring - Free Tx Buffers
9a799d71
AK
3976 * @tx_ring: ring to be cleaned
3977 **/
b6ec895e 3978static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
9a799d71
AK
3979{
3980 struct ixgbe_tx_buffer *tx_buffer_info;
3981 unsigned long size;
b6ec895e 3982 u16 i;
9a799d71 3983
84418e3b
AD
3984 /* ring already cleared, nothing to do */
3985 if (!tx_ring->tx_buffer_info)
3986 return;
9a799d71 3987
84418e3b 3988 /* Free all the Tx ring sk_buffs */
9a799d71
AK
3989 for (i = 0; i < tx_ring->count; i++) {
3990 tx_buffer_info = &tx_ring->tx_buffer_info[i];
b6ec895e 3991 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
9a799d71
AK
3992 }
3993
3994 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
3995 memset(tx_ring->tx_buffer_info, 0, size);
3996
3997 /* Zero out the descriptor ring */
3998 memset(tx_ring->desc, 0, tx_ring->size);
3999
4000 tx_ring->next_to_use = 0;
4001 tx_ring->next_to_clean = 0;
9a799d71
AK
4002}
4003
4004/**
021230d4 4005 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
9a799d71
AK
4006 * @adapter: board private structure
4007 **/
021230d4 4008static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
9a799d71
AK
4009{
4010 int i;
4011
021230d4 4012 for (i = 0; i < adapter->num_rx_queues; i++)
b6ec895e 4013 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
9a799d71
AK
4014}
4015
4016/**
021230d4 4017 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
9a799d71
AK
4018 * @adapter: board private structure
4019 **/
021230d4 4020static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
9a799d71
AK
4021{
4022 int i;
4023
021230d4 4024 for (i = 0; i < adapter->num_tx_queues; i++)
b6ec895e 4025 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
9a799d71
AK
4026}
4027
4028void ixgbe_down(struct ixgbe_adapter *adapter)
4029{
4030 struct net_device *netdev = adapter->netdev;
7f821875 4031 struct ixgbe_hw *hw = &adapter->hw;
9a799d71 4032 u32 rxctrl;
7f821875 4033 u32 txdctl;
bf29ee6c 4034 int i;
b25ebfd2 4035 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
9a799d71
AK
4036
4037 /* signal that we are down to the interrupt handler */
4038 set_bit(__IXGBE_DOWN, &adapter->state);
4039
767081ad
GR
4040 /* disable receive for all VFs and wait one second */
4041 if (adapter->num_vfs) {
767081ad
GR
4042 /* ping all the active vfs to let them know we are going down */
4043 ixgbe_ping_all_vfs(adapter);
581d1aa7 4044
767081ad
GR
4045 /* Disable all VFTE/VFRE TX/RX */
4046 ixgbe_disable_tx_rx(adapter);
581d1aa7
GR
4047
4048 /* Mark all the VFs as inactive */
4049 for (i = 0 ; i < adapter->num_vfs; i++)
4050 adapter->vfinfo[i].clear_to_send = 0;
767081ad
GR
4051 }
4052
9a799d71 4053 /* disable receives */
7f821875
JB
4054 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4055 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
9a799d71 4056
7f821875 4057 IXGBE_WRITE_FLUSH(hw);
9a799d71
AK
4058 msleep(10);
4059
7f821875
JB
4060 netif_tx_stop_all_queues(netdev);
4061
0a1f87cb
DS
4062 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
4063 del_timer_sync(&adapter->sfp_timer);
9a799d71 4064 del_timer_sync(&adapter->watchdog_timer);
cf8280ee 4065 cancel_work_sync(&adapter->watchdog_task);
9a799d71 4066
c0dfb90e
JF
4067 netif_carrier_off(netdev);
4068 netif_tx_disable(netdev);
4069
4070 ixgbe_irq_disable(adapter);
4071
4072 ixgbe_napi_disable_all(adapter);
4073
b25ebfd2
PW
4074 /* Cleanup the affinity_hint CPU mask memory and callback */
4075 for (i = 0; i < num_q_vectors; i++) {
4076 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
4077 /* clear the affinity_mask in the IRQ descriptor */
4078 irq_set_affinity_hint(adapter->msix_entries[i].vector, NULL);
4079 /* release the CPU mask memory */
4080 free_cpumask_var(q_vector->affinity_mask);
4081 }
4082
c4cf55e5
PWJ
4083 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
4084 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
4085 cancel_work_sync(&adapter->fdir_reinit_task);
4086
119fc60a
MC
4087 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
4088 cancel_work_sync(&adapter->check_overtemp_task);
4089
7f821875
JB
4090 /* disable transmits in the hardware now that interrupts are off */
4091 for (i = 0; i < adapter->num_tx_queues; i++) {
bf29ee6c
AD
4092 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
4093 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
4094 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
e8e9f696 4095 (txdctl & ~IXGBE_TXDCTL_ENABLE));
7f821875 4096 }
88512539 4097 /* Disable the Tx DMA engine on 82599 */
bd508178
AD
4098 switch (hw->mac.type) {
4099 case ixgbe_mac_82599EB:
b93a2226 4100 case ixgbe_mac_X540:
88512539 4101 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
e8e9f696
JP
4102 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
4103 ~IXGBE_DMATXCTL_TE));
bd508178
AD
4104 break;
4105 default:
4106 break;
4107 }
7f821875 4108
9f756f01 4109 /* power down the optics */
e3de4b7b 4110 if (hw->phy.multispeed_fiber && hw->mac.ops.disable_tx_laser)
9f756f01
JF
4111 hw->mac.ops.disable_tx_laser(hw);
4112
9a713e7c
PW
4113 /* clear n-tuple filters that are cached */
4114 ethtool_ntuple_flush(netdev);
4115
6f4a0e45
PL
4116 if (!pci_channel_offline(adapter->pdev))
4117 ixgbe_reset(adapter);
9a799d71
AK
4118 ixgbe_clean_all_tx_rings(adapter);
4119 ixgbe_clean_all_rx_rings(adapter);
4120
5dd2d332 4121#ifdef CONFIG_IXGBE_DCA
96b0e0f6 4122 /* since we reset the hardware DCA settings were cleared */
e35ec126 4123 ixgbe_setup_dca(adapter);
96b0e0f6 4124#endif
9a799d71
AK
4125}
4126
9a799d71 4127/**
021230d4
AV
4128 * ixgbe_poll - NAPI Rx polling callback
4129 * @napi: structure for representing this polling device
4130 * @budget: how many packets driver is allowed to clean
4131 *
4132 * This function is used for legacy and MSI, NAPI mode
9a799d71 4133 **/
021230d4 4134static int ixgbe_poll(struct napi_struct *napi, int budget)
9a799d71 4135{
9a1a69ad 4136 struct ixgbe_q_vector *q_vector =
e8e9f696 4137 container_of(napi, struct ixgbe_q_vector, napi);
021230d4 4138 struct ixgbe_adapter *adapter = q_vector->adapter;
9a1a69ad 4139 int tx_clean_complete, work_done = 0;
9a799d71 4140
5dd2d332 4141#ifdef CONFIG_IXGBE_DCA
33cf09c9
AD
4142 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
4143 ixgbe_update_dca(q_vector);
bd0362dd
JC
4144#endif
4145
4a0b9ca0
PW
4146 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
4147 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
9a799d71 4148
9a1a69ad 4149 if (!tx_clean_complete)
d2c7ddd6
DM
4150 work_done = budget;
4151
53e52c72
DM
4152 /* If budget not fully consumed, exit the polling mode */
4153 if (work_done < budget) {
288379f0 4154 napi_complete(napi);
f7554a2b 4155 if (adapter->rx_itr_setting & 1)
f494e8fa 4156 ixgbe_set_itr(adapter);
d4f80882 4157 if (!test_bit(__IXGBE_DOWN, &adapter->state))
835462fc 4158 ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
9a799d71 4159 }
9a799d71
AK
4160 return work_done;
4161}
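
/*
 * Illustrative sketch, not part of this driver: the NAPI contract that
 * ixgbe_poll() follows above. A poll routine may process at most
 * 'budget' rx packets; if it finishes under budget it must call
 * napi_complete() and re-arm its interrupts, and it always returns the
 * work done. The helper names here are hypothetical stubs.
 */
#include <linux/netdevice.h>

static int sketch_clean_rx(int budget)
{
	return budget / 2;	/* stub: pretend the ring half-filled the budget */
}

static void sketch_irq_enable(void)
{
	/* stub: the real driver unmasks its vectors via EIMS here */
}

static int sketch_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = sketch_clean_rx(budget);

	/* under budget: ring is drained, leave polled mode, unmask IRQs */
	if (work_done < budget) {
		napi_complete(napi);
		sketch_irq_enable();
	}

	/* returning == budget keeps NAPI polling us without interrupts */
	return work_done;
}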
4162
4163/**
4164 * ixgbe_tx_timeout - Respond to a Tx Hang
4165 * @netdev: network interface device structure
4166 **/
4167static void ixgbe_tx_timeout(struct net_device *netdev)
4168{
4169 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4170
c84d324c
JF
4171 adapter->tx_timeout_count++;
4172
9a799d71
AK
4173 /* Do the reset outside of interrupt context */
4174 schedule_work(&adapter->reset_task);
4175}
4176
4177static void ixgbe_reset_task(struct work_struct *work)
4178{
4179 struct ixgbe_adapter *adapter;
4180 adapter = container_of(work, struct ixgbe_adapter, reset_task);
4181
2f90b865
AD
4182 /* If we're already down or resetting, just bail */
4183 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
4184 test_bit(__IXGBE_RESETTING, &adapter->state))
4185 return;
4186
dcd79aeb
TI
4187 ixgbe_dump(adapter);
4188 netdev_err(adapter->netdev, "Reset adapter\n");
d4f80882 4189 ixgbe_reinit_locked(adapter);
9a799d71
AK
4190}
4191
bc97114d
PWJ
4192#ifdef CONFIG_IXGBE_DCB
4193static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
b9804972 4194{
bc97114d 4195 bool ret = false;
0cefafad 4196 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
b9804972 4197
0cefafad
JB
4198 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
4199 return ret;
4200
4201 f->mask = 0x7 << 3;
4202 adapter->num_rx_queues = f->indices;
4203 adapter->num_tx_queues = f->indices;
4204 ret = true;
2f90b865 4205
bc97114d
PWJ
4206 return ret;
4207}
4208#endif
4209
4df10466
JB
4210/**
4211 * ixgbe_set_rss_queues: Allocate queues for RSS
4212 * @adapter: board private structure to initialize
4213 *
4214 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
4215 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
4216 *
4217 **/
bc97114d
PWJ
4218static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
4219{
4220 bool ret = false;
0cefafad 4221 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
bc97114d
PWJ
4222
4223 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
0cefafad
JB
4224 f->mask = 0xF;
4225 adapter->num_rx_queues = f->indices;
4226 adapter->num_tx_queues = f->indices;
bc97114d
PWJ
4227 ret = true;
4228 } else {
bc97114d 4229 ret = false;
b9804972
JB
4230 }
4231
bc97114d
PWJ
4232 return ret;
4233}
4234
c4cf55e5
PWJ
4235/**
4236 * ixgbe_set_fdir_queues: Allocate queues for Flow Director
4237 * @adapter: board private structure to initialize
4238 *
4239 * Flow Director is an advanced Rx filter, attempting to get Rx flows back
4240 * to the original CPU that initiated the Tx session. This runs in addition
4241 * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
4242 * Rx load across CPUs using RSS.
4243 *
4244 **/
e8e9f696 4245static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
c4cf55e5
PWJ
4246{
4247 bool ret = false;
4248 struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
4249
4250 f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
4251 f_fdir->mask = 0;
4252
4253 /* Flow Director must have RSS enabled */
4254 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
4255 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
4256 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) {
4257 adapter->num_tx_queues = f_fdir->indices;
4258 adapter->num_rx_queues = f_fdir->indices;
4259 ret = true;
4260 } else {
4261 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
4262 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4263 }
4264 return ret;
4265}
4266
0331a832
YZ
4267#ifdef IXGBE_FCOE
4268/**
4269 * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
4270 * @adapter: board private structure to initialize
4271 *
4272 * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
4273 * The ring feature mask is not used as a mask for FCoE, as it can take any 8
4274 * rx queues out of the max number of rx queues, instead, it is used as the
4275 * index of the first rx queue used by FCoE.
4276 *
4277 **/
4278static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
4279{
4280 bool ret = false;
4281 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
4282
4283 f->indices = min((int)num_online_cpus(), f->indices);
4284 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
8de8b2e6
YZ
4285 adapter->num_rx_queues = 1;
4286 adapter->num_tx_queues = 1;
0331a832
YZ
4287#ifdef CONFIG_IXGBE_DCB
4288 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
396e799c 4289 e_info(probe, "FCoE enabled with DCB\n");
0331a832
YZ
4290 ixgbe_set_dcb_queues(adapter);
4291 }
4292#endif
4293 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
396e799c 4294 e_info(probe, "FCoE enabled with RSS\n");
8faa2a78
YZ
4295 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4296 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
4297 ixgbe_set_fdir_queues(adapter);
4298 else
4299 ixgbe_set_rss_queues(adapter);
0331a832
YZ
4300 }
4301 /* adding FCoE rx rings to the end */
4302 f->mask = adapter->num_rx_queues;
4303 adapter->num_rx_queues += f->indices;
8de8b2e6 4304 adapter->num_tx_queues += f->indices;
0331a832
YZ
4305
4306 ret = true;
4307 }
4308
4309 return ret;
4310}
4311
4312#endif /* IXGBE_FCOE */
1cdd1ec8
GR
4313/**
4314 * ixgbe_set_sriov_queues: Allocate queues for IOV use
4315 * @adapter: board private structure to initialize
4316 *
4317 * IOV doesn't actually use anything, so just NAK the
4318 * request for now and let the other queue routines
4319 * figure out what to do.
4320 */
4321static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
4322{
4323 return false;
4324}
4325
4df10466
JB
4326/*
4327 * ixgbe_set_num_queues: Allocate queues for device, feature dependent
4328 * @adapter: board private structure to initialize
4329 *
4330 * This is the top level queue allocation routine. The order here is very
4331 * important, starting with the "most" number of features turned on at once,
4332 * and ending with the smallest set of features. This way large combinations
4333 * can be allocated if they're turned on, and smaller combinations are the
4334 * fallthrough conditions.
4335 *
4336 **/
847f53ff 4337static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
bc97114d 4338{
1cdd1ec8
GR
4339 /* Start with base case */
4340 adapter->num_rx_queues = 1;
4341 adapter->num_tx_queues = 1;
4342 adapter->num_rx_pools = adapter->num_rx_queues;
4343 adapter->num_rx_queues_per_pool = 1;
4344
4345 if (ixgbe_set_sriov_queues(adapter))
847f53ff 4346 goto done;
1cdd1ec8 4347
0331a832
YZ
4348#ifdef IXGBE_FCOE
4349 if (ixgbe_set_fcoe_queues(adapter))
4350 goto done;
4351
4352#endif /* IXGBE_FCOE */
bc97114d
PWJ
4353#ifdef CONFIG_IXGBE_DCB
4354 if (ixgbe_set_dcb_queues(adapter))
af22ab1b 4355 goto done;
bc97114d
PWJ
4356
4357#endif
c4cf55e5
PWJ
4358 if (ixgbe_set_fdir_queues(adapter))
4359 goto done;
4360
bc97114d 4361 if (ixgbe_set_rss_queues(adapter))
af22ab1b
WF
4362 goto done;
4363
4364 /* fallback to base case */
4365 adapter->num_rx_queues = 1;
4366 adapter->num_tx_queues = 1;
4367
4368done:
847f53ff 4369 /* Notify the stack of the (possibly) reduced queue counts. */
f0796d5c 4370 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
847f53ff
BH
4371 return netif_set_real_num_rx_queues(adapter->netdev,
4372 adapter->num_rx_queues);
b9804972
JB
4373}
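
/*
 * Illustrative sketch, not part of this driver: the priority chain in
 * ixgbe_set_num_queues() above, distilled. Each ixgbe_set_*_queues()
 * helper returns true when it has claimed the queue layout, so the most
 * featureful enabled mode wins and everything after it is skipped.
 */
#include <stdbool.h>

typedef bool (*sketch_layout_fn)(void *adapter);

static bool sketch_pick_layout(void *adapter,
			       const sketch_layout_fn *fns, int n)
{
	int i;

	for (i = 0; i < n; i++)		/* ordered most-features-first */
		if (fns[i](adapter))
			return true;	/* first claimant wins */
	return false;			/* fall back to a single queue */
}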
4374
021230d4 4375static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
e8e9f696 4376 int vectors)
021230d4
AV
4377{
4378 int err, vector_threshold;
4379
4380 /* We'll want at least 3 (vector_threshold):
4381 * 1) TxQ[0] Cleanup
4382 * 2) RxQ[0] Cleanup
4383 * 3) Other (Link Status Change, etc.)
4384 * 4) TCP Timer (optional)
4385 */
4386 vector_threshold = MIN_MSIX_COUNT;
4387
4388 /* The more we get, the more we will assign to Tx/Rx Cleanup
4389 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
4390 * Right now, we simply care about how many we'll get; we'll
4392 * set them up later while requesting IRQs.
4392 */
4393 while (vectors >= vector_threshold) {
4394 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
e8e9f696 4395 vectors);
021230d4
AV
4396 if (!err) /* Success in acquiring all requested vectors. */
4397 break;
4398 else if (err < 0)
4399 vectors = 0; /* Nasty failure, quit now */
4400 else /* err == number of vectors we should try again with */
4401 vectors = err;
4402 }
4403
4404 if (vectors < vector_threshold) {
4405 /* Can't allocate enough MSI-X interrupts? Oh well.
4406 * This just means we'll go with either a single MSI
4407 * vector or fall back to legacy interrupts.
4408 */
849c4542
ET
4409 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
4410 "Unable to allocate MSI-X interrupts\n");
021230d4
AV
4411 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
4412 kfree(adapter->msix_entries);
4413 adapter->msix_entries = NULL;
021230d4
AV
4414 } else {
4415 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
eb7f139c
PWJ
4416 /*
4417 * Adjust for only the vectors we'll use, which is minimum
4418 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
4419 * vectors we were allocated.
4420 */
4421 adapter->num_msix_vectors = min(vectors,
e8e9f696 4422 adapter->max_msix_q_vectors + NON_Q_VECTORS);
021230d4
AV
4423 }
4424}
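
/*
 * Illustrative sketch, not part of this driver: the pci_enable_msix()
 * contract the loop above relies on (as of this kernel era). A return
 * of 0 means every requested vector was allocated, a negative value is
 * a hard error, and a positive value is the number of vectors that
 * could be allocated -- the caller is expected to retry with that count.
 */
#include <linux/errno.h>
#include <linux/pci.h>

static int sketch_acquire_msix(struct pci_dev *pdev,
			       struct msix_entry *entries,
			       int want, int min)
{
	while (want >= min) {
		int err = pci_enable_msix(pdev, entries, want);

		if (!err)
			return want;	/* success: got them all */
		if (err < 0)
			return err;	/* hard failure: give up */
		want = err;		/* partial: retry with what's left */
	}
	return -ENOSPC;			/* couldn't meet the minimum */
}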
4425
021230d4 4426/**
bc97114d 4427 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
021230d4
AV
4428 * @adapter: board private structure to initialize
4429 *
bc97114d
PWJ
4430 * Cache the descriptor ring offsets for RSS to the assigned rings.
4431 *
021230d4 4432 **/
bc97114d 4433static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
021230d4 4434{
bc97114d 4435 int i;
bc97114d 4436
9d6b758f
AD
4437 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
4438 return false;
bc97114d 4439
9d6b758f
AD
4440 for (i = 0; i < adapter->num_rx_queues; i++)
4441 adapter->rx_ring[i]->reg_idx = i;
4442 for (i = 0; i < adapter->num_tx_queues; i++)
4443 adapter->tx_ring[i]->reg_idx = i;
4444
4445 return true;
bc97114d
PWJ
4446}
4447
4448#ifdef CONFIG_IXGBE_DCB
4449/**
4450 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
4451 * @adapter: board private structure to initialize
4452 *
4453 * Cache the descriptor ring offsets for DCB to the assigned rings.
4454 *
4455 **/
4456static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
4457{
4458 int i;
4459 bool ret = false;
4460 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
4461
bd508178
AD
4462 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
4463 return false;
f92ef202 4464
bd508178
AD
4465 /* the number of queues is assumed to be symmetric */
4466 switch (adapter->hw.mac.type) {
4467 case ixgbe_mac_82598EB:
4468 for (i = 0; i < dcb_i; i++) {
4469 adapter->rx_ring[i]->reg_idx = i << 3;
4470 adapter->tx_ring[i]->reg_idx = i << 2;
4471 }
4472 ret = true;
4473 break;
4474 case ixgbe_mac_82599EB:
b93a2226 4475 case ixgbe_mac_X540:
bd508178
AD
4476 if (dcb_i == 8) {
4477 /*
4478 * Tx TC0 starts at: descriptor queue 0
4479 * Tx TC1 starts at: descriptor queue 32
4480 * Tx TC2 starts at: descriptor queue 64
4481 * Tx TC3 starts at: descriptor queue 80
4482 * Tx TC4 starts at: descriptor queue 96
4483 * Tx TC5 starts at: descriptor queue 104
4484 * Tx TC6 starts at: descriptor queue 112
4485 * Tx TC7 starts at: descriptor queue 120
4486 *
4487 * Rx TC0-TC7 are offset by 16 queues each
4488 */
4489 for (i = 0; i < 3; i++) {
4490 adapter->tx_ring[i]->reg_idx = i << 5;
4491 adapter->rx_ring[i]->reg_idx = i << 4;
e8e26350 4492 }
bd508178
AD
4493 for ( ; i < 5; i++) {
4494 adapter->tx_ring[i]->reg_idx = ((i + 2) << 4);
4495 adapter->rx_ring[i]->reg_idx = i << 4;
4496 }
4497 for ( ; i < dcb_i; i++) {
4498 adapter->tx_ring[i]->reg_idx = ((i + 8) << 3);
4499 adapter->rx_ring[i]->reg_idx = i << 4;
4500 }
4501 ret = true;
4502 } else if (dcb_i == 4) {
4503 /*
4504 * Tx TC0 starts at: descriptor queue 0
4505 * Tx TC1 starts at: descriptor queue 64
4506 * Tx TC2 starts at: descriptor queue 96
4507 * Tx TC3 starts at: descriptor queue 112
4508 *
4509 * Rx TC0-TC3 are offset by 32 queues each
4510 */
4511 adapter->tx_ring[0]->reg_idx = 0;
4512 adapter->tx_ring[1]->reg_idx = 64;
4513 adapter->tx_ring[2]->reg_idx = 96;
4514 adapter->tx_ring[3]->reg_idx = 112;
4515 for (i = 0 ; i < dcb_i; i++)
4516 adapter->rx_ring[i]->reg_idx = i << 5;
4517 ret = true;
021230d4 4518 }
bd508178
AD
4519 break;
4520 default:
4521 break;
021230d4 4522 }
bc97114d
PWJ
4523 return ret;
4524}
4525#endif
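
/*
 * Illustrative sketch, not part of this driver: a standalone check of
 * the 82599 8-TC Tx offsets documented in the comment above. The three
 * shift ranges reproduce the listed bases 0/32/64, 80/96 and 104/112/120.
 */
#include <stdio.h>

int main(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		int reg_idx;

		if (i < 3)
			reg_idx = i << 5;	/* TC0-TC2: 32 queues apart */
		else if (i < 5)
			reg_idx = (i + 2) << 4;	/* TC3-TC4: 16 queues apart */
		else
			reg_idx = (i + 8) << 3;	/* TC5-TC7: 8 queues apart */

		printf("TC%d Tx base = %d\n", i, reg_idx);
	}
	return 0;	/* prints 0 32 64 80 96 104 112 120 */
}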
4526
c4cf55e5
PWJ
4527/**
4528 * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
4529 * @adapter: board private structure to initialize
4530 *
4531 * Cache the descriptor ring offsets for Flow Director to the assigned rings.
4532 *
4533 **/
e8e9f696 4534static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
c4cf55e5
PWJ
4535{
4536 int i;
4537 bool ret = false;
4538
4539 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
4540 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4541 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
4542 for (i = 0; i < adapter->num_rx_queues; i++)
4a0b9ca0 4543 adapter->rx_ring[i]->reg_idx = i;
c4cf55e5 4544 for (i = 0; i < adapter->num_tx_queues; i++)
4a0b9ca0 4545 adapter->tx_ring[i]->reg_idx = i;
c4cf55e5
PWJ
4546 ret = true;
4547 }
4548
4549 return ret;
4550}
4551
0331a832
YZ
4552#ifdef IXGBE_FCOE
4553/**
4554 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
4555 * @adapter: board private structure to initialize
4556 *
4557 * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
4558 *
4559 */
4560static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
4561{
0331a832 4562 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
bf29ee6c
AD
4563 int i;
4564 u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
4565
4566 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
4567 return false;
0331a832 4568
0331a832 4569#ifdef CONFIG_IXGBE_DCB
bf29ee6c
AD
4570 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4571 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
8de8b2e6 4572
bf29ee6c
AD
4573 ixgbe_cache_ring_dcb(adapter);
4574 /* find out queues in TC for FCoE */
4575 fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
4576 fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
4577 /*
4578 * In 82599, the number of Tx queues for each traffic
4579 * class for both 8-TC and 4-TC modes are:
4580 * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
4581 * 8 TCs: 32 32 16 16 8 8 8 8
4582 * 4 TCs: 64 64 32 32
4583 * We have max 8 queues for FCoE, where 8 is the
4584 * FCoE redirection table size. If TC for FCoE is
4585 * less than or equal to TC3, we have enough queues
4586 * to add max of 8 queues for FCoE, so we start FCoE
4587 * Tx queue from the next one, i.e., reg_idx + 1.
4588 * If TC for FCoE is above TC3, implying 8 TC mode,
4589 * and we need 8 for FCoE, we have to take all queues
4590 * in that traffic class for FCoE.
4591 */
4592 if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
4593 fcoe_tx_i--;
4594 }
0331a832 4595#endif /* CONFIG_IXGBE_DCB */
bf29ee6c
AD
4596 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4597 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4598 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
4599 ixgbe_cache_ring_fdir(adapter);
4600 else
4601 ixgbe_cache_ring_rss(adapter);
8faa2a78 4602
bf29ee6c
AD
4603 fcoe_rx_i = f->mask;
4604 fcoe_tx_i = f->mask;
0331a832 4605 }
bf29ee6c
AD
4606 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
4607 adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
4608 adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
4609 }
4610 return true;
0331a832
YZ
4611}
4612
4613#endif /* IXGBE_FCOE */
1cdd1ec8
GR
4614/**
4615 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
4616 * @adapter: board private structure to initialize
4617 *
4618 * SR-IOV doesn't use any descriptor rings but changes the default if
4619 * no other mapping is used.
4620 *
4621 */
4622static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
4623{
4a0b9ca0
PW
4624 adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
4625 adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
1cdd1ec8
GR
4626 if (adapter->num_vfs)
4627 return true;
4628 else
4629 return false;
4630}
4631
bc97114d
PWJ
4632/**
4633 * ixgbe_cache_ring_register - Descriptor ring to register mapping
4634 * @adapter: board private structure to initialize
4635 *
4636 * Once we know the feature-set enabled for the device, we'll cache
4637 * the register offset the descriptor ring is assigned to.
4638 *
4639 * Note, the order the various feature calls is important. It must start with
4640 * the "most" features enabled at the same time, then trickle down to the
4641 * least amount of features turned on at once.
4642 **/
4643static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
4644{
4645 /* start with default case */
4a0b9ca0
PW
4646 adapter->rx_ring[0]->reg_idx = 0;
4647 adapter->tx_ring[0]->reg_idx = 0;
bc97114d 4648
1cdd1ec8
GR
4649 if (ixgbe_cache_ring_sriov(adapter))
4650 return;
4651
0331a832
YZ
4652#ifdef IXGBE_FCOE
4653 if (ixgbe_cache_ring_fcoe(adapter))
4654 return;
4655
4656#endif /* IXGBE_FCOE */
bc97114d
PWJ
4657#ifdef CONFIG_IXGBE_DCB
4658 if (ixgbe_cache_ring_dcb(adapter))
4659 return;
4660
4661#endif
c4cf55e5
PWJ
4662 if (ixgbe_cache_ring_fdir(adapter))
4663 return;
4664
bc97114d
PWJ
4665 if (ixgbe_cache_ring_rss(adapter))
4666 return;
021230d4
AV
4667}
4668
9a799d71
AK
4669/**
4670 * ixgbe_alloc_queues - Allocate memory for all rings
4671 * @adapter: board private structure to initialize
4672 *
4673 * We allocate one ring per queue at run-time since we don't know the
4df10466
JB
4674 * number of queues at compile-time. The polling_netdev array is
4675 * intended for Multiqueue, but should work fine with a single queue.
9a799d71 4676 **/
2f90b865 4677static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
9a799d71 4678{
e2ddeba9 4679 int rx = 0, tx = 0, nid = adapter->node;
9a799d71 4680
e2ddeba9
ED
4681 if (nid < 0 || !node_online(nid))
4682 nid = first_online_node;
4683
4684 for (; tx < adapter->num_tx_queues; tx++) {
4685 struct ixgbe_ring *ring;
4686
4687 ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
4a0b9ca0 4688 if (!ring)
e2ddeba9 4689 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
4a0b9ca0 4690 if (!ring)
e2ddeba9 4691 goto err_allocation;
4a0b9ca0 4692 ring->count = adapter->tx_ring_count;
e2ddeba9
ED
4693 ring->queue_index = tx;
4694 ring->numa_node = nid;
b6ec895e 4695 ring->dev = &adapter->pdev->dev;
fc77dc3c 4696 ring->netdev = adapter->netdev;
4a0b9ca0 4697
e2ddeba9 4698 adapter->tx_ring[tx] = ring;
021230d4 4699 }
b9804972 4700
e2ddeba9
ED
4701 for (; rx < adapter->num_rx_queues; rx++) {
4702 struct ixgbe_ring *ring;
4a0b9ca0 4703
e2ddeba9 4704 ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
4a0b9ca0 4705 if (!ring)
e2ddeba9 4706 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
4a0b9ca0 4707 if (!ring)
e2ddeba9
ED
4708 goto err_allocation;
4709 ring->count = adapter->rx_ring_count;
4710 ring->queue_index = rx;
4711 ring->numa_node = nid;
b6ec895e 4712 ring->dev = &adapter->pdev->dev;
fc77dc3c 4713 ring->netdev = adapter->netdev;
4a0b9ca0 4714
e2ddeba9 4715 adapter->rx_ring[rx] = ring;
021230d4
AV
4716 }
4717
4718 ixgbe_cache_ring_register(adapter);
4719
4720 return 0;
4721
e2ddeba9
ED
4722err_allocation:
4723 while (tx)
4724 kfree(adapter->tx_ring[--tx]);
4725
4726 while (rx)
4727 kfree(adapter->rx_ring[--rx]);
021230d4
AV
4728 return -ENOMEM;
4729}
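
/*
 * Illustrative sketch, not part of this driver: the node-preferred
 * allocation with global fallback used twice above, factored into one
 * helper. kzalloc_node() tries the given NUMA node first; falling back
 * to kzalloc() trades memory locality for letting the allocation succeed.
 */
#include <linux/slab.h>

static void *sketch_zalloc_pref_node(size_t size, int nid)
{
	void *p = kzalloc_node(size, GFP_KERNEL, nid);	/* near the device */

	if (!p)
		p = kzalloc(size, GFP_KERNEL);	/* any node beats failure */
	return p;
}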
4730
4731/**
4732 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
4733 * @adapter: board private structure to initialize
4734 *
4735 * Attempt to configure the interrupts using the best available
4736 * capabilities of the hardware and the kernel.
4737 **/
feea6a57 4738static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
021230d4 4739{
8be0e467 4740 struct ixgbe_hw *hw = &adapter->hw;
021230d4
AV
4741 int err = 0;
4742 int vector, v_budget;
4743
4744 /*
4745 * It's easy to be greedy for MSI-X vectors, but it really
4746 * doesn't do us much good if we have a lot more vectors
4747 * than CPUs. So let's be conservative and only ask for
342bde1b 4748 * (roughly) the same number of vectors as there are CPUs.
021230d4
AV
4749 */
4750 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
e8e9f696 4751 (int)num_online_cpus()) + NON_Q_VECTORS;
021230d4
AV
4752
4753 /*
4754 * At the same time, hardware can only support a maximum of
8be0e467
PW
4755 * hw.mac->max_msix_vectors vectors. With features
4756 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
4757 * descriptor queues supported by our device. Thus, we cap it off in
4758 * those rare cases where the cpu count also exceeds our vector limit.
021230d4 4759 */
8be0e467 4760 v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
021230d4
AV
4761
4762 /* A failure in MSI-X entry allocation isn't fatal, but it does
4763 * mean we disable MSI-X capabilities of the adapter. */
4764 adapter->msix_entries = kcalloc(v_budget,
e8e9f696 4765 sizeof(struct msix_entry), GFP_KERNEL);
7a921c93
AD
4766 if (adapter->msix_entries) {
4767 for (vector = 0; vector < v_budget; vector++)
4768 adapter->msix_entries[vector].entry = vector;
021230d4 4769
7a921c93 4770 ixgbe_acquire_msix_vectors(adapter, v_budget);
021230d4 4771
7a921c93
AD
4772 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
4773 goto out;
4774 }
26d27844 4775
7a921c93
AD
4776 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
4777 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
c4cf55e5
PWJ
4778 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
4779 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4780 adapter->atr_sample_rate = 0;
1cdd1ec8
GR
4781 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4782 ixgbe_disable_sriov(adapter);
4783
847f53ff
BH
4784 err = ixgbe_set_num_queues(adapter);
4785 if (err)
4786 return err;
021230d4 4787
021230d4
AV
4788 err = pci_enable_msi(adapter->pdev);
4789 if (!err) {
4790 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
4791 } else {
849c4542
ET
4792 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
4793 "Unable to allocate MSI interrupt, "
4794 "falling back to legacy. Error: %d\n", err);
021230d4
AV
4795 /* reset err */
4796 err = 0;
4797 }
4798
4799out:
021230d4
AV
4800 return err;
4801}
4802
7a921c93
AD
4803/**
4804 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
4805 * @adapter: board private structure to initialize
4806 *
4807 * We allocate one q_vector per queue interrupt. If allocation fails we
4808 * return -ENOMEM.
4809 **/
4810static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
4811{
4812 int q_idx, num_q_vectors;
4813 struct ixgbe_q_vector *q_vector;
4814 int napi_vectors;
4815 int (*poll)(struct napi_struct *, int);
4816
4817 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4818 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
4819 napi_vectors = adapter->num_rx_queues;
91281fd3 4820 poll = &ixgbe_clean_rxtx_many;
7a921c93
AD
4821 } else {
4822 num_q_vectors = 1;
4823 napi_vectors = 1;
4824 poll = &ixgbe_poll;
4825 }
4826
4827 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1a6c14a2 4828 q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
e8e9f696 4829 GFP_KERNEL, adapter->node);
1a6c14a2
JB
4830 if (!q_vector)
4831 q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
e8e9f696 4832 GFP_KERNEL);
7a921c93
AD
4833 if (!q_vector)
4834 goto err_out;
4835 q_vector->adapter = adapter;
f7554a2b
NS
4836 if (q_vector->txr_count && !q_vector->rxr_count)
4837 q_vector->eitr = adapter->tx_eitr_param;
4838 else
4839 q_vector->eitr = adapter->rx_eitr_param;
fe49f04a 4840 q_vector->v_idx = q_idx;
91281fd3 4841 netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
7a921c93
AD
4842 adapter->q_vector[q_idx] = q_vector;
4843 }
4844
4845 return 0;
4846
4847err_out:
4848 while (q_idx) {
4849 q_idx--;
4850 q_vector = adapter->q_vector[q_idx];
4851 netif_napi_del(&q_vector->napi);
4852 kfree(q_vector);
4853 adapter->q_vector[q_idx] = NULL;
4854 }
4855 return -ENOMEM;
4856}
4857
4858/**
4859 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
4860 * @adapter: board private structure to initialize
4861 *
4862 * This function frees the memory allocated to the q_vectors. In addition if
4863 * NAPI is enabled it will delete any references to the NAPI struct prior
4864 * to freeing the q_vector.
4865 **/
4866static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
4867{
4868 int q_idx, num_q_vectors;
7a921c93 4869
91281fd3 4870 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
7a921c93 4871 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
91281fd3 4872 else
7a921c93 4873 num_q_vectors = 1;
7a921c93
AD
4874
4875 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
4876 struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
7a921c93 4877 adapter->q_vector[q_idx] = NULL;
91281fd3 4878 netif_napi_del(&q_vector->napi);
7a921c93
AD
4879 kfree(q_vector);
4880 }
4881}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	err = ixgbe_set_num_queues(adapter);
	if (err)
		return err;

	err = ixgbe_set_interrupt_capability(adapter);
	if (err) {
		e_dev_err("Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbe_alloc_queues(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_queues:
	ixgbe_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}
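
/*
 * Editorial note (not upstream code): the goto ladder above tears down in
 * exactly the reverse order of setup, so each label undoes only what had
 * already succeeded. A caller reconfiguring the device is expected to pair
 * this with ixgbe_clear_interrupt_scheme(), roughly:
 */
#if 0
	ixgbe_clear_interrupt_scheme(adapter);		/* free vectors, rings, MSI-X */
	err = ixgbe_init_interrupt_scheme(adapter);	/* rebuild from current settings */
#endif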

static void ring_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct ixgbe_ring, rcu));
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *ring = adapter->rx_ring[i];

		/* ixgbe_get_stats64() might access this ring, we must wait
		 * a grace period before freeing it.
		 */
		call_rcu(&ring->rcu, ring_free_rcu);
		adapter->rx_ring[i] = NULL;
	}

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}
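
/*
 * Editorial note (not upstream code): the Rx rings are freed through
 * call_rcu() because a lockless reader such as ixgbe_get_stats64() may still
 * be walking them. The shape of the pattern: hand the object to RCU, clear
 * the published pointer, and let the callback free it after a grace period.
 */
#if 0
	call_rcu(&ring->rcu, ring_free_rcu);	/* freed only after all readers finish */
	adapter->rx_ring[i] = NULL;		/* new readers see no ring at all */
#endif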

/**
 * ixgbe_sfp_timer - timer callback to search for a missing SFP+ module
 * @data: pointer to our adapter struct
 **/
static void ixgbe_sfp_timer(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;

	/*
	 * Do the sfp_timer outside of interrupt context due to the
	 * delays that sfp+ detection requires
	 */
	schedule_work(&adapter->sfp_task);
}

/**
 * ixgbe_sfp_task - worker thread to find a missing module
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_sfp_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     sfp_task);
	struct ixgbe_hw *hw = &adapter->hw;

	if ((hw->phy.type == ixgbe_phy_nl) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
		s32 ret = hw->phy.ops.identify_sfp(hw);
		if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
			goto reschedule;
		ret = hw->phy.ops.reset(hw);
		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			e_dev_err("failed to initialize because an unsupported "
				  "SFP+ module type was detected.\n");
			e_dev_err("Reload the driver after installing a "
				  "supported module.\n");
			unregister_netdev(adapter->netdev);
		} else {
			e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
		}
		/* don't need this routine any more */
		clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	}
	return;
reschedule:
	if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
		mod_timer(&adapter->sfp_timer,
			  round_jiffies(jiffies + (2 * HZ)));
}

/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *dev = adapter->netdev;
	unsigned int rss;
#ifdef CONFIG_IXGBE_DCB
	int j;
	struct tc_configuration *tc;
#endif
	int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Set capability flags */
	rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
	adapter->ring_feature[RING_F_RSS].indices = rss;
	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
	adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (hw->device_id == IXGBE_DEV_ID_82598AT)
			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
		adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
		if (dev->features & NETIF_F_NTUPLE) {
			/* Flow Director perfect filter enabled */
			adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
			adapter->atr_sample_rate = 0;
			spin_lock_init(&adapter->fdir_perfect_lock);
		} else {
			/* Flow Director hash filters enabled */
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
			adapter->atr_sample_rate = 20;
		}
		adapter->ring_feature[RING_F_FDIR].indices =
							IXGBE_MAX_FDIR_INDICES;
		adapter->fdir_pballoc = 0;
#ifdef IXGBE_FCOE
		adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
		adapter->ring_feature[RING_F_FCOE].indices = 0;
#ifdef CONFIG_IXGBE_DCB
		/* Default traffic class to use for FCoE */
		adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
		adapter->fcoe.up = IXGBE_FCOE_DEFTC;
#endif
#endif /* IXGBE_FCOE */
		break;
	default:
		break;
	}

#ifdef CONFIG_IXGBE_DCB
	/* Configure DCB traffic classes */
	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
		tc = &adapter->dcb_cfg.tc_config[j];
		tc->path[DCB_TX_CONFIG].bwg_id = 0;
		tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->path[DCB_RX_CONFIG].bwg_id = 0;
		tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->dcb_pfc = pfc_disabled;
	}
	adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
	adapter->dcb_cfg.rx_pba_cfg = pba_equal;
	adapter->dcb_cfg.pfc_mode_enable = false;
	adapter->dcb_cfg.round_robin_enable = false;
	adapter->dcb_set_bitmap = 0x00;
	ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
			   adapter->ring_feature[RING_F_DCB].indices);

#endif

	/* default flow control settings */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
#ifdef CONFIG_DCB
	adapter->last_lfc_mode = hw->fc.current_mode;
#endif
	hw->fc.high_water = FC_HIGH_WATER(max_frame);
	hw->fc.low_water = FC_LOW_WATER(max_frame);
	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
	hw->fc.send_xon = true;
	hw->fc.disable_fc_autoneg = false;

	/* enable itr by default in dynamic mode */
	adapter->rx_itr_setting = 1;
	adapter->rx_eitr_param = 20000;
	adapter->tx_itr_setting = 1;
	adapter->tx_eitr_param = 10000;

	/* set defaults for eitr in MegaBytes */
	adapter->eitr_low = 10;
	adapter->eitr_high = 20;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;

	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom_params_generic(hw)) {
		e_dev_err("EEPROM initialization failed\n");
		return -EIO;
	}

	/* enable rx csum by default */
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	/* get assigned NUMA node */
	adapter->node = dev_to_node(&pdev->dev);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;
}

/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size;

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
	if (!tx_ring->tx_buffer_info)
		tx_ring->tx_buffer_info = vmalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->work_limit = tx_ring->count;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
	return -ENOMEM;
}
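
/*
 * Editorial worked example (not upstream code): the ring is sized as
 * count * sizeof(union ixgbe_adv_tx_desc) and padded to a 4 KiB boundary.
 * Assuming 16-byte advanced descriptors and a default of 512 entries:
 *
 *	512 * 16 = 8192 -> ALIGN(8192, 4096) = 8192 (already aligned)
 *	520 * 16 = 8320 -> ALIGN(8320, 4096) = 12288 (padded up)
 */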

/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;
		e_err(probe, "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}
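
/*
 * Editorial sketch (not upstream code): as the comment above says, a failure
 * can leave earlier rings populated. ixgbe_free_all_tx_resources() below
 * skips rings whose ->desc is unset, so the orphan cleanup is simply:
 */
#if 0
	err = ixgbe_setup_all_tx_resources(adapter);
	if (err)
		ixgbe_free_all_tx_resources(adapter);	/* frees only the populated rings */
#endif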

/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size;

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vmalloc_node(size, rx_ring->numa_node);
	if (!rx_ring->rx_buffer_info)
		rx_ring->rx_buffer_info = vmalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
		if (!err)
			continue;
		e_err(probe, "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
{
	ixgbe_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbe_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
{
	ixgbe_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i]->desc)
			ixgbe_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
		return -EINVAL;

	e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	hw->fc.high_water = FC_HIGH_WATER(max_frame);
	hw->fc.low_water = FC_LOW_WATER(max_frame);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
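
/*
 * Editorial worked example (not upstream code): max_frame is the on-wire
 * frame size, MTU + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN). A standard 1500-byte
 * MTU therefore gives 1518 bytes, and a 9000-byte jumbo MTU gives 9018,
 * which must not exceed IXGBE_MAX_JUMBO_FRAME_SIZE.
 */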

/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbe_open(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBE_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = ixgbe_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbe_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbe_configure(adapter);

	err = ixgbe_request_irq(adapter);
	if (err)
		goto err_req_irq;

	err = ixgbe_up_complete(adapter);
	if (err)
		goto err_up;

	netif_tx_start_all_queues(netdev);

	return 0;

err_up:
	ixgbe_release_hw_control(adapter);
	ixgbe_free_irq(adapter);
err_req_irq:
err_setup_rx:
	ixgbe_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbe_free_all_tx_resources(adapter);
	ixgbe_reset(adapter);

	return err;
}

/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbe_close(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ixgbe_down(adapter);
	ixgbe_free_irq(adapter);

	ixgbe_free_all_tx_resources(adapter);
	ixgbe_free_all_rx_resources(adapter);

	ixgbe_release_hw_control(adapter);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/*
	 * pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		e_dev_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_wake_from_d3(pdev, false);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err) {
		e_dev_err("Cannot initialize interrupts for device\n");
		return err;
	}

	ixgbe_reset(adapter);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);

	if (netif_running(netdev)) {
		err = ixgbe_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif /* CONFIG_PM */

static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ctrl, fctrl;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbe_down(adapter);
		ixgbe_free_irq(adapter);
		ixgbe_free_all_tx_resources(adapter);
		ixgbe_free_all_rx_resources(adapter);
	}

	ixgbe_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	if (wufc) {
		ixgbe_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IXGBE_WUFC_MC) {
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_MPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}

		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		ctrl |= IXGBE_CTRL_GIO_DIS;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);

		IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
	}

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		pci_wake_from_d3(pdev, false);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		pci_wake_from_d3(pdev, !!wufc);
		break;
	default:
		break;
	}

	*enable_wake = !!wufc;

	ixgbe_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __ixgbe_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
#endif /* CONFIG_PM */

static void ixgbe_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__ixgbe_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u64 total_mpc = 0;
	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
	u64 bytes = 0, packets = 0;

	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		u64 rsc_count = 0;
		u64 rsc_flush = 0;
		for (i = 0; i < 16; i++)
			adapter->hw_rx_no_dma_resources +=
				IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		for (i = 0; i < adapter->num_rx_queues; i++) {
			rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
			rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
		}
		adapter->rsc_total_count = rsc_count;
		adapter->rsc_total_flush = rsc_flush;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
		non_eop_descs += rx_ring->rx_stats.non_eop_descs;
		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
		bytes += rx_ring->stats.bytes;
		packets += rx_ring->stats.packets;
	}
	adapter->non_eop_descs = non_eop_descs;
	adapter->alloc_rx_page_failed = alloc_rx_page_failed;
	adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
	netdev->stats.rx_bytes = bytes;
	netdev->stats.rx_packets = packets;

	bytes = 0;
	packets = 0;
	/* gather some stats to the adapter struct that are per queue */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
		restart_queue += tx_ring->tx_stats.restart_queue;
		tx_busy += tx_ring->tx_stats.tx_busy;
		bytes += tx_ring->stats.bytes;
		packets += tx_ring->stats.packets;
	}
	adapter->restart_queue = restart_queue;
	adapter->tx_busy = tx_busy;
	netdev->stats.tx_bytes = bytes;
	netdev->stats.tx_packets = packets;

	hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		missed_rx += mpc;
		hwstats->mpc[i] += mpc;
		total_mpc += hwstats->mpc[i];
		if (hw->mac.type == ixgbe_mac_82598EB)
			hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
		hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			hwstats->pxonrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			hwstats->pxonrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			break;
		default:
			break;
		}
		hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
	}
	hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* work around hardware counting issue */
	hwstats->gprc -= missed_rx;

	ixgbe_update_xoff_received(adapter);

	/* 82598 hardware only has a 32 bit counter in the high register */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
		IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
		IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
		hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
#ifdef IXGBE_FCOE
		hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
		hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
#endif /* IXGBE_FCOE */
		break;
	default:
		break;
	}
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	hwstats->bprc += bprc;
	hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		hwstats->mprc -= bprc;
	hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
	hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	hwstats->lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	hwstats->lxofftxc += lxoff;
	hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	/*
	 * 82598 errata - tx of flow control packets is included in tx counters
	 */
	xon_off_tot = lxon + lxoff;
	hwstats->gptc -= xon_off_tot;
	hwstats->mptc -= xon_off_tot;
	hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
	hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	hwstats->ptc64 -= xon_off_tot;
	hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = hwstats->mprc;

	/* Rx Errors */
	netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
	netdev->stats.rx_dropped = 0;
	netdev->stats.rx_length_errors = hwstats->rlec;
	netdev->stats.rx_crc_errors = hwstats->crcerrs;
	netdev->stats.rx_missed_errors = total_mpc;
}
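
/*
 * Editorial note (not upstream code): on 82599/X540 several octet counters
 * are split into read-to-clear low/high register pairs, which is why the
 * code above reads the low half into the accumulator and then reads the
 * high half purely for its clear-on-read side effect:
 */
#if 0
	hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);	/* accumulate low 32 bits */
	IXGBE_READ_REG(hw, IXGBE_GORCH);			/* discard; read only to clear */
#endif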

/**
 * ixgbe_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbe_watchdog(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u64 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/*
		 * for legacy and MSI interrupts don't set any bits
		 * that are enabled for EIAM, because this operation
		 * would set *both* EIMS and EICS for any bit in EIAM
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EICS,
				(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
		goto watchdog_reschedule;
	}

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbe_q_vector *qv = adapter->q_vector[i];
		if (qv->rxr_count || qv->txr_count)
			eics |= ((u64)1 << i);
	}

	/* Cause software interrupt to ensure rx rings are cleaned */
	ixgbe_irq_rearm_queues(adapter, eics);

watchdog_reschedule:
	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}
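
/*
 * Editorial worked example (not upstream code): eics collects one bit per
 * vector that owns at least one ring. With vectors 0, 1 and 3 active,
 * eics = (1 << 0) | (1 << 1) | (1 << 3) = 0xb, and
 * ixgbe_irq_rearm_queues() raises a software interrupt on exactly those
 * vectors so their rings get serviced even if a real interrupt was lost.
 */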

/**
 * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_multispeed_fiber_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     multispeed_fiber_task);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg;
	bool negotiation;

	adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
	hw->mac.autotry_restart = false;
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
}

/**
 * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_sfp_config_module_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     sfp_config_module_task);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 err;

	adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;

	/* Time for electrical oscillations to settle down */
	msleep(100);
	err = hw->phy.ops.identify_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		e_dev_err("failed to initialize because an unsupported SFP+ "
			  "module type was detected.\n");
		e_dev_err("Reload the driver after installing a supported "
			  "module.\n");
		unregister_netdev(adapter->netdev);
		return;
	}
	hw->mac.ops.setup_sfp(hw);

	if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
		/* This will also work for DA Twinax connections */
		schedule_work(&adapter->multispeed_fiber_task);
	adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
}

/**
 * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_fdir_reinit_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     fdir_reinit_task);
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_bit(__IXGBE_TX_FDIR_INIT_DONE,
				&(adapter->tx_ring[i]->state));
	} else {
		e_err(probe, "failed to finish FDIR re-initialization, "
		      "ignored adding FDIR ATR filters\n");
	}
	/* Done FDIR Re-initialization, enable transmits */
	netif_tx_start_all_queues(adapter->netdev);
}

static DEFINE_MUTEX(ixgbe_watchdog_lock);

/**
 * ixgbe_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_watchdog_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed;
	bool link_up;
	int i;
	struct ixgbe_ring *tx_ring;
	int some_tx_pending = 0;

	mutex_lock(&ixgbe_watchdog_lock);

	link_up = adapter->link_up;
	link_speed = adapter->link_speed;

	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
		if (link_up) {
#ifdef CONFIG_DCB
			if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
				for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
					hw->mac.ops.fc_enable(hw, i);
			} else {
				hw->mac.ops.fc_enable(hw, 0);
			}
#else
			hw->mac.ops.fc_enable(hw, 0);
#endif
		}

		if (link_up ||
		    time_after(jiffies, (adapter->link_check_timeout +
					 IXGBE_TRY_LINK_TIMEOUT))) {
			adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
			IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
		}
		adapter->link_up = link_up;
		adapter->link_speed = link_speed;
	}

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			bool flow_rx, flow_tx;

			switch (hw->mac.type) {
			case ixgbe_mac_82598EB: {
				u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
				u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
				flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
				flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
			}
				break;
			case ixgbe_mac_82599EB:
			case ixgbe_mac_X540: {
				u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
				u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
				flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
				flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
			}
				break;
			default:
				flow_tx = false;
				flow_rx = false;
				break;
			}

			e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
			       "10 Gbps" :
			       (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
			       "1 Gbps" : "unknown speed")),
			       ((flow_rx && flow_tx) ? "RX/TX" :
			       (flow_rx ? "RX" :
			       (flow_tx ? "TX" : "None"))));

			netif_carrier_on(netdev);
		} else {
			/* Force detection of hung controller */
			for (i = 0; i < adapter->num_tx_queues; i++) {
				tx_ring = adapter->tx_ring[i];
				set_check_for_tx_hang(tx_ring);
			}
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			e_info(drv, "NIC Link is Down\n");
			netif_carrier_off(netdev);
		}
	}

	if (!netif_carrier_ok(netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			tx_ring = adapter->tx_ring[i];
			if (tx_ring->next_to_use != tx_ring->next_to_clean) {
				some_tx_pending = 1;
				break;
			}
		}

		if (some_tx_pending) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			schedule_work(&adapter->reset_task);
		}
	}

	ixgbe_update_stats(adapter);
	mutex_unlock(&ixgbe_watchdog_lock);
}

static int ixgbe_tso(struct ixgbe_adapter *adapter,
		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u8 *hdr_len, __be16 protocol)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
	u32 mss_l4len_idx, l4len;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len += l4len;

		if (protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb_is_gso_v6(skb)) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
		}

		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		vlan_macip_lens |=
		    (skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
		    (skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
				   IXGBE_ADVTXD_DTYP_CTXT);

		if (protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx =
		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		/* use index 1 for TSO */
		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}

static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
		      __be16 protocol)
{
	u32 rtn = 0;

	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		rtn |= IXGBE_ADVTXD_TUCMD_IPV4;
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_TCP:
			rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			break;
		case IPPROTO_SCTP:
			rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		break;
	case cpu_to_be16(ETH_P_IPV6):
		/* XXX what about other V6 headers?? */
		switch (ipv6_hdr(skb)->nexthdr) {
		case IPPROTO_TCP:
			rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			break;
		case IPPROTO_SCTP:
			rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		break;
	default:
		if (unlikely(net_ratelimit()))
			e_warn(probe, "partial checksum but proto=%x!\n",
			       protocol);
		break;
	}

	return rtn;
}

static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
			  struct ixgbe_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags,
			  __be16 protocol)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
					    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		/* use index zero for tx checksum offload */
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}

static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
			struct ixgbe_ring *tx_ring,
			struct sk_buff *skb, u32 tx_flags,
			unsigned int first, const u8 hdr_len)
{
	struct device *dev = tx_ring->dev;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	unsigned int bytecount = skb->len;
	u16 gso_segs = 1;

	i = tx_ring->next_to_use;

	if (tx_flags & IXGBE_TX_FLAGS_FCOE)
		/* excluding fcoe_crc_eof for FCoE */
		total -= sizeof(struct fcoe_crc_eof);

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->mapped_as_page = false;
		tx_buffer_info->dma = dma_map_single(dev,
						     skb->data + offset,
						     size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buffer_info->dma))
			goto dma_error;
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		total -= size;
		offset += size;
		count++;

		if (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)frag->size, total);
		offset = frag->page_offset;

		while (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;

			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma = dma_map_page(dev,
							   frag->page,
							   offset, size,
							   DMA_TO_DEVICE);
			tx_buffer_info->mapped_as_page = true;
			if (dma_mapping_error(dev, tx_buffer_info->dma))
				goto dma_error;
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			total -= size;
			offset += size;
			count++;
		}
		if (total == 0)
			break;
	}

	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		gso_segs = skb_shinfo(skb)->gso_segs;
#ifdef IXGBE_FCOE
	/* adjust for FCoE Sequence Offload */
	else if (tx_flags & IXGBE_TX_FLAGS_FSO)
		gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
					skb_shinfo(skb)->gso_size);
#endif /* IXGBE_FCOE */
	bytecount += (gso_segs - 1) * hdr_len;

	/* multiply data chunks by size of headers */
	tx_ring->tx_buffer_info[i].bytecount = bytecount;
	tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	e_dev_err("TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed tx_buffer_info map */
	tx_buffer_info->dma = 0;
	tx_buffer_info->time_stamp = 0;
	tx_buffer_info->next_to_watch = 0;
	if (count)
		count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	return 0;
}

static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
			   int tx_flags, int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;
	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
				 IXGBE_ADVTXD_POPTS_SHIFT;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
					 IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
				 IXGBE_ADVTXD_POPTS_SHIFT;

	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
		olinfo_status |= IXGBE_ADVTXD_CC;
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_FSO)
			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
	}

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, tx_ring->tail);
}
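
/*
 * Editorial note (not upstream code): the tail of ixgbe_tx_queue() is the
 * canonical producer sequence for descriptor rings on weakly ordered CPUs:
 * finish writing the descriptors, issue wmb(), and only then ring the
 * doorbell so the NIC can never fetch a half-written descriptor.
 */
#if 0
	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);	/* 1. complete descriptors */
	wmb();							/* 2. order memory writes */
	writel(i, tx_ring->tail);				/* 3. doorbell: DMA may start */
#endif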

static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
		      u8 queue, u32 tx_flags, __be16 protocol)
{
	struct ixgbe_atr_input atr_input;
	struct iphdr *iph = ip_hdr(skb);
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	struct tcphdr *th;
	u16 vlan_id;

	/* Right now, we support IPv4 w/ TCP only */
	if (protocol != htons(ETH_P_IP) ||
	    iph->protocol != IPPROTO_TCP)
		return;

	memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));

	vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
		  IXGBE_TX_FLAGS_VLAN_SHIFT;

	th = tcp_hdr(skb);

	ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
	ixgbe_atr_set_src_port_82599(&atr_input, th->dest);
	ixgbe_atr_set_dst_port_82599(&atr_input, th->source);
	ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
	ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);
	/* src and dst are inverted, think how the receiver sees them */
	ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr);
	ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);

	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
	ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
}

static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbe_maybe_stop_tx(tx_ring, size);
}
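
/*
 * Editorial sketch (not upstream code): the stop/recheck/restart dance above
 * closes the race with the Tx cleanup path. Without the smp_mb() and second
 * check, the cleanup IRQ could free descriptors between our space check and
 * netif_stop_subqueue(), leaving the queue stopped forever. The transmit
 * path uses it as a simple gate ("needed" below is a hypothetical name for
 * the worst-case descriptor count):
 */
#if 0
	if (ixgbe_maybe_stop_tx(tx_ring, needed)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;	/* the stack will retry this skb */
	}
#endif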

static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int txq = smp_processor_id();
#ifdef IXGBE_FCOE
	__be16 protocol;

	protocol = vlan_get_protocol(skb);

	if ((protocol == htons(ETH_P_FCOE)) ||
	    (protocol == htons(ETH_P_FIP))) {
		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
			txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
			txq += adapter->ring_feature[RING_F_FCOE].mask;
			return txq;
#ifdef CONFIG_IXGBE_DCB
		} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			txq = adapter->fcoe.up;
			return txq;
#endif
		}
	}
#endif

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;
		return txq;
	}

	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		if (skb->priority == TC_PRIO_CONTROL)
			txq = adapter->ring_feature[RING_F_DCB].indices - 1;
		else
			txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK)
			       >> 13;
		return txq;
	}

	return skb_tx_hash(dev, skb);
}
6542
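/*
 * Worked example for the FCoE branch above (hypothetical sizes): with
 * adapter->ring_feature[RING_F_FCOE].indices = 8 and .mask (the base
 * ring offset) = 16, an FCoE frame sent from CPU 11 maps to relative
 * queue 11 & (8 - 1) = 3 and thus to absolute Tx queue 16 + 3 = 19.
 * The AND-masking only works because the FCoE queue count is a power
 * of two.
 */
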
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
				  struct ixgbe_adapter *adapter,
				  struct ixgbe_ring *tx_ring)
{
	struct net_device *netdev = tx_ring->netdev;
	struct netdev_queue *txq;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int tso;
	int count = 0;
	unsigned int f;
	__be16 protocol;

	protocol = vlan_get_protocol(skb);

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
			tx_flags |= ((skb->queue_mapping & 0x7) << 13);
		}
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
		   skb->priority != TC_PRIO_CONTROL) {
		tx_flags |= ((skb->queue_mapping & 0x7) << 13);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

#ifdef IXGBE_FCOE
	/* for FCoE with DCB, we force the priority to what
	 * was specified by the switch */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
	    (protocol == htons(ETH_P_FCOE) ||
	     protocol == htons(ETH_P_FIP))) {
#ifdef CONFIG_IXGBE_DCB
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
				      << IXGBE_TX_FLAGS_VLAN_SHIFT);
			tx_flags |= ((adapter->fcoe.up << 13)
				     << IXGBE_TX_FLAGS_VLAN_SHIFT);
		}
#endif
		/* flag for FCoE offloads */
		if (protocol == htons(ETH_P_FCOE))
			tx_flags |= IXGBE_TX_FLAGS_FCOE;
	}
#endif

	/* four things can cause us to need a context descriptor */
	if (skb_is_gso(skb) ||
	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
	    (tx_flags & IXGBE_TX_FLAGS_FCOE))
		count++;

	count += TXD_USE_COUNT(skb_headlen(skb));
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (ixgbe_maybe_stop_tx(tx_ring, count)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	first = tx_ring->next_to_use;
	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
#ifdef IXGBE_FCOE
		/* setup tx offload for FCoE */
		tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		if (tso)
			tx_flags |= IXGBE_TX_FLAGS_FSO;
#endif /* IXGBE_FCOE */
	} else {
		if (protocol == htons(ETH_P_IP))
			tx_flags |= IXGBE_TX_FLAGS_IPV4;
		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
				protocol);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (tso)
			tx_flags |= IXGBE_TX_FLAGS_TSO;
		else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
				       protocol) &&
			 (skb->ip_summed == CHECKSUM_PARTIAL))
			tx_flags |= IXGBE_TX_FLAGS_CSUM;
	}

	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
	if (count) {
		/* add the ATR filter if ATR is on */
		if (tx_ring->atr_sample_rate) {
			++tx_ring->atr_count;
			if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
			    test_bit(__IXGBE_TX_FDIR_INIT_DONE,
				     &tx_ring->state)) {
				ixgbe_atr(adapter, skb, tx_ring->queue_index,
					  tx_flags, protocol);
				tx_ring->atr_count = 0;
			}
		}
		txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
		txq->tx_bytes += skb->len;
		txq->tx_packets++;
		ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
		ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->tx_buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}

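/*
 * Worked example for the descriptor accounting above (values assumed
 * for illustration, with TXD_USE_COUNT() rounding up once per 16 KB of
 * data, the per-descriptor limit this driver family uses): a TSO skb
 * with a 1 KB linear area and three 4 KB page fragments needs one
 * context descriptor plus 1 + 3 = 4 data descriptors, so
 * ixgbe_maybe_stop_tx() is asked for 5 free descriptors before the
 * frame is mapped.
 */
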
static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;

	tx_ring = adapter->tx_ring[skb->queue_mapping];
	return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
}

/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
			    IXGBE_RAH_AV);

	return 0;
}

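/*
 * Usage note (illustrative): this is the ndo_set_mac_address hook, so a
 * command such as "ip link set dev eth0 address 00:1b:21:aa:bb:cc" ends
 * up here.  Receive Address Register slot 0 is rewritten with the new
 * address; passing adapter->num_vfs as the pool index keeps the address
 * associated with the PF's pool when SR-IOV is active.
 */
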
static int
ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 value;
	int rc;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
	if (!rc)
		rc = value;
	return rc;
}

static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	return hw->phy.ops.write_reg(hw, addr, devad, value);
}

static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}

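/*
 * Illustrative example of the Clause 45 plumbing above: a SIOCGMIIREG
 * ioctl routed through mdio_mii_ioctl() can read, say, the PMA/PMD
 * control register as
 *
 *	ixgbe_mdio_read(netdev, prtad, MDIO_MMD_PMAPMD, MDIO_CTRL1);
 *
 * prtad must match the port address probed at init time, otherwise the
 * access is rejected with -EINVAL.
 */
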
/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}

/**
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_del_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
		for (i = 0; i < num_q_vectors; i++) {
			struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
			ixgbe_msix_clean_many(0, q_vector);
		}
	} else {
		ixgbe_intr(adapter->pdev->irq, netdev);
	}
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
}
#endif

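/*
 * Illustrative use of the netpoll hook above (addresses hypothetical):
 * a netconsole configuration such as
 *
 *   modprobe netconsole \
 *	netconsole=6665@10.0.0.2/eth0,6666@10.0.0.1/00:1b:21:aa:bb:cc
 *
 * pushes kernel log messages out through this driver even while
 * interrupts are disabled, by invoking ndo_poll_controller to service
 * the rings inline.
 */
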
static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	/* accurate rx/tx bytes/packets stats */
	dev_txq_stats_fold(netdev, stats);
	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
		u64 bytes, packets;
		unsigned int start;

		if (ring) {
			do {
				start = u64_stats_fetch_begin_bh(&ring->syncp);
				packets = ring->stats.packets;
				bytes = ring->stats.bytes;
			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
			stats->rx_packets += packets;
			stats->rx_bytes += bytes;
		}
	}
	rcu_read_unlock();
	/* following stats updated by ixgbe_watchdog_task() */
	stats->multicast = netdev->stats.multicast;
	stats->rx_errors = netdev->stats.rx_errors;
	stats->rx_length_errors = netdev->stats.rx_length_errors;
	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
	return stats;
}

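/*
 * Sketch of the writer side that the fetch/retry loop above pairs with
 * (condensed for illustration; in this driver the update happens in the
 * Rx clean path).  The seqcount lets a 32-bit reader observe a
 * consistent 64-bit counter pair without a lock:
 */
#if 0
static void example_rx_stats_update(struct ixgbe_ring *rx_ring,
				    unsigned int bytes, unsigned int packets)
{
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += packets;
	rx_ring->stats.bytes += bytes;
	u64_stats_update_end(&rx_ring->syncp);
}
#endif
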
static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbe_open,
	.ndo_stop		= ixgbe_close,
	.ndo_start_xmit		= ixgbe_xmit_frame,
	.ndo_select_queue	= ixgbe_select_queue,
	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
	.ndo_set_multicast_list	= ixgbe_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbe_set_mac,
	.ndo_change_mtu		= ixgbe_change_mtu,
	.ndo_tx_timeout		= ixgbe_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
	.ndo_do_ioctl		= ixgbe_ioctl,
	.ndo_set_vf_mac		= ixgbe_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= ixgbe_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate	= ixgbe_ndo_set_vf_bw,
	.ndo_get_vf_config	= ixgbe_ndo_get_vf_config,
	.ndo_get_stats64	= ixgbe_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbe_netpoll,
#endif
#ifdef IXGBE_FCOE
	.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,
	.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
	.ndo_fcoe_enable	= ixgbe_fcoe_enable,
	.ndo_fcoe_disable	= ixgbe_fcoe_disable,
	.ndo_fcoe_get_wwn	= ixgbe_fcoe_get_wwn,
#endif /* IXGBE_FCOE */
};

static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
				     const struct ixgbe_info *ii)
{
#ifdef CONFIG_PCI_IOV
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
		return;

	/* The 82599 supports up to 64 VFs per physical function
	 * but this implementation limits allocation to 63 so that
	 * basic networking resources are still available to the
	 * physical function
	 */
	adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
	err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
	if (err) {
		e_err(probe, "Failed to enable PCI sriov: %d\n", err);
		goto err_novfs;
	}
	/* If the call to enable VFs succeeded then allocate memory
	 * for per VF control structures.
	 */
	adapter->vfinfo =
		kcalloc(adapter->num_vfs,
			sizeof(struct vf_data_storage), GFP_KERNEL);
	if (adapter->vfinfo) {
		/* Now that we're sure SR-IOV is enabled
		 * and memory allocated set up the mailbox parameters
		 */
		ixgbe_init_mbx_params_pf(hw);
		memcpy(&hw->mbx.ops, ii->mbx_ops,
		       sizeof(hw->mbx.ops));

		/* Disable RSC when in SR-IOV mode */
		adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
				     IXGBE_FLAG2_RSC_ENABLED);
		return;
	}

	/* Oh oh */
	e_err(probe, "Unable to allocate memory for VF Data Storage - "
	      "SRIOV disabled\n");
	pci_disable_sriov(adapter->pdev);

err_novfs:
	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
	adapter->num_vfs = 0;
#endif /* CONFIG_PCI_IOV */
}

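/*
 * Illustrative configuration for the SR-IOV path above: loading the
 * driver with the module parameter
 *
 *   modprobe ixgbe max_vfs=7
 *
 * makes ixgbe_probe_vf() enable seven virtual functions on each 82599
 * port (capped at 63), after which the VFs can be bound to the ixgbevf
 * driver or assigned to guests.
 */
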
/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	static int cards_found;
	int i, err, pci_using_dac;
	unsigned int indices = num_possible_cpus();
#ifdef IXGBE_FCOE
	u16 device_caps;
#endif
	u32 part_num, eec;

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev,
					"No usable DMA configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM), ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	if (ii->mac == ixgbe_mac_82598EB)
		indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
	else
		indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);

	indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
#ifdef IXGBE_FCOE
	indices += min_t(unsigned int, num_possible_cpus(),
			 IXGBE_MAX_FCOE_INDICES);
#endif
	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	pci_set_drvdata(pdev, adapter);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
	}

	netdev->netdev_ops = &ixgbe_netdev_ops;
	ixgbe_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strcpy(netdev->name, pci_name(pdev));

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	/* EEPROM */
	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
	if (!(eec & (1 << 8)))
		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

	/* PHY */
	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
	hw->phy.mdio.mmds = 0;
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	hw->phy.mdio.dev = netdev;
	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
	hw->phy.mdio.mdio_write = ixgbe_mdio_write;

	/* set up this timer and work struct before calling get_invariants
	 * which might start the timer
	 */
	init_timer(&adapter->sfp_timer);
	adapter->sfp_timer.function = ixgbe_sfp_timer;
	adapter->sfp_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);

	/* multispeed fiber has its own tasklet, called from GPI SDP1 context */
	INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);

	/* a new SFP+ module arrival, called from GPI SDP2 context */
	INIT_WORK(&adapter->sfp_config_module_task,
		  ixgbe_sfp_config_module_task);

	ii->get_invariants(hw);

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* Make it possible for the adapter to be woken up via WOL */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		break;
	default:
		break;
	}

	/*
	 * If there is a fan on this device and it has failed log the
	 * failure.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			e_crit(probe, "Fan has stopped, replace the adapter\n");
	}

	/* reset_hw fills in the perm_addr as well */
	hw->phy.reset_if_overtemp = true;
	err = hw->mac.ops.reset_hw(hw);
	hw->phy.reset_if_overtemp = false;
	if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
	    hw->mac.type == ixgbe_mac_82598EB) {
		/*
		 * Start a kernel thread to watch for a module to arrive.
		 * Only do this for 82598, since 82599 will generate
		 * interrupts on module arrival.
		 */
		set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
		mod_timer(&adapter->sfp_timer,
			  round_jiffies(jiffies + (2 * HZ)));
		err = 0;
	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		e_dev_err("failed to initialize because an unsupported SFP+ "
			  "module type was detected.\n");
		e_dev_err("Reload the driver after installing a supported "
			  "module.\n");
		goto err_sw_init;
	} else if (err) {
		e_dev_err("HW Init failed: %d\n", err);
		goto err_sw_init;
	}

	ixgbe_probe_vf(adapter, ii);

	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_GRO;

	if (adapter->hw.mac.type == ixgbe_mac_82599EB)
		netdev->features |= NETIF_F_SCTP_CSUM;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
				    IXGBE_FLAG_DCB_ENABLED);
	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;

#ifdef CONFIG_IXGBE_DCB
	netdev->dcbnl_ops = &dcbnl_ops;
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		if (hw->mac.ops.get_device_caps) {
			hw->mac.ops.get_device_caps(hw, &device_caps);
			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
		}
	}
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		netdev->vlan_features |= NETIF_F_FCOE_CRC;
		netdev->vlan_features |= NETIF_F_FSO;
		netdev->vlan_features |= NETIF_F_FCOE_MTU;
	}
#endif /* IXGBE_FCOE */
	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
		netdev->features |= NETIF_F_LRO;

	/* make sure the EEPROM is good */
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		e_dev_err("The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);

	if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
		e_dev_err("invalid MAC address\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* power down the optics */
	if (hw->phy.multispeed_fiber && hw->mac.ops.disable_tx_laser)
		hw->mac.ops.disable_tx_laser(hw);

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgbe_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	switch (pdev->device) {
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
		/* All except this subdevice support WOL */
		if (pdev->subsystem_device ==
		    IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
			adapter->wol = 0;
			break;
		}
		/* fall through - remaining combo backplanes support WOL */
	case IXGBE_DEV_ID_82599_KX4:
		adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
				IXGBE_WUFC_MC | IXGBE_WUFC_BC);
		break;
	default:
		adapter->wol = 0;
		break;
	}
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* pick up the PCI bus settings for reporting later */
	hw->mac.ops.get_bus_info(hw);

	/* print bus type/speed/width info */
	e_dev_info("(PCI Express:%s:%s) %pM\n",
		   (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0Gb/s" :
		    hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5Gb/s" :
		    "Unknown"),
		   (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
		    hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
		    hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
		    "Unknown"),
		   netdev->dev_addr);
	ixgbe_read_pba_num_generic(hw, &part_num);
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, "
			   "PBA No: %06x-%03x\n",
			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
			   (part_num >> 8), (part_num & 0xff));
	else
		e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
			   hw->mac.type, hw->phy.type,
			   (part_num >> 8), (part_num & 0xff));

	if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
		e_dev_warn("PCI-Express bandwidth available for this card is "
			   "not sufficient for optimal performance.\n");
		e_dev_warn("For optimal performance a x8 PCI-Express slot "
			   "is required.\n");
	}

	/* save off EEPROM version number */
	hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);

	/* reset the hardware with the new settings */
	err = hw->mac.ops.start_hw(hw);

	if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		e_dev_warn("This device is a pre-production adapter/LOM. "
			   "Please be aware there may be issues associated "
			   "with your hardware. If you are experiencing "
			   "problems please contact your Intel or hardware "
			   "representative who provided you with this "
			   "hardware.\n");
	}
	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);

	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
		INIT_WORK(&adapter->check_overtemp_task,
			  ixgbe_check_overtemp_task);
#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		ixgbe_setup_dca(adapter);
	}
#endif
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
		for (i = 0; i < adapter->num_vfs; i++)
			ixgbe_vf_configuration(pdev, (i | 0x10000000));
	}

	/* add san mac addr to netdev */
	ixgbe_add_sanmac_netdev(netdev);

	e_dev_info("Intel(R) 10 Gigabit Network Connection\n");
	cards_found++;
	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
err_eeprom:
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		ixgbe_disable_sriov(adapter);
	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->sfp_timer);
	cancel_work_sync(&adapter->sfp_task);
	cancel_work_sync(&adapter->multispeed_fiber_task);
	cancel_work_sync(&adapter->sfp_config_module_task);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	set_bit(__IXGBE_DOWN, &adapter->state);
	/* clear the module not found bit to make sure the worker won't
	 * reschedule
	 */
	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	del_timer_sync(&adapter->sfp_timer);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->sfp_task);
	cancel_work_sync(&adapter->multispeed_fiber_task);
	cancel_work_sync(&adapter->sfp_config_module_task);
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		cancel_work_sync(&adapter->fdir_reinit_task);
	flush_scheduled_work();

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}

#endif
#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		ixgbe_cleanup_fcoe(adapter);

#endif /* IXGBE_FCOE */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		ixgbe_disable_sriov(adapter);

	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
				     IORESOURCE_MEM));

	e_dev_info("complete\n");

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		e_err(probe, "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		e_dev_err("pci_cleanup_aer_uncorrect_error_status "
			  "failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			e_info(probe, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

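/*
 * Recovery flow for the handlers above (summary of the PCI AER error
 * recovery sequence): on an uncorrectable error the AER core calls
 * .error_detected, which detaches the netdev and requests a slot reset;
 * after the link retrains, .slot_reset re-enables and reinitializes the
 * device; finally .resume brings traffic back up.  A permanent failure
 * short-circuits to PCI_ERS_RESULT_DISCONNECT and the device is
 * abandoned.
 */
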
static struct pci_driver ixgbe_driver = {
	.name = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe = ixgbe_probe,
	.remove = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend = ixgbe_suspend,
	.resume = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
	pr_info("%s\n", ixgbe_copyright);

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */

/**
 * ixgbe_get_hw_dev - return the net_device backing a hw struct
 * @hw: pointer to the hardware structure
 *
 * Used by the hardware layer to print debugging information.
 **/
struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	return adapter->netdev;
}

module_exit(ixgbe_exit_module);

/* ixgbe_main.c */