/* drivers/net/ixgbe/ixgbe_main.c (net-next-2.6.git) */

/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2010 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
			"Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "2.0.84-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
	[board_82599] = &ixgbe_82599_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
	 board_82599 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gcr;
	u32 gpie;
	u32 vmdctl;

#ifdef CONFIG_PCI_IOV
	/* disable iov and allow time for transactions to clear */
	pci_disable_sriov(adapter->pdev);
#endif

	/* turn off device IOV mode */
	gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr &= ~(IXGBE_GCR_EXT_SRIOV);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* set default pool back to 0 */
	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);

	/* take a breather then clean up driver data */
	msleep(100);

	kfree(adapter->vfinfo);
	adapter->vfinfo = NULL;

	adapter->num_vfs = 0;
	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}

struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{}
};

/*
 * ixgbe_regdump - register printout routine
 */
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
	int i = 0, j = 0;
	char rname[16];
	u32 regs[64];

	switch (reginfo->ofs) {
	case IXGBE_SRRCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		break;
	case IXGBE_DCA_RXCTRL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		break;
	case IXGBE_RDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
		break;
	case IXGBE_RDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
		break;
	case IXGBE_RDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
		break;
	case IXGBE_RXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		break;
	case IXGBE_RDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
		break;
	case IXGBE_RDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
		break;
	case IXGBE_TDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
		break;
	case IXGBE_TDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
		break;
	case IXGBE_TDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
		break;
	case IXGBE_TDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
		break;
	case IXGBE_TDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
		break;
	case IXGBE_TXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name,
			IXGBE_READ_REG(hw, reginfo->ofs));
		return;
	}

	for (i = 0; i < 8; i++) {
		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
		pr_err("%-15s", rname);
		for (j = 0; j < 8; j++)
			pr_cont(" %08x", regs[i*8+j]);
		pr_cont("\n");
	}

}

/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_reg_info *reginfo;
	int n = 0;
	struct ixgbe_ring *tx_ring;
	struct ixgbe_tx_buffer *tx_buffer_info;
	union ixgbe_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct ixgbe_ring *rx_ring;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            "
			"trans_start      last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
	     reginfo->name; reginfo++) {
		ixgbe_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		tx_buffer_info =
			&tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)tx_buffer_info->dma,
			tx_buffer_info->length,
			tx_buffer_info->next_to_watch,
			(u64)tx_buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 |  PAYLEN  | PORTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN  |
	 *   +--------------------------------------------------------------+
	 *   63       46 45    40 39 36 35 32 31   24 23 20 19              0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] "
			"[PlPOIdStDDt Ln] [bi->dma       ] "
			"leng ntw timestamp bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X %3X %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)tx_buffer_info->dma,
				tx_buffer_info->length,
				tx_buffer_info->next_to_watch,
				(u64)tx_buffer_info->time_stamp,
				tx_buffer_info->skb);
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				pr_cont(" NTC/U\n");
			else if (i == tx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == tx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

			if (netif_msg_pktdata(adapter) &&
			    tx_buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					       DUMP_PREFIX_ADDRESS, 16, 1,
					       phys_to_virt(tx_buffer_info->dma),
					       tx_buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("%5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 16 15   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS  |
	 *   | Checksum   Ident  |   |           |    | Type | Type  |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] "
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			rx_buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & IXGBE_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					rx_buffer_info->skb);
			} else {
				pr_info("R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)rx_buffer_info->dma,
					rx_buffer_info->skb);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
					   DUMP_PREFIX_ADDRESS, 16, 1,
					   phys_to_virt(rx_buffer_info->dma),
					   rx_ring->rx_buf_len, true);

					if (rx_ring->rx_buf_len
						< IXGBE_RXBUFFER_2048)
						print_hex_dump(KERN_INFO, "",
						  DUMP_PREFIX_ADDRESS, 16, 1,
						  phys_to_virt(
						    rx_buffer_info->page_dma +
						    rx_buffer_info->page_offset
						  ),
						  PAGE_SIZE/2, true);
				}
			}

			if (i == rx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == rx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

		}
	}

exit:
	return;
}

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/*
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
			   u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}
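
/*
 * Worked example (illustrative, not part of the driver): the 82598 IVAR
 * table packs four 8-bit vector entries into each 32-bit register, with
 * Rx causes at entries 0-63 and Tx causes at 64-127. Tx queue 5 therefore
 * lands in IVAR((1 * 64 + 5) >> 2) = IVAR(17), byte lane 5 & 0x3 = 1.
 * The hypothetical helpers below restate just that indexing math.
 */
static inline u32 example_ivar_index_82598(int direction, u32 queue)
{
	/* direction selects the Rx (0) or Tx (1) half of the cause table */
	return (((direction * 64) + queue) >> 2) & 0x1F;
}

static inline u32 example_ivar_shift_82598(u32 queue)
{
	/* bit offset of this queue's byte lane within the register */
	return 8 * (queue & 0x3);
}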

static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
					  u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
	}
}
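
/*
 * Illustrative note: on 82599 the per-queue cause mask is 64 bits wide and
 * is split across EICS_EX(0) (bits 0-31) and EICS_EX(1) (bits 32-63), so
 * e.g. qmask bit 40 asserts bit 8 of EICS_EX(1). A hypothetical helper
 * restating the split performed above:
 */
static inline void example_split_qmask(u64 qmask, u32 *low, u32 *high)
{
	*low = qmask & 0xFFFFFFFF;	/* written to EICS_EX(0) */
	*high = qmask >> 32;		/* written to EICS_EX(1) */
}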

void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

/**
 * ixgbe_tx_xon_state - check the tx ring xon state
 * @adapter: the ixgbe adapter
 * @tx_ring: the corresponding tx_ring
 *
 * If not in DCB mode, checks TFCS.TXOFF; otherwise, looks up the TC that
 * corresponds to this tx_ring and checks that TC's bit in TFCS.
 *
 * Returns: true if in xon state (currently not paused)
 */
static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
				      struct ixgbe_ring *tx_ring)
{
	u32 txoff = IXGBE_TFCS_TXOFF;

#ifdef CONFIG_IXGBE_DCB
	if (adapter->dcb_cfg.pfc_mode_enable) {
		int tc;
		int reg_idx = tx_ring->reg_idx;
		int dcb_i = adapter->ring_feature[RING_F_DCB].indices;

		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82598EB:
			tc = reg_idx >> 2;
			txoff = IXGBE_TFCS_TXOFF0;
			break;
		case ixgbe_mac_82599EB:
			tc = 0;
			txoff = IXGBE_TFCS_TXOFF;
			if (dcb_i == 8) {
				/* TC0, TC1 */
				tc = reg_idx >> 5;
				if (tc == 2) /* TC2, TC3 */
					tc += (reg_idx - 64) >> 4;
				else if (tc == 3) /* TC4, TC5, TC6, TC7 */
					tc += 1 + ((reg_idx - 96) >> 3);
			} else if (dcb_i == 4) {
				/* TC0, TC1 */
				tc = reg_idx >> 6;
				if (tc == 1) {
					tc += (reg_idx - 64) >> 5;
					if (tc == 2) /* TC2, TC3 */
						tc += (reg_idx - 96) >> 4;
				}
			}
			break;
		default:
			tc = 0;
		}
		txoff <<= tc;
	}
#endif
	return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
}
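
/*
 * Worked example (illustrative): with 8 traffic classes on 82599 the Tx
 * queues are grouped 32/32/16/16/8/8/8/8 per TC, so reg_idx 100 gives
 * tc = 100 >> 5 = 3, then tc += 1 + ((100 - 96) >> 3) = 4, i.e. TC4, and
 * the bit tested is IXGBE_TFCS_TXOFF << 4. A hypothetical standalone
 * restatement of that dcb_i == 8 mapping:
 */
static inline int example_txq_to_tc_82599_8tc(int reg_idx)
{
	int tc = reg_idx >> 5;			/* TC0/TC1: 32 queues each */

	if (tc == 2)				/* TC2/TC3: 16 queues each */
		tc += (reg_idx - 64) >> 4;
	else if (tc == 3)			/* TC4..TC7: 8 queues each */
		tc += 1 + ((reg_idx - 96) >> 3);
	return tc;
}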

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
				       struct ixgbe_ring *tx_ring,
				       unsigned int eop)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* Detect a transmit hang in hardware, this serializes the
	 * check with the clearing of time_stamp and movement of eop */
	adapter->detect_tx_hung = false;
	if (tx_ring->tx_buffer_info[eop].time_stamp &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
	    ixgbe_tx_xon_state(adapter, tx_ring)) {
		/* detected Tx unit hang */
		union ixgbe_adv_tx_desc *tx_desc;
		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
		e_err(drv, "Detected Tx Unit Hang\n"
			"  Tx Queue             <%d>\n"
			"  TDH, TDT             <%x>, <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  jiffies              <%lx>\n",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
			tx_ring->next_to_use, eop,
			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
		return true;
	}

	return false;
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
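
/*
 * Worked example (illustrative): a descriptor carries at most
 * IXGBE_MAX_DATA_PER_TXD = 16384 bytes, so TXD_USE_COUNT(20000) =
 * (20000 >> 14) + 1 = 2. On a 4K-page build (where MAX_SKB_FRAGS is
 * typically 18) DESC_NEEDED works out to 1 + 18 * 1 + 1 = 20, the
 * worst-case descriptor budget per packet. Hypothetical restatement:
 */
static inline unsigned int example_txd_use_count(unsigned int bytes)
{
	/* ceil(bytes / IXGBE_MAX_DATA_PER_TXD) */
	return (bytes >> IXGBE_MAX_TXD_PWR) +
	       ((bytes & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0);
}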

static void ixgbe_tx_timeout(struct net_device *netdev);

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *tx_ring)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		for ( ; !cleaned; count++) {
			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];

			tx_desc->wb.status = 0;
			cleaned = (i == eop);

			i++;
			if (i == tx_ring->count)
				i = 0;

			if (cleaned && tx_buffer_info->skb) {
				total_bytes += tx_buffer_info->bytecount;
				total_packets += tx_buffer_info->gso_segs;
			}

			ixgbe_unmap_and_free_tx_resource(tx_ring,
							 tx_buffer_info);
		}

		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
			/* schedule immediate reset if we believe we hung */
			e_info(probe, "tx hang %d detected, resetting "
			       "adapter\n", adapter->tx_timeout_count + 1);
			ixgbe_tx_timeout(adapter->netdev);
		}
	}

	/* re-arm the interrupt */
	if (count >= tx_ring->work_limit)
		ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));

	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.packets += total_packets;
	tx_ring->stats.bytes += total_bytes;
	u64_stats_update_end(&tx_ring->syncp);
	return count < tx_ring->work_limit;
}
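
/*
 * Illustrative sketch (hypothetical type, not driver code) of the
 * stop/wake protocol above: the xmit path stops the subqueue when free
 * descriptors run low, and the cleanup path re-wakes it once at least
 * TX_WAKE_THRESHOLD descriptors are free again; smp_mb() publishes the
 * new next_to_clean before the __netif_subqueue_stopped() test so a
 * concurrent stop cannot be missed. The free-slot arithmetic mirrors
 * the IXGBE_DESC_UNUSED() idiom:
 */
struct example_txq {
	unsigned int next_to_use;	/* producer (xmit) index */
	unsigned int next_to_clean;	/* consumer (irq clean) index */
	unsigned int count;		/* ring size */
};

static inline unsigned int example_desc_unused(const struct example_txq *q)
{
	/* one slot stays empty so a full ring is distinguishable from empty */
	return ((q->next_to_clean > q->next_to_use) ? 0 : q->count) +
	       q->next_to_clean - q->next_to_use - 1;
}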

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring)
{
	u32 rxctrl;
	int cpu = get_cpu();
	int q = rx_ring->reg_idx;

	if (rx_ring->cpu != cpu) {
		rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
			rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
			rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
				   IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
		}
		rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
		rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
			    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
		rx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring)
{
	u32 txctrl;
	int cpu = get_cpu();
	int q = tx_ring->reg_idx;
	struct ixgbe_hw *hw = &adapter->hw;

	if (tx_ring->cpu != cpu) {
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
			txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
			txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
			txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
				   IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
			txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
		}
		tx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	/* always use CB2 mode, difference is masked in the CB driver */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i]->cpu = -1;
		ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i]->cpu = -1;
		ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */

/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) the packet came from
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
			      struct sk_buff *skb, u8 status,
			      struct ixgbe_ring *ring,
			      union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct napi_struct *napi = &q_vector->napi;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && (tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(napi, skb);
	else
		netif_rx(skb);
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @rx_desc: rx descriptor carrying the hardware status/error word
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);

	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
		if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
		    (adapter->hw.mac.type == ixgbe_mac_82599EB))
			return;

		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	struct sk_buff *skb;
	u16 i = rx_ring->next_to_use;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev)
		return;

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
		bi = &rx_ring->rx_buffer_info[i];
		skb = bi->skb;

		if (!skb) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				rx_ring->rx_stats.alloc_rx_buff_failed++;
				goto no_buffers;
			}
			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			bi->skb = skb;
		}

		if (!bi->dma) {
			bi->dma = dma_map_single(rx_ring->dev,
						 skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				rx_ring->rx_stats.alloc_rx_buff_failed++;
				bi->dma = 0;
				goto no_buffers;
			}
		}

		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
			if (!bi->page) {
				bi->page = netdev_alloc_page(rx_ring->netdev);
				if (!bi->page) {
					rx_ring->rx_stats.alloc_rx_page_failed++;
					goto no_buffers;
				}
			}

			if (!bi->page_dma) {
				/* use a half page if we're re-using */
				bi->page_offset ^= PAGE_SIZE / 2;
				bi->page_dma = dma_map_page(rx_ring->dev,
							    bi->page,
							    bi->page_offset,
							    PAGE_SIZE / 2,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(rx_ring->dev,
						      bi->page_dma)) {
					rx_ring->rx_stats.alloc_rx_page_failed++;
					bi->page_dma = 0;
					goto no_buffers;
				}
			}

			/* Refresh the desc even if buffer_addrs didn't change
			 * because each write-back erases this info. */
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		ixgbe_release_rx_desc(rx_ring, i);
	}
}
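
/*
 * Illustrative note: in packet-split mode each receive page is handed to
 * hardware one half at a time; the page_offset ^= PAGE_SIZE / 2 above just
 * alternates between the two halves on successive refills (with 4K pages:
 * 0 -> 2048 -> 0 -> ...), so a page whose other half is still free can be
 * re-posted without a fresh allocation. Hypothetical restatement:
 */
static inline unsigned int example_flip_half_page(unsigned int page_offset)
{
	return page_offset ^ (PAGE_SIZE / 2);
}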

static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
{
	return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
		IXGBE_RXDADV_RSCCNT_MASK) >>
		IXGBE_RXDADV_RSCCNT_SHIFT;
}
/**
 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
 * @skb: pointer to the last skb in the rsc queue
 * @count: pointer to number of packets coalesced in this context
 *
 * This function changes a queue full of hw rsc buffers into a completed
 * packet.  It uses the ->prev pointers to find the first packet and then
 * turns it into the frag list owner.
 **/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
							u64 *count)
{
	unsigned int frag_list_size = 0;

	while (skb->prev) {
		struct sk_buff *prev = skb->prev;
		frag_list_size += skb->len;
		skb->prev = NULL;
		skb = prev;
		*count += 1;
	}

	skb_shinfo(skb)->frag_list = skb->next;
	skb->next = NULL;
	skb->len += frag_list_size;
	skb->data_len += frag_list_size;
	skb->truesize += frag_list_size;
	return skb;
}
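
/*
 * Illustrative sketch (hypothetical type, not driver code): the RSC chain
 * reaches the function above as last->prev->...->first, built while the
 * non-EOP descriptors were cleaned. The loop walks back to the first skb,
 * clearing ->prev as it goes, and the surviving ->next chain then becomes
 * that skb's frag_list. The same walk on a toy node type:
 */
struct example_node {
	struct example_node *prev;
	struct example_node *next;
};

static inline struct example_node *example_walk_to_head(struct example_node *tail)
{
	while (tail->prev) {
		struct example_node *prev = tail->prev;
		tail->prev = NULL;		/* detach while walking back */
		tail = prev;
	}
	return tail;	/* head; its ->next chain plays the frag_list role */
}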

struct ixgbe_rsc_cb {
	dma_addr_t dma;
	bool delay_unmap;
};

#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)

static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i, rsc_count = 0;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
	int ddp_bytes = 0;
#endif /* IXGBE_FCOE */

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
			if ((len > IXGBE_RX_HDR_SIZE) ||
			    (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
				len = IXGBE_RX_HDR_SIZE;
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}

		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
			    (!(staterr & IXGBE_RXD_STAT_EOP)) &&
			    (!(skb->prev))) {
				/*
				 * When HWRSC is enabled, delay unmapping
				 * of the first packet. It carries the
				 * header information, HW may still
				 * access the header after the writeback.
				 * Only unmap it when EOP is reached
				 */
				IXGBE_RSC_CB(skb)->delay_unmap = true;
				IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
			} else {
				dma_unmap_single(rx_ring->dev,
						 rx_buffer_info->dma,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			}
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		if (upper_len) {
			dma_unmap_page(rx_ring->dev,
				       rx_buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buffer_info->page,
					   rx_buffer_info->page_offset,
					   upper_len);

			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
			rsc_count = ixgbe_get_rsc_count(rx_desc);

		if (rsc_count) {
			u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
				     IXGBE_RXDADV_NEXTP_SHIFT;
			next_buffer = &rx_ring->rx_buffer_info[nextp];
		} else {
			next_buffer = &rx_ring->rx_buffer_info[i];
		}

		if (staterr & IXGBE_RXD_STAT_EOP) {
			if (skb->prev)
				skb = ixgbe_transform_rsc_queue(skb,
						&(rx_ring->rx_stats.rsc_count));
			if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
				if (IXGBE_RSC_CB(skb)->delay_unmap) {
					dma_unmap_single(rx_ring->dev,
							 IXGBE_RSC_CB(skb)->dma,
							 rx_ring->rx_buf_len,
							 DMA_FROM_DEVICE);
					IXGBE_RSC_CB(skb)->dma = 0;
					IXGBE_RSC_CB(skb)->delay_unmap = false;
				}
				if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
					rx_ring->rx_stats.rsc_count +=
						skb_shinfo(skb)->nr_frags;
				else
					rx_ring->rx_stats.rsc_count++;
				rx_ring->rx_stats.rsc_flush++;
			}
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->stats.packets++;
			rx_ring->stats.bytes += skb->len;
			u64_stats_update_end(&rx_ring->syncp);
		} else {
			if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
				rx_buffer_info->skb = next_buffer->skb;
				rx_buffer_info->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			} else {
				skb->next = next_buffer->skb;
				skb->next->prev = skb;
			}
			rx_ring->rx_stats.non_eop_descs++;
			goto next_desc;
		}

		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbe_rx_checksum(adapter, rx_desc, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
#ifdef IXGBE_FCOE
		/* if ddp, not passing to ULD unless for FCP_RSP or error */
		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
			if (!ddp_bytes)
				goto next_desc;
		}
#endif /* IXGBE_FCOE */
		ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);

#ifdef IXGBE_FCOE
	/* include DDPed FCoE data */
	if (ddp_bytes > 0) {
		unsigned int mss;

		mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
			sizeof(struct fc_frame_header) -
			sizeof(struct fcoe_crc_eof);
		if (mss > 512)
			mss &= ~511;
		total_rx_bytes += ddp_bytes;
		total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
	}
#endif /* IXGBE_FCOE */

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;

	return cleaned;
}

static int ixgbe_clean_rxonly(struct napi_struct *, int);
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		/* XXX for_each_set_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
				       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx]->reg_idx;
			ixgbe_set_ivar(adapter, 0, j, v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
					      adapter->num_rx_queues,
					      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
				       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx]->reg_idx;
			ixgbe_set_ivar(adapter, 1, j, v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
					      adapter->num_tx_queues,
					      r_idx + 1);
		}

		if (q_vector->txr_count && !q_vector->rxr_count)
			/* tx only */
			q_vector->eitr = adapter->tx_eitr_param;
		else if (q_vector->rxr_count)
			/* rx or mixed */
			q_vector->eitr = adapter->rx_eitr_param;

		ixgbe_write_eitr(q_vector);
		/* If Flow Director is enabled, set interrupt affinity */
		if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
		    (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
			/*
			 * Allocate the affinity_hint cpumask, assign the mask
			 * for this vector, and set our affinity_hint for
			 * this irq.
			 */
			if (!alloc_cpumask_var(&q_vector->affinity_mask,
					       GFP_KERNEL))
				return;
			cpumask_set_cpu(v_idx, q_vector->affinity_mask);
			irq_set_affinity_hint(adapter->msix_entries[v_idx].vector,
					      q_vector->affinity_mask);
		}
	}

	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
			       v_idx);
	else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
		ixgbe_set_ivar(adapter, -1, 1, v_idx);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	if (adapter->num_vfs)
		mask &= ~(IXGBE_EIMS_OTHER |
			  IXGBE_EIMS_MAILBOX |
			  IXGBE_EIMS_LSC);
	else
		mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
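
/*
 * Illustrative sketch of the cleanup that the "XXX for_each_set_bit(...)"
 * note above suggests: the find_first_bit()/find_next_bit() pairs could be
 * collapsed into the for_each_set_bit() iterator, roughly (untested):
 *
 *	for_each_set_bit(r_idx, q_vector->rxr_idx, adapter->num_rx_queues)
 *		ixgbe_set_ivar(adapter, 0,
 *			       adapter->rx_ring[r_idx]->reg_idx, v_idx);
 */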

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see ixgbe_param.c)
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
			   u32 eitr, u8 itr_setting,
			   int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;


	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000/eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}
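
/*
 * Worked example (illustrative, assuming the driver defaults of
 * eitr_low = 10 and eitr_high = 20 bytes/usec): at eitr = 20000 ints/s the
 * last timeslice was 1000000 / 20000 = 50 usec. If 3000 bytes arrived in
 * that slice, bytes_perint = 3000 / 50 = 60 > eitr_high, so a vector in
 * low_latency is promoted to bulk_latency (8000 ints/s) on the next pass.
 */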

/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* must write high and low 16 bits to reset counter */
		itr_reg |= (itr_reg << 16);
	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		/*
		 * 82599 can support a value of zero, so allow it for
		 * max interrupt rate, but there is an erratum where it
		 * cannot be zero with RSC
		 */
		if (itr_reg == 8 &&
		    !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
			itr_reg = 0;

		/*
		 * set the WDIS bit to not clear the timer bits and cause an
		 * immediate assertion of the interrupt
		 */
		itr_reg |= IXGBE_EITR_CNT_WDIS;
	}
	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}
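
/*
 * Illustrative example: on 82598 an interval of, say, itr_reg = 0x00C8 is
 * written as 0x00C8 | (0x00C8 << 16) = 0x00C800C8, loading both halves of
 * the register so the internal counter restarts as the comment above
 * notes. Hypothetical restatement:
 */
static inline u32 example_mirror_itr_82598(u32 itr_reg)
{
	return itr_reg | (itr_reg << 16);
}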

static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u32 new_itr;
	u8 current_itr, ret_itr;
	int i, r_idx;
	struct ixgbe_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = adapter->tx_ring[r_idx];
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
					   q_vector->tx_itr,
					   tx_ring->total_packets,
					   tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
				    q_vector->tx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = adapter->rx_ring[r_idx];
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
					   q_vector->rx_itr,
					   rx_ring->total_packets,
					   rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
				    q_vector->rx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);

		/* save the algorithm value here, not the smoothed one */
		q_vector->eitr = new_itr;

		ixgbe_write_eitr(q_vector);
	}
}
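
/*
 * Worked example (illustrative): if a vector currently runs at
 * q_vector->eitr = 8000 ints/s and the classifier now asks for 100000,
 * the smoothing above yields (8000 * 90) / 100 + (100000 * 10) / 100 =
 * 7200 + 10000 = 17200 ints/s, so the rate converges over several
 * intervals rather than jumping in one step.
 */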

/**
 * ixgbe_check_overtemp_task - worker thread to check over temperature
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_check_overtemp_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     check_overtemp_task);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr = adapter->interrupt_event;

	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
		return;

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_T3_LOM: {
		u32 autoneg;
		bool link_up = false;

		if (hw->mac.ops.check_link)
			hw->mac.ops.check_link(hw, &autoneg, &link_up, false);

		if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
		    (eicr & IXGBE_EICR_LSC))
			/* Check if this is due to overtemp */
			if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP)
				break;
		return;
	}
	default:
		if (!(eicr & IXGBE_EICR_GPI_SDP0))
			return;
		break;
	}
	e_crit(drv,
	       "Network adapter has been stopped because it has overheated. "
	       "Restart the computer. If the problem persists, "
	       "power off the system and replace the adapter\n");
	/* write to clear the interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
}
static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		e_crit(probe, "Fan has stopped, replace the adapter\n");
		/* write to clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
	}
}

static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (eicr & IXGBE_EICR_GPI_SDP1) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
		schedule_work(&adapter->multispeed_fiber_task);
	} else if (eicr & IXGBE_EICR_GPI_SDP2) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
		schedule_work(&adapter->sfp_config_module_task);
	} else {
		/* Interrupt isn't for us... */
		return;
	}
}

static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->lsc_int++;
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		IXGBE_WRITE_FLUSH(hw);
		schedule_work(&adapter->watchdog_task);
	}
}

static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;

	/*
	 * Workaround for silicon errata. Use clear-by-write instead
	 * of clear-by-read. Reading with EICS will return the
	 * interrupt causes without clearing, which will later be done
	 * with the write to EICR.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (eicr & IXGBE_EICR_MAILBOX)
		ixgbe_msg_task(adapter);

	if (hw->mac.type == ixgbe_mac_82598EB)
		ixgbe_check_fan_failure(adapter, eicr);

	if (hw->mac.type == ixgbe_mac_82599EB) {
		ixgbe_check_sfp_event(adapter, eicr);
		adapter->interrupt_event = eicr;
		if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
		    ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
			schedule_work(&adapter->check_overtemp_task);

		/* Handle Flow Director Full threshold interrupt */
		if (eicr & IXGBE_EICR_FLOW_DIR) {
			int i;
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
			/* Disable transmits before FDIR Re-initialization */
			netif_tx_stop_all_queues(netdev);
			for (i = 0; i < adapter->num_tx_queues; i++) {
				struct ixgbe_ring *tx_ring =
							adapter->tx_ring[i];
				if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
						       &tx_ring->reinit_state))
					schedule_work(&adapter->fdir_reinit_task);
			}
		}
	}
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

	return IRQ_HANDLED;
}

static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
					   u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
	}
	/* skip the flush */
}

static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
					    u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
	}
	/* skip the flush */
}

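/*
 * Illustration of the qmask split above (example values, not part of
 * the driver): on 82599-class parts the 64-bit queue mask spans two
 * 32-bit registers, so enabling the queue owned by vector 35 means
 * qmask = (u64)1 << 35, whose low word is 0 and whose high word is
 * 1 << 3, i.e. bit 3 of EIMS_EX(1). On 82598 only the low RTX_QUEUE
 * bits of the single EIMS register exist.
 */
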
static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *tx_ring;
	int i, r_idx;

	if (!q_vector->txr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = adapter->tx_ring[r_idx];
		tx_ring->total_bytes = 0;
		tx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	/* EIAM disabled interrupts (on this vector) for us */
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring;
	int r_idx;
	int i;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = adapter->rx_ring[r_idx];
		rx_ring->total_bytes = 0;
		rx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	if (!q_vector->rxr_count)
		return IRQ_HANDLED;

	/* disable interrupts on this vector only */
	/* EIAM disabled interrupts (on this vector) for us */
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int r_idx;
	int i;

	if (!q_vector->txr_count && !q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		ring = adapter->tx_ring[r_idx];
		ring->total_bytes = 0;
		ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		ring = adapter->rx_ring[r_idx];
		ring->total_bytes = 0;
		ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	/* EIAM disabled interrupts (on this vector) for us */
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
			container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = adapter->rx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_rx_dca(adapter, rx_ring);
#endif

	ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->rx_itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter,
						((u64)1 << q_vector->v_idx));
	}

	return work_done;
}

/**
 * ixgbe_clean_rxtx_many - msix (aka one shot) rx/tx clean routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one rx queue associated with a
 * q_vector.
 **/
static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
			container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring = NULL;
	int work_done = 0, i;
	long r_idx;
	bool tx_clean_complete = true;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		ring = adapter->tx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_tx_dca(adapter, ring);
#endif
		tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	budget /= (q_vector->rxr_count ?: 1);
	budget = max(budget, 1);
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		ring = adapter->rx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_rx_dca(adapter, ring);
#endif
		ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	ring = adapter->rx_ring[r_idx];
	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->rx_itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter,
						((u64)1 << q_vector->v_idx));
		return 0;
	}

	return work_done;
}

/**
 * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
			container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *tx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	tx_ring = adapter->tx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_tx_dca(adapter, tx_ring);
#endif

	if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
		work_done = budget;

	/* If all Tx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->tx_itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter,
						((u64)1 << q_vector->v_idx));
	}

	return work_done;
}

static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(r_idx, q_vector->rxr_idx);
	q_vector->rxr_count++;
}

static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(t_idx, q_vector->txr_idx);
	q_vector->txr_count++;
}

/**
 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 * @vectors: allotted vector count for descriptor rings
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible. You would add new
 * mapping configurations in here.
 **/
static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
				      int vectors)
{
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	/* No mapping required if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		goto out;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);

		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

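/*
 * Example of the grouping above (illustrative numbers): with 16 Rx
 * rings, 16 Tx rings and only 4 vectors, each pass computes
 * DIV_ROUND_UP(16, 4) = 4, so vector 0 gets Rx rings 0-3, vector 1
 * gets 4-7, and so on; the Tx loop then stacks Tx rings 0-3 onto
 * vector 0 as well. With 10 rings on 4 vectors the per-vector counts
 * come out 3, 3, 2, 2 because the remainder shrinks as i grows.
 */
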
/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irqreturn_t (*handler)(int, void *);
	int i, vector, q_vectors, err;
	int ri = 0, ti = 0;

	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* Map the Tx/Rx rings to the vectors we were allotted. */
	err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
	if (err)
		goto out;

#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
			 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
			 &ixgbe_msix_clean_many)
	for (vector = 0; vector < q_vectors; vector++) {
		handler = SET_HANDLER(adapter->q_vector[vector]);

		if (handler == &ixgbe_msix_clean_rx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "rx", ri++);
		} else if (handler == &ixgbe_msix_clean_tx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "tx", ti++);
		} else
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "TxRx", vector);

		err = request_irq(adapter->msix_entries[vector].vector,
				  handler, 0, adapter->name[vector],
				  adapter->q_vector[vector]);
		if (err) {
			e_err(probe, "request_irq failed for MSIX interrupt "
			      "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	sprintf(adapter->name[vector], "%s:lsc", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
			  ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
	if (err) {
		e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[--vector].vector,
			 adapter->q_vector[i]);
	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
out:
	return err;
}

static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
	u8 current_itr;
	u32 new_itr = q_vector->eitr;
	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];

	q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
					    q_vector->tx_itr,
					    tx_ring->total_packets,
					    tx_ring->total_bytes);
	q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
					    q_vector->rx_itr,
					    rx_ring->total_packets,
					    rx_ring->total_bytes);

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 8000;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->eitr) {
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);

		/* save the algorithm value here, not the smoothed one */
		q_vector->eitr = new_itr;

		ixgbe_write_eitr(q_vector);
	}
}

/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
				    bool flush)
{
	u32 mask;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
		mask |= IXGBE_EIMS_GPI_SDP0;
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
		mask |= IXGBE_EIMS_GPI_SDP1;
	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		mask |= IXGBE_EIMS_ECC;
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		if (adapter->num_vfs)
			mask |= IXGBE_EIMS_MAILBOX;
	}
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	if (queues)
		ixgbe_irq_enable_queues(adapter, ~0);
	if (flush)
		IXGBE_WRITE_FLUSH(&adapter->hw);

	if (adapter->num_vfs > 32) {
		u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
	}
}

/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
	u32 eicr;

	/*
	 * Workaround for silicon errata on 82598. Mask the interrupts
	 * before the read of EICR.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
	 * therefore no explicit interrupt disable is necessary */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (!eicr) {
		/*
		 * shared interrupt alert!
		 * make sure interrupts are enabled because the read will
		 * have disabled interrupts due to EIAM
		 * finish the workaround of silicon errata on 82598. Unmask
		 * the interrupt that we masked before the EICR read.
		 */
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable(adapter, true, true);
		return IRQ_NONE; /* Not our interrupt */
	}

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_check_sfp_event(adapter, eicr);

	ixgbe_check_fan_failure(adapter, eicr);
	if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
	    ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
		schedule_work(&adapter->check_overtemp_task);

	if (napi_schedule_prep(&(q_vector->napi))) {
		adapter->tx_ring[0]->total_packets = 0;
		adapter->tx_ring[0]->total_bytes = 0;
		adapter->rx_ring[0]->total_packets = 0;
		adapter->rx_ring[0]->total_bytes = 0;
		/* would disable interrupts here but EIAM disabled it */
		__napi_schedule(&(q_vector->napi));
	}

	/*
	 * re-enable link (maybe) and non-queue interrupts, no flush.
	 * ixgbe_poll will re-enable the queue interrupts
	 */
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter, false, false);

	return IRQ_HANDLED;
}

static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
		q_vector->rxr_count = 0;
		q_vector->txr_count = 0;
	}
}

/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		err = ixgbe_request_msix_irqs(adapter);
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
				  netdev->name, netdev);
	} else {
		err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
				  netdev->name, netdev);
	}

	if (err)
		e_err(probe, "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i, q_vectors;

		q_vectors = adapter->num_msix_vectors;

		i = q_vectors - 1;
		free_irq(adapter->msix_entries[i].vector, netdev);

		i--;
		for (; i >= 0; i--) {
			free_irq(adapter->msix_entries[i].vector,
				 adapter->q_vector[i]);
		}

		ixgbe_reset_q_vectors(adapter);
	} else {
		free_irq(adapter->pdev->irq, netdev);
	}
}

/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
		if (adapter->num_vfs > 32)
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
	}
	IXGBE_WRITE_FLUSH(&adapter->hw);
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i;
		for (i = 0; i < adapter->num_msix_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 *
 **/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
			EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));

	ixgbe_set_ivar(adapter, 0, 0, 0);
	ixgbe_set_ivar(adapter, 1, 0, 0);

	map_vector_to_rxq(adapter, 0, 0);
	map_vector_to_txq(adapter, 0, 0);

	e_info(hw, "Legacy interrupt IVAR setup done\n");
}

/**
 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 tdba = ring->dma;
	int wait_loop = 10;
	u32 txdctl;
	u16 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
			txdctl & ~IXGBE_TXDCTL_ENABLE);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
			(tdba & DMA_BIT_MASK(32)));
	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_tx_desc));
	IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
	ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);

	/* configure fetching thresholds */
	if (adapter->rx_itr_setting == 0) {
		/* cannot set wthresh when itr==0 */
		txdctl &= ~0x007F0000;
	} else {
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
	}
	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		/* PThresh workaround for Tx hang with DFP enabled. */
		txdctl |= 32;
	}

	/* reinitialize flowdirector state */
	set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);

	/* enable queue */
	txdctl |= IXGBE_TXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);

	/* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
	if (hw->mac.type == ixgbe_mac_82598EB &&
	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
		return;

	/* poll to verify queue is enabled */
	do {
		msleep(1);
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
	if (!wait_loop)
		e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
}

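/*
 * Sizing note for the TDLEN write above (illustrative): each advanced
 * Tx descriptor is 16 bytes, so a ring of 512 descriptors (a common
 * default for this driver) programs TDLEN = 512 * 16 = 8192 bytes of
 * descriptor memory for the queue.
 */
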
static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rttdcs;
	u32 mask;

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* disable the arbiter while setting MTQC */
	rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	rttdcs |= IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);

	/* set transmit pool layout */
	mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
	switch (adapter->flags & mask) {

	case (IXGBE_FLAG_SRIOV_ENABLED):
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
				(IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
		break;

	case (IXGBE_FLAG_DCB_ENABLED):
		/* We enable 8 traffic classes, DCB only */
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
				(IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
		break;

	default:
		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
		break;
	}

	/* re-enable the arbiter */
	rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
}

/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 dmatxctl;
	u32 i;

	ixgbe_setup_mtqc(adapter);

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* DMATXCTL.EN must be before Tx queues are enabled */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
	}

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
				   struct ixgbe_ring *rx_ring)
{
	u32 srrctl;
	int index;
	struct ixgbe_ring_feature *feature = adapter->ring_feature;

	index = rx_ring->reg_idx;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		unsigned long mask;
		mask = (unsigned long) feature[RING_F_RSS].mask;
		index = index & mask;
	}
	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));

	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
	if (adapter->num_vfs)
		srrctl |= IXGBE_SRRCTL_DROP_EN;

	srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
		  IXGBE_SRRCTL_BSIZEHDR_MASK;

	if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
		srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#else
		srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#endif
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else {
		srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
			  IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
	}

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
}

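/*
 * Example of the BSIZEPKT math above (illustrative): BSIZEPKT is
 * expressed in 1 KB units (IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10), so a
 * 1522-byte rx_buf_len is first rounded up by ALIGN(1522, 1024) to
 * 2048 and then encoded as 2048 >> 10 = 2, i.e. two 1 KB chunks per
 * packet buffer.
 */
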
static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
			  0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
			  0x6A3E67EA, 0x14364D17, 0x3BED200D};
	u32 mrqc = 0, reta = 0;
	u32 rxcsum;
	int i, j;
	int mask;

	/* Fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);

	/* Fill out redirection table */
	for (i = 0, j = 0; i < 128; i++, j++) {
		if (j == adapter->ring_feature[RING_F_RSS].indices)
			j = 0;
		/* reta = 4-byte sliding window of
		 * 0x00..(indices-1)(indices-1)00..etc. */
		reta = (reta << 8) | (j * 0x11);
		if ((i & 3) == 3)
			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
	}

	/* Disable indicating checksum in descriptor, enables RSS hash */
	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	rxcsum |= IXGBE_RXCSUM_PCSD;
	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED;
	else
		mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
#ifdef CONFIG_IXGBE_DCB
					 | IXGBE_FLAG_DCB_ENABLED
#endif
					 | IXGBE_FLAG_SRIOV_ENABLED
					);

	switch (mask) {
	case (IXGBE_FLAG_RSS_ENABLED):
		mrqc = IXGBE_MRQC_RSSEN;
		break;
	case (IXGBE_FLAG_SRIOV_ENABLED):
		mrqc = IXGBE_MRQC_VMDQEN;
		break;
#ifdef CONFIG_IXGBE_DCB
	case (IXGBE_FLAG_DCB_ENABLED):
		mrqc = IXGBE_MRQC_RT8TCEN;
		break;
#endif /* CONFIG_IXGBE_DCB */
	default:
		break;
	}

	/* Perform hash on these packet types */
	mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
	      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
	      | IXGBE_MRQC_RSS_FIELD_IPV6
	      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;

	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}

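/*
 * Worked example of the redirection-table fill above (illustrative):
 * with 4 RSS queues, j cycles 0,1,2,3 and j * 0x11 yields the byte
 * values 0x00, 0x11, 0x22, 0x33, so every fourth iteration writes one
 * 32-bit RETA register of 0x00112233 (the earliest entry ends up in
 * the high byte because reta shifts left each pass). 128 entries at
 * 4 per register means 32 register writes in total.
 */
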
/**
 * ixgbe_configure_rscctl - enable RSC for the indicated ring
 * @adapter: address of board private structure
 * @ring: structure containing ring specific data
 **/
static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
				   struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rscctrl;
	int rx_buf_len;
	u16 reg_idx = ring->reg_idx;

	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
		return;

	rx_buf_len = ring->rx_buf_len;
	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
	rscctrl |= IXGBE_RSCCTL_RSCEN;
	/*
	 * we must limit the number of descriptors so that the
	 * total size of max desc * buf_len is not greater
	 * than 65535
	 */
	if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
#if (MAX_SKB_FRAGS > 16)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
#elif (MAX_SKB_FRAGS > 8)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
#elif (MAX_SKB_FRAGS > 4)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
#else
		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
#endif
	} else {
		if (rx_buf_len < IXGBE_RXBUFFER_4096)
			rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
		else if (rx_buf_len < IXGBE_RXBUFFER_8192)
			rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
		else
			rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
	}
	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
}

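/*
 * The 65535 limit above appears to fall out of simple arithmetic
 * (illustrative figures): an aggregated RSC frame's length must fit
 * in 16 bits, so max_desc * buf_len has to stay under 64 KB. A 2 KB
 * buffer allows 16 descriptors (16 * 2048 = 32768), while a 4 KB
 * buffer must drop to 8 descriptors because 16 * 4096 = 65536 would
 * just overflow the field.
 */
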
/**
 * ixgbe_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used;
 * however, due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82599 hardware and newer */
	if (hw->mac.type < ixgbe_mac_82599EB)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return;

	for (i = 0; i < 128; i++)
		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
}

#define IXGBE_MAX_RX_DESC_POLL 10
static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
				       struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int reg_idx = ring->reg_idx;
	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
	u32 rxdctl;

	/* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
	if (hw->mac.type == ixgbe_mac_82598EB &&
	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
		return;

	do {
		msleep(1);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop) {
		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
		      "the polling period\n", reg_idx);
	}
}

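/*
 * Polling budget note (derived from the constants above): the loop
 * sleeps at least 1 ms per pass and makes at most IXGBE_MAX_RX_DESC_POLL
 * (10) passes, so a queue that never asserts RXDCTL.ENABLE holds up
 * bring-up for roughly 10 ms per ring before the error is logged.
 */
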
void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	u32 rxdctl;
	u16 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
			rxdctl & ~IXGBE_RXDCTL_ENABLE);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_rx_desc));
	IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
	ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);

	ixgbe_configure_srrctl(adapter, ring);
	ixgbe_configure_rscctl(adapter, ring);

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/*
		 * enable cache line friendly hardware writes:
		 * PTHRESH=32 descriptors (half the internal cache),
		 * this also removes ugly rx_no_buffer_count increment
		 * HTHRESH=4 descriptors (to minimize latency on fetch)
		 * WTHRESH=8 burst writeback up to two cache lines
		 */
		rxdctl &= ~0x3FFFFF;
		rxdctl |= 0x080420;
	}

	/* enable receive descriptor ring */
	rxdctl |= IXGBE_RXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);

	ixgbe_rx_desc_queue_enable(adapter, ring);
	ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
}

static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int p;

	/* PSRTYPE must be initialized in non 82598 adapters */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
		      IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR |
		      IXGBE_PSRTYPE_L2HDR |
		      IXGBE_PSRTYPE_IPV6HDR;

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
		psrtype |= (adapter->num_rx_queues_per_pool << 29);

	for (p = 0; p < adapter->num_rx_pools; p++)
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
				psrtype);
}

static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gcr_ext;
	u32 vt_reg_bits;
	u32 reg_offset, vf_shift;
	u32 vmdctl;

	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return;

	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
	vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);

	vf_shift = adapter->num_vfs % 32;
	reg_offset = (adapter->num_vfs > 32) ? 1 : 0;

	/* Enable only the PF's pool for Tx/Rx */
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	/* Map PF MAC address in RAR Entry 0 to first pool following VFs */
	hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);

	/*
	 * Set up VF register offsets for selected VT Mode,
	 * i.e. 32 or 64 VFs for SR-IOV
	 */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);

	/* enable Tx loopback for VF/PF communication */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
}

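/*
 * Example of the pool-enable math above (illustrative): the PF's pool
 * number equals num_vfs, and VFRE/VFTE are pairs of 32-bit registers.
 * With 40 VFs the PF owns pool 40, so vf_shift = 40 % 32 = 8 and
 * reg_offset = 1: bit 8 of VFRE(1)/VFTE(1) is set while VFRE(0)/VFTE(0)
 * are cleared, leaving only the PF pool enabled for Tx/Rx.
 */
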
static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int rx_buf_len;
	struct ixgbe_ring *rx_ring;
	int i;
	u32 mhadd, hlreg0;

	/* Decide whether to use packet split mode or not */
	/* Do not use packet split if we're in SR-IOV Mode */
	if (!adapter->num_vfs)
		adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;

	/* Set the RX buffer length according to the mode */
	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		rx_buf_len = IXGBE_RX_HDR_SIZE;
	} else {
		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		else
			rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024);
	}

#ifdef IXGBE_FCOE
	/* adjust max frame to be able to do baby jumbo for FCoE */
	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
	    (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
		max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;

#endif /* IXGBE_FCOE */
	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;

		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	/* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
	hlreg0 |= IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rx_ring = adapter->rx_ring[i];
		rx_ring->rx_buf_len = rx_buf_len;

		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
			rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
		else
			rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;

#ifdef IXGBE_FCOE
		if (netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((i >= f->mask) && (i < f->mask + f->indices)) {
				rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
				if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
					rx_ring->rx_buf_len =
						IXGBE_FCOE_JUMBO_FRAME_SIZE;
			}
		}
#endif /* IXGBE_FCOE */
	}

}

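/*
 * Worked example of the buffer sizing above (illustrative values):
 * with a 1500-byte MTU, max_frame = 1500 + 14 (Ethernet header) + 4
 * (FCS) = 1518. In non-packet-split mode with RSC enabled the buffer
 * becomes ALIGN(1518 + 4, 1024) = 2048 bytes; without RSC and at the
 * default MTU it stays at MAXIMUM_ETHERNET_VLAN_SIZE (1522).
 */
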
static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * For VMDq support of different descriptor types or
		 * buffer sizes through the use of multiple SRRCTL
		 * registers, RDRXCTL.MVMEN must be set to 1
		 *
		 * also, the manual doesn't mention it clearly but DCA hints
		 * will only use queue 0's tags unless this bit is set. Side
		 * effects of setting this bit are only that SRRCTL must be
		 * fully programmed [0..15]
		 */
		rdrxctl |= IXGBE_RDRXCTL_MVMEN;
		break;
	case ixgbe_mac_82599EB:
		/* Disable RSC for ACK packets */
		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
		   (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
		rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
		/* hardware requires some bits to be set by default */
		rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
		rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
		break;
	default:
		/* We should do nothing since we don't know this hardware */
		return;
	}

	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}

/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;
	u32 rxctrl;

	/* disable receives while setting up the descriptors */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	ixgbe_setup_psrtype(adapter);
	ixgbe_setup_rdrxctl(adapter);

	/* Program registers for the distribution of queues */
	ixgbe_setup_mrqc(adapter);

	ixgbe_set_uta(adapter);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbe_set_rx_buffer_len(adapter);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);

	/* disable drop enable for 82598 parts */
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;

	/* enable all receives */
	rxctrl |= IXGBE_RXCTRL_RXEN;
	hw->mac.ops.enable_rx_dma(hw, rxctrl);
}

static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int pool_ndx = adapter->num_vfs;

	/* add VID to filter table */
	hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
	set_bit(vid, adapter->active_vlans);
}

static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int pool_ndx = adapter->num_vfs;

	/* remove VID from filter table */
	hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
	clear_bit(vid, adapter->active_vlans);
}

/**
 * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
 * @adapter: driver data
 */
static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl;

	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
}

/**
 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
 * @adapter: driver data
 */
static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl;

	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlnctrl |= IXGBE_VLNCTRL_VFE;
	vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
}

/**
 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
 * @adapter: driver data
 */
static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl;
	int i, j;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		vlnctrl &= ~IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
		break;
	case ixgbe_mac_82599EB:
		for (i = 0; i < adapter->num_rx_queues; i++) {
			j = adapter->rx_ring[i]->reg_idx;
			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
			vlnctrl &= ~IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
 * @adapter: driver data
 */
static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl;
	int i, j;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		vlnctrl |= IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
		break;
	case ixgbe_mac_82599EB:
		for (i = 0; i < adapter->num_rx_queues; i++) {
			j = adapter->rx_ring[i]->reg_idx;
			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
			vlnctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
		}
		break;
	default:
		break;
	}
}

static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
	u16 vid;

	ixgbe_vlan_rx_add_vid(adapter->netdev, 0);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
}

/**
 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/
static int ixgbe_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->num_vfs;
	unsigned int rar_entries = hw->mac.num_rar_entries - (vfn + 1);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > rar_entries)
		return -ENOMEM;

	if (!netdev_uc_empty(netdev) && rar_entries) {
		struct netdev_hw_addr *ha;
		/* return error if we do not support writing to RAR table */
		if (!hw->mac.ops.set_rar)
			return -ENOMEM;

		netdev_for_each_uc_addr(ha, netdev) {
			if (!rar_entries)
				break;
			hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
					    vfn, IXGBE_RAH_AV);
			count++;
		}
	}
	/* write the addresses in reverse order to avoid write combining */
	for (; rar_entries > 0 ; rar_entries--)
		hw->mac.ops.clear_rar(hw, rar_entries);

	return count;
}

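/*
 * Capacity example for the RAR accounting above (illustrative): an
 * 82599 exposes 128 receive-address registers; with 8 VFs configured,
 * entries for the VFs plus the PF's own address (vfn + 1 = 9) are
 * reserved, leaving rar_entries = 128 - 9 = 119 slots for additional
 * unicast addresses before the driver falls back to unicast
 * promiscuous mode.
 */
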
9a799d71 3197/**
2c5645cf 3198 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
9a799d71
AK
3199 * @netdev: network interface device structure
3200 *
2c5645cf
CL
3201 * The set_rx_method entry point is called whenever the unicast/multicast
3202 * address list or the network interface flags are updated. This routine is
3203 * responsible for configuring the hardware for proper unicast, multicast and
3204 * promiscuous mode.
9a799d71 3205 **/
7f870475 3206void ixgbe_set_rx_mode(struct net_device *netdev)
9a799d71
AK
3207{
3208 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3209 struct ixgbe_hw *hw = &adapter->hw;
2850062a
AD
3210 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
3211 int count;
9a799d71
AK
3212
3213 /* Check for Promiscuous and All Multicast modes */
3214
3215 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3216
f5dc442b
AD
3217 /* set all bits that we expect to always be set */
3218 fctrl |= IXGBE_FCTRL_BAM;
3219 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
3220 fctrl |= IXGBE_FCTRL_PMCF;
3221
2850062a
AD
3222 /* clear the bits we are changing the status of */
3223 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3224
9a799d71 3225 if (netdev->flags & IFF_PROMISC) {
e433ea1f 3226 hw->addr_ctrl.user_set_promisc = true;
9a799d71 3227 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2850062a 3228 vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
5f6c0181
JB
3229 /* don't hardware filter vlans in promisc mode */
3230 ixgbe_vlan_filter_disable(adapter);
9a799d71 3231 } else {
746b9f02
PM
3232 if (netdev->flags & IFF_ALLMULTI) {
3233 fctrl |= IXGBE_FCTRL_MPE;
2850062a
AD
3234 vmolr |= IXGBE_VMOLR_MPE;
3235 } else {
3236 /*
3237 * Write addresses to the MTA, if the attempt fails
3238 * then we should just turn on promiscous mode so
3239 * that we can at least receive multicast traffic
3240 */
3241 hw->mac.ops.update_mc_addr_list(hw, netdev);
3242 vmolr |= IXGBE_VMOLR_ROMPE;
746b9f02 3243 }
5f6c0181 3244 ixgbe_vlan_filter_enable(adapter);
e433ea1f 3245 hw->addr_ctrl.user_set_promisc = false;
2850062a
AD
3246 /*
3247		 * Write addresses to available RAR registers; if there is not
3248		 * sufficient space to store all the addresses, then enable
3249		 * unicast promiscuous mode
3250 */
3251 count = ixgbe_write_uc_addr_list(netdev);
3252 if (count < 0) {
3253 fctrl |= IXGBE_FCTRL_UPE;
3254 vmolr |= IXGBE_VMOLR_ROPE;
3255 }
9a799d71
AK
3256 }
3257
2850062a 3258 if (adapter->num_vfs) {
1cdd1ec8 3259 ixgbe_restore_vf_multicasts(adapter);
2850062a
AD
3260 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
3261 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
3262 IXGBE_VMOLR_ROPE);
3263 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
3264 }
3265
3266 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
f62bbb5e
JG
3267
3268 if (netdev->features & NETIF_F_HW_VLAN_RX)
3269 ixgbe_vlan_strip_enable(adapter);
3270 else
3271 ixgbe_vlan_strip_disable(adapter);
9a799d71
AK
3272}
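
/*
 * A self-contained sketch of the FCTRL decision above, reduced to the
 * three interface states it distinguishes. The flag and bit values are
 * invented for illustration; only the read-modify-write shape (set the
 * always-on bits, clear the mode bits, re-add per-mode bits) mirrors the
 * driver.
 */
#include <stdio.h>

#define FLAG_PROMISC	0x1	/* stand-in for IFF_PROMISC */
#define FLAG_ALLMULTI	0x2	/* stand-in for IFF_ALLMULTI */
#define CTRL_UPE	0x100	/* stand-in for IXGBE_FCTRL_UPE */
#define CTRL_MPE	0x200	/* stand-in for IXGBE_FCTRL_MPE */
#define CTRL_ALWAYS_ON	0x400	/* stand-in for BAM | DPF | PMCF */

static unsigned int rx_mode_ctrl(unsigned int ctrl, unsigned int flags)
{
	ctrl |= CTRL_ALWAYS_ON;			/* bits we always want set */
	ctrl &= ~(CTRL_UPE | CTRL_MPE);		/* clear what we may change */

	if (flags & FLAG_PROMISC)
		ctrl |= CTRL_UPE | CTRL_MPE;	/* accept everything */
	else if (flags & FLAG_ALLMULTI)
		ctrl |= CTRL_MPE;		/* accept all multicast only */

	return ctrl;
}

int main(void)
{
	printf("0x%x\n", rx_mode_ctrl(0, FLAG_PROMISC));	/* 0x700 */
	printf("0x%x\n", rx_mode_ctrl(0, FLAG_ALLMULTI));	/* 0x600 */
	printf("0x%x\n", rx_mode_ctrl(0, 0));			/* 0x400 */
	return 0;
}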
3273
021230d4
AV
3274static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
3275{
3276 int q_idx;
3277 struct ixgbe_q_vector *q_vector;
3278 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3279
3280 /* legacy and MSI only use one vector */
3281 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
3282 q_vectors = 1;
3283
3284 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
f0848276 3285 struct napi_struct *napi;
7a921c93 3286 q_vector = adapter->q_vector[q_idx];
f0848276 3287 napi = &q_vector->napi;
91281fd3
AD
3288 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3289 if (!q_vector->rxr_count || !q_vector->txr_count) {
3290 if (q_vector->txr_count == 1)
3291 napi->poll = &ixgbe_clean_txonly;
3292 else if (q_vector->rxr_count == 1)
3293 napi->poll = &ixgbe_clean_rxonly;
3294 }
3295 }
f0848276
JB
3296
3297 napi_enable(napi);
021230d4
AV
3298 }
3299}
3300
3301static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
3302{
3303 int q_idx;
3304 struct ixgbe_q_vector *q_vector;
3305 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3306
3307 /* legacy and MSI only use one vector */
3308 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
3309 q_vectors = 1;
3310
3311 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
7a921c93 3312 q_vector = adapter->q_vector[q_idx];
021230d4
AV
3313 napi_disable(&q_vector->napi);
3314 }
3315}
3316
7a6b6f51 3317#ifdef CONFIG_IXGBE_DCB
2f90b865
AD
3318/*
3319 * ixgbe_configure_dcb - Configure DCB hardware
3320 * @adapter: ixgbe adapter struct
3321 *
3322 * This is called by the driver on open to configure the DCB hardware.
3323 * This is also called by the generic netlink interface when reconfiguring
3324 * the DCB state.
3325 */
3326static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3327{
3328 struct ixgbe_hw *hw = &adapter->hw;
9806307a 3329 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2f90b865 3330
67ebd791
AD
3331 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
3332 if (hw->mac.type == ixgbe_mac_82598EB)
3333 netif_set_gso_max_size(adapter->netdev, 65536);
3334 return;
3335 }
3336
3337 if (hw->mac.type == ixgbe_mac_82598EB)
3338 netif_set_gso_max_size(adapter->netdev, 32768);
3339
9806307a
JF
3340#ifdef CONFIG_FCOE
3341 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
3342 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
3343#endif
3344
80ab193d 3345 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
9806307a 3346 DCB_TX_CONFIG);
80ab193d 3347 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
9806307a 3348 DCB_RX_CONFIG);
2f90b865 3349
2f90b865 3350 /* Enable VLAN tag insert/strip */
f62bbb5e 3351 adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
5f6c0181 3352
2f90b865 3353 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
01fa7d90
AD
3354
3355 /* reconfigure the hardware */
3356 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
2f90b865
AD
3357}
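
/*
 * The max_frame computed above is just the MTU plus the Ethernet header
 * and FCS, optionally raised to the FCoE jumbo size. A worked example
 * with the standard header sizes; the FCoE jumbo value here is an
 * assumed placeholder, not read from the driver headers.
 */
#include <stdio.h>

#define ETH_HLEN	14	/* dst MAC + src MAC + ethertype */
#define ETH_FCS_LEN	4	/* frame check sequence */
#define FCOE_JUMBO	3072	/* placeholder for IXGBE_FCOE_JUMBO_FRAME_SIZE */

int main(void)
{
	int mtu = 1500;
	int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN;	/* 1518 */
	int fcoe_frame = max_frame > FCOE_JUMBO ? max_frame : FCOE_JUMBO;

	printf("plain: %d, with FCoE MTU: %d\n", max_frame, fcoe_frame);
	return 0;
}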
3358
3359#endif
9a799d71
AK
3360static void ixgbe_configure(struct ixgbe_adapter *adapter)
3361{
3362 struct net_device *netdev = adapter->netdev;
c4cf55e5 3363 struct ixgbe_hw *hw = &adapter->hw;
9a799d71
AK
3364 int i;
3365
7a6b6f51 3366#ifdef CONFIG_IXGBE_DCB
67ebd791 3367 ixgbe_configure_dcb(adapter);
2f90b865 3368#endif
9a799d71 3369
f62bbb5e
JG
3370 ixgbe_set_rx_mode(netdev);
3371 ixgbe_restore_vlan(adapter);
3372
eacd73f7
YZ
3373#ifdef IXGBE_FCOE
3374 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
3375 ixgbe_configure_fcoe(adapter);
3376
3377#endif /* IXGBE_FCOE */
c4cf55e5
PWJ
3378 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3379 for (i = 0; i < adapter->num_tx_queues; i++)
4a0b9ca0 3380 adapter->tx_ring[i]->atr_sample_rate =
e8e9f696 3381 adapter->atr_sample_rate;
c4cf55e5
PWJ
3382 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
3383 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
3384 ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
3385 }
933d41f1 3386 ixgbe_configure_virtualization(adapter);
c4cf55e5 3387
9a799d71
AK
3388 ixgbe_configure_tx(adapter);
3389 ixgbe_configure_rx(adapter);
9a799d71
AK
3390}
3391
e8e26350
PW
3392static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
3393{
3394 switch (hw->phy.type) {
3395 case ixgbe_phy_sfp_avago:
3396 case ixgbe_phy_sfp_ftl:
3397 case ixgbe_phy_sfp_intel:
3398 case ixgbe_phy_sfp_unknown:
ea0a04df
DS
3399 case ixgbe_phy_sfp_passive_tyco:
3400 case ixgbe_phy_sfp_passive_unknown:
3401 case ixgbe_phy_sfp_active_unknown:
3402 case ixgbe_phy_sfp_ftl_active:
e8e26350
PW
3403 return true;
3404 default:
3405 return false;
3406 }
3407}
3408
0ecc061d 3409/**
e8e26350
PW
3410 * ixgbe_sfp_link_config - set up SFP+ link
3411 * @adapter: pointer to private adapter struct
3412 **/
3413static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
3414{
3415 struct ixgbe_hw *hw = &adapter->hw;
3416
3417 if (hw->phy.multispeed_fiber) {
3418 /*
3419 * In multispeed fiber setups, the device may not have
3420 * had a physical connection when the driver loaded.
3421 * If that's the case, the initial link configuration
3422 * couldn't get the MAC into 10G or 1G mode, so we'll
3423 * never have a link status change interrupt fire.
3424		 * We need to try to force an autonegotiation
3425 * session, then bring up link.
3426 */
3427 hw->mac.ops.setup_sfp(hw);
3428 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
3429 schedule_work(&adapter->multispeed_fiber_task);
3430 } else {
3431 /*
3432 * Direct Attach Cu and non-multispeed fiber modules
3433 * still need to be configured properly prior to
3434 * attempting link.
3435 */
3436 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
3437 schedule_work(&adapter->sfp_config_module_task);
3438 }
3439}
3440
3441/**
3442 * ixgbe_non_sfp_link_config - set up non-SFP+ link
0ecc061d
PWJ
3443 * @hw: pointer to private hardware struct
3444 *
3445 * Returns 0 on success, negative on failure
3446 **/
e8e26350 3447static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
0ecc061d
PWJ
3448{
3449 u32 autoneg;
8620a103 3450 bool negotiation, link_up = false;
0ecc061d
PWJ
3451 u32 ret = IXGBE_ERR_LINK_SETUP;
3452
3453 if (hw->mac.ops.check_link)
3454 ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
3455
3456 if (ret)
3457 goto link_cfg_out;
3458
3459 if (hw->mac.ops.get_link_capabilities)
e8e9f696
JP
3460 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
3461 &negotiation);
0ecc061d
PWJ
3462 if (ret)
3463 goto link_cfg_out;
3464
8620a103
MC
3465 if (hw->mac.ops.setup_link)
3466 ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
0ecc061d
PWJ
3467link_cfg_out:
3468 return ret;
3469}
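
/*
 * A sketch of the call pattern in ixgbe_non_sfp_link_config() above:
 * every step goes through an ops table whose entries may be NULL, and
 * the first failure short-circuits to the exit label. The types and
 * return codes below are invented; only the guard-then-call, goto-out
 * shape is the point.
 */
#include <stdio.h>

struct fake_mac_ops {
	int (*check_link)(int *up);
	int (*setup_link)(int up);
};

static int check_link_impl(int *up) { *up = 0; return 0; }
static int setup_link_impl(int up) { (void)up; return 0; }

static int link_config(const struct fake_mac_ops *ops)
{
	int up = 0;
	int ret = -1;			/* default: operation unsupported */

	if (ops->check_link)
		ret = ops->check_link(&up);
	if (ret)
		goto out;

	if (ops->setup_link)
		ret = ops->setup_link(up);
out:
	return ret;
}

int main(void)
{
	struct fake_mac_ops ops = { check_link_impl, setup_link_impl };

	printf("link_config: %d\n", link_config(&ops));	/* prints 0 */
	return 0;
}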
3470
a34bcfff 3471static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
9a799d71 3472{
9a799d71 3473 struct ixgbe_hw *hw = &adapter->hw;
a34bcfff 3474 u32 gpie = 0;
9a799d71 3475
9b471446 3476 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
a34bcfff
AD
3477 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
3478 IXGBE_GPIE_OCD;
3479 gpie |= IXGBE_GPIE_EIAME;
9b471446
JB
3480 /*
3481 * use EIAM to auto-mask when MSI-X interrupt is asserted
3482 * this saves a register write for every interrupt
3483 */
3484 switch (hw->mac.type) {
3485 case ixgbe_mac_82598EB:
3486 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3487 break;
3488 default:
3489 case ixgbe_mac_82599EB:
3490 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3491 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3492 break;
3493 }
3494 } else {
021230d4
AV
3495 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
3496 * specifically only auto mask tx and rx interrupts */
3497 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3498 }
9a799d71 3499
a34bcfff
AD
3500 /* XXX: to interrupt immediately for EICS writes, enable this */
3501 /* gpie |= IXGBE_GPIE_EIMEN; */
3502
3503 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3504 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
3505 gpie |= IXGBE_GPIE_VTMODE_64;
119fc60a
MC
3506 }
3507
a34bcfff
AD
3508 /* Enable fan failure interrupt */
3509 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
0befdb3e 3510 gpie |= IXGBE_SDP1_GPIEN;
0befdb3e 3511
a34bcfff 3512	if (hw->mac.type == ixgbe_mac_82599EB)
e8e26350
PW
3513		gpie |= IXGBE_SDP1_GPIEN |
3514			IXGBE_SDP2_GPIEN;
a34bcfff
AD
3515
3516 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3517}
3518
3519static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3520{
3521 struct ixgbe_hw *hw = &adapter->hw;
a34bcfff 3522 int err;
a34bcfff
AD
3523 u32 ctrl_ext;
3524
3525 ixgbe_get_hw_control(adapter);
3526 ixgbe_setup_gpie(adapter);
e8e26350 3527
9a799d71
AK
3528 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3529 ixgbe_configure_msix(adapter);
3530 else
3531 ixgbe_configure_msi_and_legacy(adapter);
3532
61fac744
PW
3533 /* enable the optics */
3534 if (hw->phy.multispeed_fiber)
3535 hw->mac.ops.enable_tx_laser(hw);
3536
9a799d71 3537 clear_bit(__IXGBE_DOWN, &adapter->state);
021230d4
AV
3538 ixgbe_napi_enable_all(adapter);
3539
3540 /* clear any pending interrupts, may auto mask */
3541 IXGBE_READ_REG(hw, IXGBE_EICR);
6af3b9eb 3542 ixgbe_irq_enable(adapter, true, true);
9a799d71 3543
bf069c97
DS
3544 /*
3545 * If this adapter has a fan, check to see if we had a failure
3546 * before we enabled the interrupt.
3547 */
3548 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
3549 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
3550 if (esdp & IXGBE_ESDP_SDP1)
396e799c 3551 e_crit(drv, "Fan has stopped, replace the adapter\n");
bf069c97
DS
3552 }
3553
e8e26350
PW
3554 /*
3555 * For hot-pluggable SFP+ devices, a new SFP+ module may have
19343de2
DS
3556 * arrived before interrupts were enabled but after probe. Such
3557 * devices wouldn't have their type identified yet. We need to
3558 * kick off the SFP+ module setup first, then try to bring up link.
e8e26350
PW
3559 * If we're not hot-pluggable SFP+, we just need to configure link
3560 * and bring it up.
3561 */
19343de2
DS
3562 if (hw->phy.type == ixgbe_phy_unknown) {
3563 err = hw->phy.ops.identify(hw);
3564 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
5da43c1a
DS
3565 /*
3566			 * Take the device down and schedule the SFP config work,
3567 * which will unregister_netdev and log it.
3568 */
19343de2 3569 ixgbe_down(adapter);
5da43c1a 3570 schedule_work(&adapter->sfp_config_module_task);
19343de2
DS
3571 return err;
3572 }
e8e26350
PW
3573 }
3574
3575 if (ixgbe_is_sfp(hw)) {
3576 ixgbe_sfp_link_config(adapter);
3577 } else {
3578 err = ixgbe_non_sfp_link_config(hw);
3579 if (err)
396e799c 3580 e_err(probe, "link_config FAILED %d\n", err);
e8e26350 3581 }
0ecc061d 3582
1da100bb 3583 /* enable transmits */
477de6ed 3584 netif_tx_start_all_queues(adapter->netdev);
1da100bb 3585
9a799d71
AK
3586 /* bring the link up in the watchdog, this could race with our first
3587 * link up interrupt but shouldn't be a problem */
cf8280ee
JB
3588 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
3589 adapter->link_check_timeout = jiffies;
9a799d71 3590 mod_timer(&adapter->watchdog_timer, jiffies);
c9205697
GR
3591
3592 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
3593 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3594 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3595 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3596
9a799d71
AK
3597 return 0;
3598}
3599
d4f80882
AV
3600void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
3601{
3602 WARN_ON(in_interrupt());
3603 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
3604 msleep(1);
3605 ixgbe_down(adapter);
5809a1ae
GR
3606 /*
3607 * If SR-IOV enabled then wait a bit before bringing the adapter
3608 * back up to give the VFs time to respond to the reset. The
3609 * two second wait is based upon the watchdog timer cycle in
3610 * the VF driver.
3611 */
3612 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3613 msleep(2000);
d4f80882
AV
3614 ixgbe_up(adapter);
3615 clear_bit(__IXGBE_RESETTING, &adapter->state);
3616}
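
/*
 * A userspace sketch of the "only one resetter at a time" pattern in
 * ixgbe_reinit_locked() above, using a C11 atomic flag in place of the
 * kernel's test_and_set_bit() on adapter->state. The sleep is elided;
 * a real caller would back off (msleep) instead of spinning hot.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag resetting = ATOMIC_FLAG_INIT;

static void reinit_locked(void)
{
	while (atomic_flag_test_and_set(&resetting))
		;			/* another thread is resetting: wait */

	puts("down + up");		/* stand-in for ixgbe_down/ixgbe_up */

	atomic_flag_clear(&resetting);	/* let the next resetter in */
}

int main(void)
{
	reinit_locked();
	return 0;
}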
3617
9a799d71
AK
3618int ixgbe_up(struct ixgbe_adapter *adapter)
3619{
3620 /* hardware has been reset, we need to reload some things */
3621 ixgbe_configure(adapter);
3622
3623 return ixgbe_up_complete(adapter);
3624}
3625
3626void ixgbe_reset(struct ixgbe_adapter *adapter)
3627{
c44ade9e 3628 struct ixgbe_hw *hw = &adapter->hw;
8ca783ab
DS
3629 int err;
3630
3631 err = hw->mac.ops.init_hw(hw);
da4dd0f7
PWJ
3632 switch (err) {
3633 case 0:
3634 case IXGBE_ERR_SFP_NOT_PRESENT:
3635 break;
3636 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
849c4542 3637 e_dev_err("master disable timed out\n");
da4dd0f7 3638 break;
794caeb2
PWJ
3639 case IXGBE_ERR_EEPROM_VERSION:
3640 /* We are running on a pre-production device, log a warning */
849c4542
ET
3641 e_dev_warn("This device is a pre-production adapter/LOM. "
3642 "Please be aware there may be issuesassociated with "
3643 "your hardware. If you are experiencing problems "
3644 "please contact your Intel or hardware "
3645 "representative who provided you with this "
3646 "hardware.\n");
794caeb2 3647 break;
da4dd0f7 3648 default:
849c4542 3649 e_dev_err("Hardware Error: %d\n", err);
da4dd0f7 3650 }
9a799d71
AK
3651
3652 /* reprogram the RAR[0] in case user changed it. */
1cdd1ec8
GR
3653 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
3654 IXGBE_RAH_AV);
9a799d71
AK
3655}
3656
9a799d71
AK
3657/**
3658 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
9a799d71
AK
3659 * @rx_ring: ring to free buffers from
3660 **/
b6ec895e 3661static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
9a799d71 3662{
b6ec895e 3663 struct device *dev = rx_ring->dev;
9a799d71 3664 unsigned long size;
b6ec895e 3665 u16 i;
9a799d71 3666
84418e3b
AD
3667 /* ring already cleared, nothing to do */
3668 if (!rx_ring->rx_buffer_info)
3669 return;
9a799d71 3670
84418e3b 3671 /* Free all the Rx ring sk_buffs */
9a799d71
AK
3672 for (i = 0; i < rx_ring->count; i++) {
3673 struct ixgbe_rx_buffer *rx_buffer_info;
3674
3675 rx_buffer_info = &rx_ring->rx_buffer_info[i];
3676 if (rx_buffer_info->dma) {
b6ec895e 3677 dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
e8e9f696 3678 rx_ring->rx_buf_len,
1b507730 3679 DMA_FROM_DEVICE);
9a799d71
AK
3680 rx_buffer_info->dma = 0;
3681 }
3682 if (rx_buffer_info->skb) {
f8212f97 3683 struct sk_buff *skb = rx_buffer_info->skb;
9a799d71 3684 rx_buffer_info->skb = NULL;
f8212f97
AD
3685 do {
3686 struct sk_buff *this = skb;
e8171aaa 3687 if (IXGBE_RSC_CB(this)->delay_unmap) {
b6ec895e 3688 dma_unmap_single(dev,
1b507730 3689 IXGBE_RSC_CB(this)->dma,
e8e9f696 3690 rx_ring->rx_buf_len,
1b507730 3691 DMA_FROM_DEVICE);
fd3686a8 3692 IXGBE_RSC_CB(this)->dma = 0;
e8171aaa 3693 IXGBE_RSC_CB(skb)->delay_unmap = false;
fd3686a8 3694 }
f8212f97
AD
3695 skb = skb->prev;
3696 dev_kfree_skb(this);
3697 } while (skb);
9a799d71
AK
3698 }
3699 if (!rx_buffer_info->page)
3700 continue;
4f57ca6e 3701 if (rx_buffer_info->page_dma) {
b6ec895e 3702 dma_unmap_page(dev, rx_buffer_info->page_dma,
1b507730 3703 PAGE_SIZE / 2, DMA_FROM_DEVICE);
4f57ca6e
JB
3704 rx_buffer_info->page_dma = 0;
3705 }
9a799d71
AK
3706 put_page(rx_buffer_info->page);
3707 rx_buffer_info->page = NULL;
762f4c57 3708 rx_buffer_info->page_offset = 0;
9a799d71
AK
3709 }
3710
3711 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
3712 memset(rx_ring->rx_buffer_info, 0, size);
3713
3714 /* Zero out the descriptor ring */
3715 memset(rx_ring->desc, 0, rx_ring->size);
3716
3717 rx_ring->next_to_clean = 0;
3718 rx_ring->next_to_use = 0;
9a799d71
AK
3719}
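
/*
 * The RSC cleanup above walks a chain of buffers linked through
 * skb->prev, freeing as it goes. A self-contained sketch of the same
 * do/while walk over an invented node type:
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_skb {
	struct fake_skb *prev;	/* next RSC segment in the chain */
	int id;
};

static void free_chain(struct fake_skb *skb)
{
	do {
		struct fake_skb *this = skb;

		skb = skb->prev;	/* advance before freeing */
		printf("freeing %d\n", this->id);
		free(this);
	} while (skb);
}

int main(void)
{
	struct fake_skb *a = malloc(sizeof(*a));
	struct fake_skb *b = malloc(sizeof(*b));

	if (!a || !b)
		return 1;
	b->prev = a; b->id = 2;
	a->prev = NULL; a->id = 1;
	free_chain(b);			/* frees 2, then 1 */
	return 0;
}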
3720
3721/**
3722 * ixgbe_clean_tx_ring - Free Tx Buffers
9a799d71
AK
3723 * @tx_ring: ring to be cleaned
3724 **/
b6ec895e 3725static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
9a799d71
AK
3726{
3727 struct ixgbe_tx_buffer *tx_buffer_info;
3728 unsigned long size;
b6ec895e 3729 u16 i;
9a799d71 3730
84418e3b
AD
3731 /* ring already cleared, nothing to do */
3732 if (!tx_ring->tx_buffer_info)
3733 return;
9a799d71 3734
84418e3b 3735 /* Free all the Tx ring sk_buffs */
9a799d71
AK
3736 for (i = 0; i < tx_ring->count; i++) {
3737 tx_buffer_info = &tx_ring->tx_buffer_info[i];
b6ec895e 3738 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
9a799d71
AK
3739 }
3740
3741 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
3742 memset(tx_ring->tx_buffer_info, 0, size);
3743
3744 /* Zero out the descriptor ring */
3745 memset(tx_ring->desc, 0, tx_ring->size);
3746
3747 tx_ring->next_to_use = 0;
3748 tx_ring->next_to_clean = 0;
9a799d71
AK
3749}
3750
3751/**
021230d4 3752 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
9a799d71
AK
3753 * @adapter: board private structure
3754 **/
021230d4 3755static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
9a799d71
AK
3756{
3757 int i;
3758
021230d4 3759 for (i = 0; i < adapter->num_rx_queues; i++)
b6ec895e 3760 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
9a799d71
AK
3761}
3762
3763/**
021230d4 3764 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
9a799d71
AK
3765 * @adapter: board private structure
3766 **/
021230d4 3767static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
9a799d71
AK
3768{
3769 int i;
3770
021230d4 3771 for (i = 0; i < adapter->num_tx_queues; i++)
b6ec895e 3772 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
9a799d71
AK
3773}
3774
3775void ixgbe_down(struct ixgbe_adapter *adapter)
3776{
3777 struct net_device *netdev = adapter->netdev;
7f821875 3778 struct ixgbe_hw *hw = &adapter->hw;
9a799d71 3779 u32 rxctrl;
7f821875
JB
3780 u32 txdctl;
3781 int i, j;
b25ebfd2 3782 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
9a799d71
AK
3783
3784 /* signal that we are down to the interrupt handler */
3785 set_bit(__IXGBE_DOWN, &adapter->state);
3786
767081ad
GR
3787 /* disable receive for all VFs and wait one second */
3788 if (adapter->num_vfs) {
767081ad
GR
3789 /* ping all the active vfs to let them know we are going down */
3790 ixgbe_ping_all_vfs(adapter);
581d1aa7 3791
767081ad
GR
3792 /* Disable all VFTE/VFRE TX/RX */
3793 ixgbe_disable_tx_rx(adapter);
581d1aa7
GR
3794
3795 /* Mark all the VFs as inactive */
3796 for (i = 0 ; i < adapter->num_vfs; i++)
3797 adapter->vfinfo[i].clear_to_send = 0;
767081ad
GR
3798 }
3799
9a799d71 3800 /* disable receives */
7f821875
JB
3801 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3802 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
9a799d71 3803
7f821875 3804 IXGBE_WRITE_FLUSH(hw);
9a799d71
AK
3805 msleep(10);
3806
7f821875
JB
3807 netif_tx_stop_all_queues(netdev);
3808
0a1f87cb
DS
3809 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
3810 del_timer_sync(&adapter->sfp_timer);
9a799d71 3811 del_timer_sync(&adapter->watchdog_timer);
cf8280ee 3812 cancel_work_sync(&adapter->watchdog_task);
9a799d71 3813
c0dfb90e
JF
3814 netif_carrier_off(netdev);
3815 netif_tx_disable(netdev);
3816
3817 ixgbe_irq_disable(adapter);
3818
3819 ixgbe_napi_disable_all(adapter);
3820
b25ebfd2
PW
3821 /* Cleanup the affinity_hint CPU mask memory and callback */
3822 for (i = 0; i < num_q_vectors; i++) {
3823 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
3824 /* clear the affinity_mask in the IRQ descriptor */
3825		irq_set_affinity_hint(adapter->msix_entries[i].vector, NULL);
3826 /* release the CPU mask memory */
3827 free_cpumask_var(q_vector->affinity_mask);
3828 }
3829
c4cf55e5
PWJ
3830 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
3831 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
3832 cancel_work_sync(&adapter->fdir_reinit_task);
3833
119fc60a
MC
3834 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
3835 cancel_work_sync(&adapter->check_overtemp_task);
3836
7f821875
JB
3837 /* disable transmits in the hardware now that interrupts are off */
3838 for (i = 0; i < adapter->num_tx_queues; i++) {
4a0b9ca0 3839 j = adapter->tx_ring[i]->reg_idx;
7f821875
JB
3840 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3841 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
e8e9f696 3842 (txdctl & ~IXGBE_TXDCTL_ENABLE));
7f821875 3843 }
88512539
PW
3844 /* Disable the Tx DMA engine on 82599 */
3845 if (hw->mac.type == ixgbe_mac_82599EB)
3846 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
e8e9f696
JP
3847 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
3848 ~IXGBE_DMATXCTL_TE));
7f821875 3849
9f756f01
JF
3850 /* power down the optics */
3851 if (hw->phy.multispeed_fiber)
3852 hw->mac.ops.disable_tx_laser(hw);
3853
9a713e7c
PW
3854 /* clear n-tuple filters that are cached */
3855 ethtool_ntuple_flush(netdev);
3856
6f4a0e45
PL
3857 if (!pci_channel_offline(adapter->pdev))
3858 ixgbe_reset(adapter);
9a799d71
AK
3859 ixgbe_clean_all_tx_rings(adapter);
3860 ixgbe_clean_all_rx_rings(adapter);
3861
5dd2d332 3862#ifdef CONFIG_IXGBE_DCA
96b0e0f6 3863 /* since we reset the hardware DCA settings were cleared */
e35ec126 3864 ixgbe_setup_dca(adapter);
96b0e0f6 3865#endif
9a799d71
AK
3866}
3867
9a799d71 3868/**
021230d4
AV
3869 * ixgbe_poll - NAPI Rx polling callback
3870 * @napi: structure for representing this polling device
3871 * @budget: how many packets driver is allowed to clean
3872 *
3873 * This function is used for legacy and MSI, NAPI mode
9a799d71 3874 **/
021230d4 3875static int ixgbe_poll(struct napi_struct *napi, int budget)
9a799d71 3876{
9a1a69ad 3877 struct ixgbe_q_vector *q_vector =
e8e9f696 3878 container_of(napi, struct ixgbe_q_vector, napi);
021230d4 3879 struct ixgbe_adapter *adapter = q_vector->adapter;
9a1a69ad 3880 int tx_clean_complete, work_done = 0;
9a799d71 3881
5dd2d332 3882#ifdef CONFIG_IXGBE_DCA
bd0362dd 3883 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
4a0b9ca0
PW
3884 ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
3885 ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
bd0362dd
JC
3886 }
3887#endif
3888
4a0b9ca0
PW
3889 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
3890 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
9a799d71 3891
9a1a69ad 3892 if (!tx_clean_complete)
d2c7ddd6
DM
3893 work_done = budget;
3894
53e52c72
DM
3895 /* If budget not fully consumed, exit the polling mode */
3896 if (work_done < budget) {
288379f0 3897 napi_complete(napi);
f7554a2b 3898 if (adapter->rx_itr_setting & 1)
f494e8fa 3899 ixgbe_set_itr(adapter);
d4f80882 3900 if (!test_bit(__IXGBE_DOWN, &adapter->state))
835462fc 3901 ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
9a799d71 3902 }
9a799d71
AK
3903 return work_done;
3904}
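
/*
 * A sketch of the NAPI contract the poll routine above follows: consume
 * at most `budget` packets per call; returning a value equal to the
 * budget keeps the device in polling mode, returning less tells the
 * caller interrupts may be re-enabled. Everything here is simulated.
 */
#include <stdio.h>

static int fake_poll(int *pending, int budget)
{
	int done = (*pending < budget) ? *pending : budget;

	*pending -= done;
	return done;	/* == budget: keep polling; < budget: done for now */
}

int main(void)
{
	int pending = 150;
	int done;

	do {
		done = fake_poll(&pending, 64);
		printf("cleaned %d, %d left\n", done, pending);
	} while (done == 64);	/* stop once a poll under-fills the budget */
	return 0;
}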
3905
3906/**
3907 * ixgbe_tx_timeout - Respond to a Tx Hang
3908 * @netdev: network interface device structure
3909 **/
3910static void ixgbe_tx_timeout(struct net_device *netdev)
3911{
3912 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3913
3914 /* Do the reset outside of interrupt context */
3915 schedule_work(&adapter->reset_task);
3916}
3917
3918static void ixgbe_reset_task(struct work_struct *work)
3919{
3920 struct ixgbe_adapter *adapter;
3921 adapter = container_of(work, struct ixgbe_adapter, reset_task);
3922
2f90b865
AD
3923 /* If we're already down or resetting, just bail */
3924 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
3925 test_bit(__IXGBE_RESETTING, &adapter->state))
3926 return;
3927
9a799d71
AK
3928 adapter->tx_timeout_count++;
3929
dcd79aeb
TI
3930 ixgbe_dump(adapter);
3931 netdev_err(adapter->netdev, "Reset adapter\n");
d4f80882 3932 ixgbe_reinit_locked(adapter);
9a799d71
AK
3933}
3934
bc97114d
PWJ
3935#ifdef CONFIG_IXGBE_DCB
3936static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
b9804972 3937{
bc97114d 3938 bool ret = false;
0cefafad 3939 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
b9804972 3940
0cefafad
JB
3941 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
3942 return ret;
3943
3944 f->mask = 0x7 << 3;
3945 adapter->num_rx_queues = f->indices;
3946 adapter->num_tx_queues = f->indices;
3947 ret = true;
2f90b865 3948
bc97114d
PWJ
3949 return ret;
3950}
3951#endif
3952
4df10466
JB
3953/**
3954 * ixgbe_set_rss_queues: Allocate queues for RSS
3955 * @adapter: board private structure to initialize
3956 *
3957 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
3958 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
3959 *
3960 **/
bc97114d
PWJ
3961static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
3962{
3963 bool ret = false;
0cefafad 3964 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
bc97114d
PWJ
3965
3966 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
0cefafad
JB
3967 f->mask = 0xF;
3968 adapter->num_rx_queues = f->indices;
3969 adapter->num_tx_queues = f->indices;
bc97114d
PWJ
3970 ret = true;
3971 } else {
bc97114d 3972 ret = false;
b9804972
JB
3973 }
3974
bc97114d
PWJ
3975 return ret;
3976}
3977
c4cf55e5
PWJ
3978/**
3979 * ixgbe_set_fdir_queues: Allocate queues for Flow Director
3980 * @adapter: board private structure to initialize
3981 *
3982 * Flow Director is an advanced Rx filter, attempting to get Rx flows back
3983 * to the original CPU that initiated the Tx session. This runs in addition
3984 * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
3985 * Rx load across CPUs using RSS.
3986 *
3987 **/
e8e9f696 3988static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
c4cf55e5
PWJ
3989{
3990 bool ret = false;
3991 struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
3992
3993 f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
3994 f_fdir->mask = 0;
3995
3996 /* Flow Director must have RSS enabled */
3997 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
3998 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
3999 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) {
4000 adapter->num_tx_queues = f_fdir->indices;
4001 adapter->num_rx_queues = f_fdir->indices;
4002 ret = true;
4003 } else {
4004 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
4005 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4006 }
4007 return ret;
4008}
4009
0331a832
YZ
4010#ifdef IXGBE_FCOE
4011/**
4012 * ixgbe_set_fcoe_queues: Allocate queues for Fibre Channel over Ethernet (FCoE)
4013 * @adapter: board private structure to initialize
4014 *
4015 * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
4016 * The ring feature mask is not used as a mask for FCoE, since FCoE can take
4017 * any 8 rx queues out of the maximum number of rx queues; instead, the mask is
4018 * used as the index of the first rx queue used by FCoE.
4019 *
4020 **/
4021static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
4022{
4023 bool ret = false;
4024 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
4025
4026 f->indices = min((int)num_online_cpus(), f->indices);
4027 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
8de8b2e6
YZ
4028 adapter->num_rx_queues = 1;
4029 adapter->num_tx_queues = 1;
0331a832
YZ
4030#ifdef CONFIG_IXGBE_DCB
4031 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
396e799c 4032 e_info(probe, "FCoE enabled with DCB\n");
0331a832
YZ
4033 ixgbe_set_dcb_queues(adapter);
4034 }
4035#endif
4036 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
396e799c 4037 e_info(probe, "FCoE enabled with RSS\n");
8faa2a78
YZ
4038 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4039 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
4040 ixgbe_set_fdir_queues(adapter);
4041 else
4042 ixgbe_set_rss_queues(adapter);
0331a832
YZ
4043 }
4044 /* adding FCoE rx rings to the end */
4045 f->mask = adapter->num_rx_queues;
4046 adapter->num_rx_queues += f->indices;
8de8b2e6 4047 adapter->num_tx_queues += f->indices;
0331a832
YZ
4048
4049 ret = true;
4050 }
4051
4052 return ret;
4053}
4054
4055#endif /* IXGBE_FCOE */
1cdd1ec8
GR
4056/**
4057 * ixgbe_set_sriov_queues: Allocate queues for IOV use
4058 * @adapter: board private structure to initialize
4059 *
4060 * IOV doesn't actually use anything, so just NAK the
4061 * request for now and let the other queue routines
4062 * figure out what to do.
4063 */
4064static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
4065{
4066 return false;
4067}
4068
4df10466
JB
4069/*
4070 * ixgbe_set_num_queues: Allocate queues for device, feature dependent
4071 * @adapter: board private structure to initialize
4072 *
4073 * This is the top level queue allocation routine. The order here is very
4074 * important, starting with the "most" number of features turned on at once,
4075 * and ending with the smallest set of features. This way large combinations
4076 * can be allocated if they're turned on, and smaller combinations are the
4077 * fallthrough conditions.
4078 *
4079 **/
847f53ff 4080static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
bc97114d 4081{
1cdd1ec8
GR
4082 /* Start with base case */
4083 adapter->num_rx_queues = 1;
4084 adapter->num_tx_queues = 1;
4085 adapter->num_rx_pools = adapter->num_rx_queues;
4086 adapter->num_rx_queues_per_pool = 1;
4087
4088 if (ixgbe_set_sriov_queues(adapter))
847f53ff 4089 goto done;
1cdd1ec8 4090
0331a832
YZ
4091#ifdef IXGBE_FCOE
4092 if (ixgbe_set_fcoe_queues(adapter))
4093 goto done;
4094
4095#endif /* IXGBE_FCOE */
bc97114d
PWJ
4096#ifdef CONFIG_IXGBE_DCB
4097 if (ixgbe_set_dcb_queues(adapter))
af22ab1b 4098 goto done;
bc97114d
PWJ
4099
4100#endif
c4cf55e5
PWJ
4101 if (ixgbe_set_fdir_queues(adapter))
4102 goto done;
4103
bc97114d 4104 if (ixgbe_set_rss_queues(adapter))
af22ab1b
WF
4105 goto done;
4106
4107 /* fallback to base case */
4108 adapter->num_rx_queues = 1;
4109 adapter->num_tx_queues = 1;
4110
4111done:
847f53ff 4112 /* Notify the stack of the (possibly) reduced queue counts. */
f0796d5c 4113 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
847f53ff
BH
4114 return netif_set_real_num_rx_queues(adapter->netdev,
4115 adapter->num_rx_queues);
b9804972
JB
4116}
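
/*
 * The routine above tries each feature's queue layout in strict priority
 * order and stops at the first that claims the device. A compressed
 * sketch of the same first-match-wins chain; the predicates and their
 * results are invented for illustration.
 */
#include <stdio.h>

static int try_sriov(void) { return 0; }	/* not enabled */
static int try_fcoe(void)  { return 0; }	/* not enabled */
static int try_dcb(void)   { return 0; }	/* not enabled */
static int try_fdir(void)  { return 0; }	/* not enabled */
static int try_rss(void)   { return 1; }	/* base multiqueue mode */

int main(void)
{
	const char *mode = "single queue";	/* final fallback */

	if (try_sriov())
		mode = "sriov";
	else if (try_fcoe())
		mode = "fcoe";
	else if (try_dcb())
		mode = "dcb";
	else if (try_fdir())
		mode = "fdir";
	else if (try_rss())
		mode = "rss";

	printf("queue layout: %s\n", mode);
	return 0;
}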
4117
021230d4 4118static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
e8e9f696 4119 int vectors)
021230d4
AV
4120{
4121 int err, vector_threshold;
4122
4123 /* We'll want at least 3 (vector_threshold):
4124 * 1) TxQ[0] Cleanup
4125 * 2) RxQ[0] Cleanup
4126 * 3) Other (Link Status Change, etc.)
4127 * 4) TCP Timer (optional)
4128 */
4129 vector_threshold = MIN_MSIX_COUNT;
4130
4131 /* The more we get, the more we will assign to Tx/Rx Cleanup
4132 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
4133 * Right now, we simply care about how many we'll get; we'll
4134 * set them up later while requesting irq's.
4135 */
4136 while (vectors >= vector_threshold) {
4137 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
e8e9f696 4138 vectors);
021230d4
AV
4139 if (!err) /* Success in acquiring all requested vectors. */
4140 break;
4141 else if (err < 0)
4142 vectors = 0; /* Nasty failure, quit now */
4143 else /* err == number of vectors we should try again with */
4144 vectors = err;
4145 }
4146
4147 if (vectors < vector_threshold) {
4148 /* Can't allocate enough MSI-X interrupts? Oh well.
4149 * This just means we'll go with either a single MSI
4150 * vector or fall back to legacy interrupts.
4151 */
849c4542
ET
4152 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
4153 "Unable to allocate MSI-X interrupts\n");
021230d4
AV
4154 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
4155 kfree(adapter->msix_entries);
4156 adapter->msix_entries = NULL;
021230d4
AV
4157 } else {
4158 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
eb7f139c
PWJ
4159 /*
4160 * Adjust for only the vectors we'll use, which is minimum
4161 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
4162 * vectors we were allocated.
4163 */
4164 adapter->num_msix_vectors = min(vectors,
e8e9f696 4165 adapter->max_msix_q_vectors + NON_Q_VECTORS);
021230d4
AV
4166 }
4167}
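
/*
 * pci_enable_msix(), as used above, returns 0 on success, a negative
 * errno on hard failure, and a positive count of vectors the platform
 * could actually provide. The stub below fakes that contract so the
 * retry loop can run standalone; the platform limit of 5 is arbitrary.
 */
#include <stdio.h>

static int fake_pci_enable_msix(int requested)
{
	const int available = 5;	/* pretend the platform has 5 vectors */

	return (requested <= available) ? 0 : available;
}

int main(void)
{
	int vectors = 16;		/* what we would like */
	const int threshold = 3;	/* minimum we can work with */

	while (vectors >= threshold) {
		int err = fake_pci_enable_msix(vectors);

		if (!err)
			break;		/* got everything we asked for */
		else if (err < 0)
			vectors = 0;	/* hard failure: give up on MSI-X */
		else
			vectors = err;	/* retry with what is available */
	}
	printf("acquired %d MSI-X vectors\n", vectors >= threshold ? vectors : 0);
	return 0;
}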
4168
021230d4 4169/**
bc97114d 4170 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
021230d4
AV
4171 * @adapter: board private structure to initialize
4172 *
bc97114d
PWJ
4173 * Cache the descriptor ring offsets for RSS to the assigned rings.
4174 *
021230d4 4175 **/
bc97114d 4176static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
021230d4 4177{
bc97114d
PWJ
4178 int i;
4179 bool ret = false;
4180
4181 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4182 for (i = 0; i < adapter->num_rx_queues; i++)
4a0b9ca0 4183 adapter->rx_ring[i]->reg_idx = i;
bc97114d 4184 for (i = 0; i < adapter->num_tx_queues; i++)
4a0b9ca0 4185 adapter->tx_ring[i]->reg_idx = i;
bc97114d
PWJ
4186 ret = true;
4187 } else {
4188 ret = false;
4189 }
4190
4191 return ret;
4192}
4193
4194#ifdef CONFIG_IXGBE_DCB
4195/**
4196 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
4197 * @adapter: board private structure to initialize
4198 *
4199 * Cache the descriptor ring offsets for DCB to the assigned rings.
4200 *
4201 **/
4202static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
4203{
4204 int i;
4205 bool ret = false;
4206 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
4207
4208 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4209 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2f90b865
AD
4210 /* the number of queues is assumed to be symmetric */
4211 for (i = 0; i < dcb_i; i++) {
4a0b9ca0
PW
4212 adapter->rx_ring[i]->reg_idx = i << 3;
4213 adapter->tx_ring[i]->reg_idx = i << 2;
2f90b865 4214 }
bc97114d 4215 ret = true;
e8e26350 4216 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
f92ef202
PW
4217 if (dcb_i == 8) {
4218 /*
4219 * Tx TC0 starts at: descriptor queue 0
4220 * Tx TC1 starts at: descriptor queue 32
4221 * Tx TC2 starts at: descriptor queue 64
4222 * Tx TC3 starts at: descriptor queue 80
4223 * Tx TC4 starts at: descriptor queue 96
4224 * Tx TC5 starts at: descriptor queue 104
4225 * Tx TC6 starts at: descriptor queue 112
4226 * Tx TC7 starts at: descriptor queue 120
4227 *
4228 * Rx TC0-TC7 are offset by 16 queues each
4229 */
4230 for (i = 0; i < 3; i++) {
4a0b9ca0
PW
4231 adapter->tx_ring[i]->reg_idx = i << 5;
4232 adapter->rx_ring[i]->reg_idx = i << 4;
f92ef202
PW
4233 }
4234 for ( ; i < 5; i++) {
4a0b9ca0 4235 adapter->tx_ring[i]->reg_idx =
e8e9f696 4236 ((i + 2) << 4);
4a0b9ca0 4237 adapter->rx_ring[i]->reg_idx = i << 4;
f92ef202
PW
4238 }
4239 for ( ; i < dcb_i; i++) {
4a0b9ca0 4240 adapter->tx_ring[i]->reg_idx =
e8e9f696 4241 ((i + 8) << 3);
4a0b9ca0 4242 adapter->rx_ring[i]->reg_idx = i << 4;
f92ef202
PW
4243 }
4244
4245 ret = true;
4246 } else if (dcb_i == 4) {
4247 /*
4248 * Tx TC0 starts at: descriptor queue 0
4249 * Tx TC1 starts at: descriptor queue 64
4250 * Tx TC2 starts at: descriptor queue 96
4251 * Tx TC3 starts at: descriptor queue 112
4252 *
4253 * Rx TC0-TC3 are offset by 32 queues each
4254 */
4a0b9ca0
PW
4255 adapter->tx_ring[0]->reg_idx = 0;
4256 adapter->tx_ring[1]->reg_idx = 64;
4257 adapter->tx_ring[2]->reg_idx = 96;
4258 adapter->tx_ring[3]->reg_idx = 112;
f92ef202 4259 for (i = 0 ; i < dcb_i; i++)
4a0b9ca0 4260 adapter->rx_ring[i]->reg_idx = i << 5;
f92ef202
PW
4261
4262 ret = true;
4263 } else {
4264 ret = false;
e8e26350 4265 }
bc97114d
PWJ
4266 } else {
4267 ret = false;
021230d4 4268 }
bc97114d
PWJ
4269 } else {
4270 ret = false;
021230d4 4271 }
bc97114d
PWJ
4272
4273 return ret;
4274}
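
/*
 * A standalone sketch of the 82599 8-TC Tx offset math above: the first
 * three TCs are spaced 32 queues apart, the next two 16 apart, and the
 * rest 8 apart, reproducing the 0/32/64/80/96/104/112/120 table from the
 * comment.
 */
#include <stdio.h>

int main(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		int reg_idx;

		if (i < 3)
			reg_idx = i << 5;	/* 0, 32, 64 */
		else if (i < 5)
			reg_idx = (i + 2) << 4;	/* 80, 96 */
		else
			reg_idx = (i + 8) << 3;	/* 104, 112, 120 */
		printf("TC%d tx base: %d\n", i, reg_idx);
	}
	return 0;
}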
4275#endif
4276
c4cf55e5
PWJ
4277/**
4278 * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
4279 * @adapter: board private structure to initialize
4280 *
4281 * Cache the descriptor ring offsets for Flow Director to the assigned rings.
4282 *
4283 **/
e8e9f696 4284static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
c4cf55e5
PWJ
4285{
4286 int i;
4287 bool ret = false;
4288
4289 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
4290 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4291 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
4292 for (i = 0; i < adapter->num_rx_queues; i++)
4a0b9ca0 4293 adapter->rx_ring[i]->reg_idx = i;
c4cf55e5 4294 for (i = 0; i < adapter->num_tx_queues; i++)
4a0b9ca0 4295 adapter->tx_ring[i]->reg_idx = i;
c4cf55e5
PWJ
4296 ret = true;
4297 }
4298
4299 return ret;
4300}
4301
0331a832
YZ
4302#ifdef IXGBE_FCOE
4303/**
4304 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
4305 * @adapter: board private structure to initialize
4306 *
4307 * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
4308 *
4309 */
4310static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
4311{
8de8b2e6 4312 int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
0331a832
YZ
4313 bool ret = false;
4314 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
4315
4316 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
4317#ifdef CONFIG_IXGBE_DCB
4318 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
8de8b2e6
YZ
4319 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
4320
0331a832 4321 ixgbe_cache_ring_dcb(adapter);
8de8b2e6 4322 /* find out queues in TC for FCoE */
4a0b9ca0
PW
4323 fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
4324 fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
8de8b2e6
YZ
4325 /*
4326 * In 82599, the number of Tx queues for each traffic
4327 * class for both 8-TC and 4-TC modes are:
4328 * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
4329 * 8 TCs: 32 32 16 16 8 8 8 8
4330 * 4 TCs: 64 64 32 32
4331			 * We have max 8 queues for FCoE, where 8 is the
4332 * FCoE redirection table size. If TC for FCoE is
4333 * less than or equal to TC3, we have enough queues
4334 * to add max of 8 queues for FCoE, so we start FCoE
4335 * tx descriptor from the next one, i.e., reg_idx + 1.
4336 * If TC for FCoE is above TC3, implying 8 TC mode,
4337 * and we need 8 for FCoE, we have to take all queues
4338 * in that traffic class for FCoE.
4339 */
4340 if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
4341 fcoe_tx_i--;
0331a832
YZ
4342 }
4343#endif /* CONFIG_IXGBE_DCB */
4344 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
8faa2a78
YZ
4345 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4346 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
4347 ixgbe_cache_ring_fdir(adapter);
4348 else
4349 ixgbe_cache_ring_rss(adapter);
4350
8de8b2e6
YZ
4351 fcoe_rx_i = f->mask;
4352 fcoe_tx_i = f->mask;
4353 }
4354 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
4a0b9ca0
PW
4355 adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
4356 adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
0331a832 4357 }
0331a832
YZ
4358 ret = true;
4359 }
4360 return ret;
4361}
4362
4363#endif /* IXGBE_FCOE */
1cdd1ec8
GR
4364/**
4365 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
4366 * @adapter: board private structure to initialize
4367 *
4368 * SR-IOV doesn't use any descriptor rings but changes the default if
4369 * no other mapping is used.
4370 *
4371 */
4372static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
4373{
4a0b9ca0
PW
4374 adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
4375 adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
1cdd1ec8
GR
4376 if (adapter->num_vfs)
4377 return true;
4378 else
4379 return false;
4380}
4381
bc97114d
PWJ
4382/**
4383 * ixgbe_cache_ring_register - Descriptor ring to register mapping
4384 * @adapter: board private structure to initialize
4385 *
4386 * Once we know the feature-set enabled for the device, we'll cache
4387 * the register offset the descriptor ring is assigned to.
4388 *
4389 * Note, the order the various feature calls is important. It must start with
4390 * the "most" features enabled at the same time, then trickle down to the
4391 * least amount of features turned on at once.
4392 **/
4393static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
4394{
4395 /* start with default case */
4a0b9ca0
PW
4396 adapter->rx_ring[0]->reg_idx = 0;
4397 adapter->tx_ring[0]->reg_idx = 0;
bc97114d 4398
1cdd1ec8
GR
4399 if (ixgbe_cache_ring_sriov(adapter))
4400 return;
4401
0331a832
YZ
4402#ifdef IXGBE_FCOE
4403 if (ixgbe_cache_ring_fcoe(adapter))
4404 return;
4405
4406#endif /* IXGBE_FCOE */
bc97114d
PWJ
4407#ifdef CONFIG_IXGBE_DCB
4408 if (ixgbe_cache_ring_dcb(adapter))
4409 return;
4410
4411#endif
c4cf55e5
PWJ
4412 if (ixgbe_cache_ring_fdir(adapter))
4413 return;
4414
bc97114d
PWJ
4415 if (ixgbe_cache_ring_rss(adapter))
4416 return;
021230d4
AV
4417}
4418
9a799d71
AK
4419/**
4420 * ixgbe_alloc_queues - Allocate memory for all rings
4421 * @adapter: board private structure to initialize
4422 *
4423 * We allocate one ring per queue at run-time since we don't know the
4df10466
JB
4424 * number of queues at compile-time.
9a799d71 4426 **/
2f90b865 4427static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
9a799d71
AK
4428{
4429 int i;
b6ec895e 4430 int rx_count;
4a0b9ca0 4431 int orig_node = adapter->node;
9a799d71 4432
021230d4 4433 for (i = 0; i < adapter->num_tx_queues; i++) {
4a0b9ca0
PW
4434 struct ixgbe_ring *ring = adapter->tx_ring[i];
4435 if (orig_node == -1) {
4436 int cur_node = next_online_node(adapter->node);
4437 if (cur_node == MAX_NUMNODES)
4438 cur_node = first_online_node;
4439 adapter->node = cur_node;
4440 }
4441 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
e8e9f696 4442 adapter->node);
4a0b9ca0
PW
4443 if (!ring)
4444 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
4445 if (!ring)
4446 goto err_tx_ring_allocation;
4447 ring->count = adapter->tx_ring_count;
4448 ring->queue_index = i;
b6ec895e 4449 ring->dev = &adapter->pdev->dev;
fc77dc3c 4450 ring->netdev = adapter->netdev;
4a0b9ca0
PW
4451 ring->numa_node = adapter->node;
4452
4453 adapter->tx_ring[i] = ring;
021230d4 4454 }
b9804972 4455
4a0b9ca0
PW
4456 /* Restore the adapter's original node */
4457 adapter->node = orig_node;
4458
b6ec895e 4459 rx_count = adapter->rx_ring_count;
9a799d71 4460 for (i = 0; i < adapter->num_rx_queues; i++) {
4a0b9ca0
PW
4461 struct ixgbe_ring *ring = adapter->rx_ring[i];
4462 if (orig_node == -1) {
4463 int cur_node = next_online_node(adapter->node);
4464 if (cur_node == MAX_NUMNODES)
4465 cur_node = first_online_node;
4466 adapter->node = cur_node;
4467 }
4468 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
e8e9f696 4469 adapter->node);
4a0b9ca0
PW
4470 if (!ring)
4471 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
4472 if (!ring)
4473 goto err_rx_ring_allocation;
b6ec895e 4474 ring->count = rx_count;
4a0b9ca0 4475 ring->queue_index = i;
b6ec895e 4476 ring->dev = &adapter->pdev->dev;
fc77dc3c 4477 ring->netdev = adapter->netdev;
4a0b9ca0
PW
4478 ring->numa_node = adapter->node;
4479
4480 adapter->rx_ring[i] = ring;
021230d4
AV
4481 }
4482
4a0b9ca0
PW
4483 /* Restore the adapter's original node */
4484 adapter->node = orig_node;
4485
021230d4
AV
4486 ixgbe_cache_ring_register(adapter);
4487
4488 return 0;
4489
4490err_rx_ring_allocation:
4a0b9ca0
PW
4491 for (i = 0; i < adapter->num_tx_queues; i++)
4492 kfree(adapter->tx_ring[i]);
021230d4
AV
4493err_tx_ring_allocation:
4494 return -ENOMEM;
4495}
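
/*
 * The ring allocation above prefers node-local memory (kzalloc_node) and
 * falls back to any node (kzalloc) when that fails. A userspace sketch
 * of the same try-preferred-then-fallback shape, with the node-local
 * allocator stubbed out to always fail:
 */
#include <stdio.h>
#include <stdlib.h>

static void *alloc_on_node(size_t size, int node)
{
	(void)size;
	(void)node;
	return NULL;	/* simulate node-local exhaustion */
}

static void *alloc_ring(size_t size, int node)
{
	void *ring = alloc_on_node(size, node);

	if (!ring)
		ring = calloc(1, size);	/* fall back to any node */
	return ring;
}

int main(void)
{
	void *ring = alloc_ring(256, 0);

	printf("ring %s\n", ring ? "allocated via fallback" : "failed");
	free(ring);
	return 0;
}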
4496
4497/**
4498 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
4499 * @adapter: board private structure to initialize
4500 *
4501 * Attempt to configure the interrupts using the best available
4502 * capabilities of the hardware and the kernel.
4503 **/
feea6a57 4504static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
021230d4 4505{
8be0e467 4506 struct ixgbe_hw *hw = &adapter->hw;
021230d4
AV
4507 int err = 0;
4508 int vector, v_budget;
4509
4510 /*
4511 * It's easy to be greedy for MSI-X vectors, but it really
4512 * doesn't do us much good if we have a lot more vectors
4513	 * than CPUs. So let's be conservative and only ask for
342bde1b 4514	 * (roughly) the same number of vectors as there are CPUs.
021230d4
AV
4515 */
4516 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
e8e9f696 4517 (int)num_online_cpus()) + NON_Q_VECTORS;
021230d4
AV
4518
4519 /*
4520 * At the same time, hardware can only support a maximum of
8be0e467
PW
4521 * hw.mac->max_msix_vectors vectors. With features
4522 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
4523 * descriptor queues supported by our device. Thus, we cap it off in
4524 * those rare cases where the cpu count also exceeds our vector limit.
021230d4 4525 */
8be0e467 4526 v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
021230d4
AV
4527
4528 /* A failure in MSI-X entry allocation isn't fatal, but it does
4529 * mean we disable MSI-X capabilities of the adapter. */
4530 adapter->msix_entries = kcalloc(v_budget,
e8e9f696 4531 sizeof(struct msix_entry), GFP_KERNEL);
7a921c93
AD
4532 if (adapter->msix_entries) {
4533 for (vector = 0; vector < v_budget; vector++)
4534 adapter->msix_entries[vector].entry = vector;
021230d4 4535
7a921c93 4536 ixgbe_acquire_msix_vectors(adapter, v_budget);
021230d4 4537
7a921c93
AD
4538 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
4539 goto out;
4540 }
26d27844 4541
7a921c93
AD
4542 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
4543 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
c4cf55e5
PWJ
4544 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
4545 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4546 adapter->atr_sample_rate = 0;
1cdd1ec8
GR
4547 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4548 ixgbe_disable_sriov(adapter);
4549
847f53ff
BH
4550 err = ixgbe_set_num_queues(adapter);
4551 if (err)
4552 return err;
021230d4 4553
021230d4
AV
4554 err = pci_enable_msi(adapter->pdev);
4555 if (!err) {
4556 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
4557 } else {
849c4542
ET
4558 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
4559 "Unable to allocate MSI interrupt, "
4560 "falling back to legacy. Error: %d\n", err);
021230d4
AV
4561 /* reset err */
4562 err = 0;
4563 }
4564
4565out:
021230d4
AV
4566 return err;
4567}
4568
7a921c93
AD
4569/**
4570 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
4571 * @adapter: board private structure to initialize
4572 *
4573 * We allocate one q_vector per queue interrupt. If allocation fails we
4574 * return -ENOMEM.
4575 **/
4576static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
4577{
4578 int q_idx, num_q_vectors;
4579 struct ixgbe_q_vector *q_vector;
4580 int napi_vectors;
4581 int (*poll)(struct napi_struct *, int);
4582
4583 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4584 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
4585 napi_vectors = adapter->num_rx_queues;
91281fd3 4586 poll = &ixgbe_clean_rxtx_many;
7a921c93
AD
4587 } else {
4588 num_q_vectors = 1;
4589 napi_vectors = 1;
4590 poll = &ixgbe_poll;
4591 }
4592
4593 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1a6c14a2 4594 q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
e8e9f696 4595 GFP_KERNEL, adapter->node);
1a6c14a2
JB
4596 if (!q_vector)
4597 q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
e8e9f696 4598 GFP_KERNEL);
7a921c93
AD
4599 if (!q_vector)
4600 goto err_out;
4601 q_vector->adapter = adapter;
f7554a2b
NS
4602 if (q_vector->txr_count && !q_vector->rxr_count)
4603 q_vector->eitr = adapter->tx_eitr_param;
4604 else
4605 q_vector->eitr = adapter->rx_eitr_param;
fe49f04a 4606 q_vector->v_idx = q_idx;
91281fd3 4607 netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
7a921c93
AD
4608 adapter->q_vector[q_idx] = q_vector;
4609 }
4610
4611 return 0;
4612
4613err_out:
4614 while (q_idx) {
4615 q_idx--;
4616 q_vector = adapter->q_vector[q_idx];
4617 netif_napi_del(&q_vector->napi);
4618 kfree(q_vector);
4619 adapter->q_vector[q_idx] = NULL;
4620 }
4621 return -ENOMEM;
4622}
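
/*
 * The err_out path above unwinds only the q_vectors allocated before the
 * failure, walking q_idx back down. A sketch of that partial-unwind
 * shape with plain malloc; the failure point is staged at i == 2.
 */
#include <stdio.h>
#include <stdlib.h>

#define NVEC 4

int main(void)
{
	void *vec[NVEC] = { 0 };
	int i;

	for (i = 0; i < NVEC; i++) {
		vec[i] = (i < 2) ? malloc(64) : NULL;	/* fail at i == 2 */
		if (!vec[i])
			goto err_out;
	}
	return 0;

err_out:
	printf("failed at %d, unwinding\n", i);
	while (i) {		/* free only what was allocated: i-1 .. 0 */
		i--;
		free(vec[i]);
		vec[i] = NULL;
	}
	return 1;
}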
4623
4624/**
4625 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
4626 * @adapter: board private structure to initialize
4627 *
4628 * This function frees the memory allocated to the q_vectors. In addition if
4629 * NAPI is enabled it will delete any references to the NAPI struct prior
4630 * to freeing the q_vector.
4631 **/
4632static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
4633{
4634 int q_idx, num_q_vectors;
7a921c93 4635
91281fd3 4636 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
7a921c93 4637 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
91281fd3 4638 else
7a921c93 4639 num_q_vectors = 1;
7a921c93
AD
4640
4641 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
4642 struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
7a921c93 4643 adapter->q_vector[q_idx] = NULL;
91281fd3 4644 netif_napi_del(&q_vector->napi);
7a921c93
AD
4645 kfree(q_vector);
4646 }
4647}
4648
7b25cdba 4649static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
021230d4
AV
4650{
4651 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4652 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
4653 pci_disable_msix(adapter->pdev);
4654 kfree(adapter->msix_entries);
4655 adapter->msix_entries = NULL;
4656 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
4657 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
4658 pci_disable_msi(adapter->pdev);
4659 }
021230d4
AV
4660}
4661
4662/**
4663 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
4664 * @adapter: board private structure to initialize
4665 *
4666 * We determine which interrupt scheme to use based on...
4667 * - Kernel support (MSI, MSI-X)
4668 * - which can be user-defined (via MODULE_PARAM)
4669 * - Hardware queue count (num_*_queues)
4670 * - defined by miscellaneous hardware support/features (RSS, etc.)
4671 **/
2f90b865 4672int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
021230d4
AV
4673{
4674 int err;
4675
4676 /* Number of supported queues */
847f53ff
BH
4677 err = ixgbe_set_num_queues(adapter);
4678 if (err)
4679 return err;
021230d4 4680
021230d4
AV
4681 err = ixgbe_set_interrupt_capability(adapter);
4682 if (err) {
849c4542 4683 e_dev_err("Unable to setup interrupt capabilities\n");
021230d4 4684 goto err_set_interrupt;
9a799d71
AK
4685 }
4686
7a921c93
AD
4687 err = ixgbe_alloc_q_vectors(adapter);
4688 if (err) {
849c4542 4689 e_dev_err("Unable to allocate memory for queue vectors\n");
7a921c93
AD
4690 goto err_alloc_q_vectors;
4691 }
4692
4693 err = ixgbe_alloc_queues(adapter);
4694 if (err) {
849c4542 4695 e_dev_err("Unable to allocate memory for queues\n");
7a921c93
AD
4696 goto err_alloc_queues;
4697 }
4698
849c4542 4699 e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
396e799c
ET
4700 (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
4701 adapter->num_rx_queues, adapter->num_tx_queues);
021230d4
AV
4702
4703 set_bit(__IXGBE_DOWN, &adapter->state);
4704
9a799d71 4705 return 0;
021230d4 4706
7a921c93
AD
4707err_alloc_queues:
4708 ixgbe_free_q_vectors(adapter);
4709err_alloc_q_vectors:
4710 ixgbe_reset_interrupt_capability(adapter);
021230d4 4711err_set_interrupt:
7a921c93
AD
4712 return err;
4713}
4714
1a51502b
ED
4715static void ring_free_rcu(struct rcu_head *head)
4716{
4717 kfree(container_of(head, struct ixgbe_ring, rcu));
4718}
4719
7a921c93
AD
4720/**
4721 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
4722 * @adapter: board private structure to clear interrupt scheme on
4723 *
4724 * We go through and clear interrupt specific resources and reset the structure
4725 * to pre-load conditions
4726 **/
4727void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
4728{
4a0b9ca0
PW
4729 int i;
4730
4731 for (i = 0; i < adapter->num_tx_queues; i++) {
4732 kfree(adapter->tx_ring[i]);
4733 adapter->tx_ring[i] = NULL;
4734 }
4735 for (i = 0; i < adapter->num_rx_queues; i++) {
1a51502b
ED
4736 struct ixgbe_ring *ring = adapter->rx_ring[i];
4737
4738 /* ixgbe_get_stats64() might access this ring, we must wait
4739 * a grace period before freeing it.
4740 */
4741 call_rcu(&ring->rcu, ring_free_rcu);
4a0b9ca0
PW
4742 adapter->rx_ring[i] = NULL;
4743 }
7a921c93
AD
4744
4745 ixgbe_free_q_vectors(adapter);
4746 ixgbe_reset_interrupt_capability(adapter);
9a799d71
AK
4747}
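
/*
 * ring_free_rcu() above recovers the ring from the rcu_head embedded in
 * it via container_of(). A userspace sketch of that pointer arithmetic;
 * the deferral itself is elided (the callback is invoked directly rather
 * than through call_rcu), and the types are invented.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_rcu_head { void (*func)(struct fake_rcu_head *); };

struct fake_ring {
	int count;
	struct fake_rcu_head rcu;	/* embedded, not the first member */
};

static void ring_free_cb(struct fake_rcu_head *head)
{
	struct fake_ring *ring = container_of(head, struct fake_ring, rcu);

	printf("freeing ring with count %d\n", ring->count);
	free(ring);
}

int main(void)
{
	struct fake_ring *ring = calloc(1, sizeof(*ring));

	if (!ring)
		return 1;
	ring->count = 512;
	ring_free_cb(&ring->rcu);	/* call_rcu() would defer this */
	return 0;
}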
4748
c4900be0
DS
4749/**
4750 * ixgbe_sfp_timer - timer that kicks off the search for a missing module
4751 * @data: pointer to our adapter struct
4752 **/
4753static void ixgbe_sfp_timer(unsigned long data)
4754{
4755 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
4756
4df10466
JB
4757 /*
4758 * Do the sfp_timer outside of interrupt context due to the
c4900be0
DS
4759 * delays that sfp+ detection requires
4760 */
4761 schedule_work(&adapter->sfp_task);
4762}
4763
4764/**
4765 * ixgbe_sfp_task - worker thread to find a missing module
4766 * @work: pointer to work_struct containing our data
4767 **/
4768static void ixgbe_sfp_task(struct work_struct *work)
4769{
4770 struct ixgbe_adapter *adapter = container_of(work,
e8e9f696
JP
4771 struct ixgbe_adapter,
4772 sfp_task);
c4900be0
DS
4773 struct ixgbe_hw *hw = &adapter->hw;
4774
4775 if ((hw->phy.type == ixgbe_phy_nl) &&
4776 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4777 s32 ret = hw->phy.ops.identify_sfp(hw);
63d6e1d8 4778 if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
c4900be0
DS
4779 goto reschedule;
4780 ret = hw->phy.ops.reset(hw);
4781 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
849c4542
ET
4782 e_dev_err("failed to initialize because an unsupported "
4783 "SFP+ module type was detected.\n");
4784 e_dev_err("Reload the driver after installing a "
4785 "supported module.\n");
c4900be0
DS
4786 unregister_netdev(adapter->netdev);
4787 } else {
396e799c 4788 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
c4900be0
DS
4789 }
4790 /* don't need this routine any more */
4791 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
4792 }
4793 return;
4794reschedule:
4795 if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
4796 mod_timer(&adapter->sfp_timer,
e8e9f696 4797 round_jiffies(jiffies + (2 * HZ)));
c4900be0
DS
4798}
4799
9a799d71
AK
4800/**
4801 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
4802 * @adapter: board private structure to initialize
4803 *
4804 * ixgbe_sw_init initializes the Adapter private data structure.
4805 * Fields are initialized based on PCI device information and
4806 * OS network device settings (MTU size).
4807 **/
static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *dev = adapter->netdev;
	unsigned int rss;
#ifdef CONFIG_IXGBE_DCB
	int j;
	struct tc_configuration *tc;
#endif
	int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Set capability flags */
	rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
	adapter->ring_feature[RING_F_RSS].indices = rss;
	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
	adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
	if (hw->mac.type == ixgbe_mac_82598EB) {
		if (hw->device_id == IXGBE_DEV_ID_82598AT)
			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
	} else if (hw->mac.type == ixgbe_mac_82599EB) {
		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
		adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
		if (dev->features & NETIF_F_NTUPLE) {
			/* Flow Director perfect filter enabled */
			adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
			adapter->atr_sample_rate = 0;
			spin_lock_init(&adapter->fdir_perfect_lock);
		} else {
			/* Flow Director hash filters enabled */
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
			adapter->atr_sample_rate = 20;
		}
		adapter->ring_feature[RING_F_FDIR].indices =
							 IXGBE_MAX_FDIR_INDICES;
		adapter->fdir_pballoc = 0;
#ifdef IXGBE_FCOE
		adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
		adapter->ring_feature[RING_F_FCOE].indices = 0;
#ifdef CONFIG_IXGBE_DCB
		/* Default traffic class to use for FCoE */
		adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
		adapter->fcoe.up = IXGBE_FCOE_DEFTC;
#endif
#endif /* IXGBE_FCOE */
	}

#ifdef CONFIG_IXGBE_DCB
	/* Configure DCB traffic classes */
	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
		tc = &adapter->dcb_cfg.tc_config[j];
		tc->path[DCB_TX_CONFIG].bwg_id = 0;
		tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->path[DCB_RX_CONFIG].bwg_id = 0;
		tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->dcb_pfc = pfc_disabled;
	}
	adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
	adapter->dcb_cfg.rx_pba_cfg = pba_equal;
	adapter->dcb_cfg.pfc_mode_enable = false;
	adapter->dcb_cfg.round_robin_enable = false;
	adapter->dcb_set_bitmap = 0x00;
	ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
			   adapter->ring_feature[RING_F_DCB].indices);

#endif

	/* default flow control settings */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
#ifdef CONFIG_DCB
	adapter->last_lfc_mode = hw->fc.current_mode;
#endif
	hw->fc.high_water = FC_HIGH_WATER(max_frame);
	hw->fc.low_water = FC_LOW_WATER(max_frame);
	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
	hw->fc.send_xon = true;
	hw->fc.disable_fc_autoneg = false;

	/* enable itr by default in dynamic mode */
	adapter->rx_itr_setting = 1;
	adapter->rx_eitr_param = 20000;
	adapter->tx_itr_setting = 1;
	adapter->tx_eitr_param = 10000;

	/* set defaults for eitr in MegaBytes */
	adapter->eitr_low = 10;
	adapter->eitr_high = 20;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;

	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom_params_generic(hw)) {
		e_dev_err("EEPROM initialization failed\n");
		return -EIO;
	}

	/* enable rx csum by default */
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	/* get assigned NUMA node */
	adapter->node = dev_to_node(&pdev->dev);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;
}

/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size;

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
	if (!tx_ring->tx_buffer_info)
		tx_ring->tx_buffer_info = vmalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->work_limit = tx_ring->count;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
	return -ENOMEM;
}
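
/*
 * Sizing sketch (illustrative, not driver logic): each advanced Tx
 * descriptor is a 16-byte union ixgbe_adv_tx_desc, so assuming the default
 * ring size of IXGBE_DEFAULT_TXD (512) descriptors the ring occupies
 * 512 * 16 = 8192 bytes, which ALIGN(size, 4096) above leaves unchanged;
 * a 1000-entry ring (16000 bytes) would round up to 16384. The 4K
 * alignment keeps the descriptor base page-aligned for the hardware.
 */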

/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;
		e_err(probe, "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}
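
/*
 * Caller-contract sketch (illustrative): on error the loop above leaves
 * rings [0, i-1] allocated, so a caller is expected to unwind with the
 * matching free routine, e.g.:
 *
 *	if (ixgbe_setup_all_tx_resources(adapter))
 *		ixgbe_free_all_tx_resources(adapter);
 *
 * which is how the ixgbe_open() error path below handles it.
 */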

/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size;

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vmalloc_node(size, rx_ring->numa_node);
	if (!rx_ring->rx_buffer_info)
		rx_ring->rx_buffer_info = vmalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
		if (!err)
			continue;
		e_err(probe, "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
{
	ixgbe_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbe_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
{
	ixgbe_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i]->desc)
			ixgbe_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
		return -EINVAL;

	e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	hw->fc.high_water = FC_HIGH_WATER(max_frame);
	hw->fc.low_water = FC_LOW_WATER(max_frame);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
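
/*
 * Worked example (illustrative): for a jumbo MTU of 9000, max_frame is
 * 9000 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 9018 bytes, which is what the
 * flow-control high/low watermarks above are derived from. Frame sizes
 * above IXGBE_MAX_JUMBO_FRAME_SIZE are rejected before anything changes.
 */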

/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbe_open(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBE_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = ixgbe_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbe_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbe_configure(adapter);

	err = ixgbe_request_irq(adapter);
	if (err)
		goto err_req_irq;

	err = ixgbe_up_complete(adapter);
	if (err)
		goto err_up;

	netif_tx_start_all_queues(netdev);

	return 0;

err_up:
	ixgbe_release_hw_control(adapter);
	ixgbe_free_irq(adapter);
err_req_irq:
err_setup_rx:
	ixgbe_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbe_free_all_tx_resources(adapter);
	ixgbe_reset(adapter);

	return err;
}

/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbe_close(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ixgbe_down(adapter);
	ixgbe_free_irq(adapter);

	ixgbe_free_all_tx_resources(adapter);
	ixgbe_free_all_rx_resources(adapter);

	ixgbe_release_hw_control(adapter);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/*
	 * pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		e_dev_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_wake_from_d3(pdev, false);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err) {
		e_dev_err("Cannot initialize interrupts for device\n");
		return err;
	}

	ixgbe_reset(adapter);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);

	if (netif_running(netdev)) {
		err = ixgbe_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif /* CONFIG_PM */

static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ctrl, fctrl;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbe_down(adapter);
		ixgbe_free_irq(adapter);
		ixgbe_free_all_tx_resources(adapter);
		ixgbe_free_all_rx_resources(adapter);
	}

	ixgbe_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	if (wufc) {
		ixgbe_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IXGBE_WUFC_MC) {
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_MPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}

		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		ctrl |= IXGBE_CTRL_GIO_DIS;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);

		IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
	}

	if (wufc && hw->mac.type == ixgbe_mac_82599EB)
		pci_wake_from_d3(pdev, true);
	else
		pci_wake_from_d3(pdev, false);

	*enable_wake = !!wufc;

	ixgbe_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}
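
/*
 * Wake-filter sketch (illustrative; bit names other than IXGBE_WUFC_MC are
 * assumptions about the IXGBE_WUFC_* mask set): adapter->wol holds wake-up
 * filter control bits, so a configuration such as
 *
 *	adapter->wol = IXGBE_WUFC_MAG | IXGBE_WUFC_MC;
 *
 * would arm magic-packet wake and, via the IXGBE_WUFC_MC branch above,
 * force multicast promiscuous (FCTRL.MPE) so multicast wake frames still
 * reach the filter while the device is suspended.
 */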

#ifdef CONFIG_PM
static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __ixgbe_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
#endif /* CONFIG_PM */

static void ixgbe_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__ixgbe_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u64 total_mpc = 0;
	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
	u64 bytes = 0, packets = 0;

	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		u64 rsc_count = 0;
		u64 rsc_flush = 0;
		for (i = 0; i < 16; i++)
			adapter->hw_rx_no_dma_resources +=
				IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		for (i = 0; i < adapter->num_rx_queues; i++) {
			rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
			rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
		}
		adapter->rsc_total_count = rsc_count;
		adapter->rsc_total_flush = rsc_flush;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
		non_eop_descs += rx_ring->rx_stats.non_eop_descs;
		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
		bytes += rx_ring->stats.bytes;
		packets += rx_ring->stats.packets;
	}
	adapter->non_eop_descs = non_eop_descs;
	adapter->alloc_rx_page_failed = alloc_rx_page_failed;
	adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
	netdev->stats.rx_bytes = bytes;
	netdev->stats.rx_packets = packets;

	bytes = 0;
	packets = 0;
	/* gather some stats to the adapter struct that are per queue */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
		restart_queue += tx_ring->tx_stats.restart_queue;
		tx_busy += tx_ring->tx_stats.tx_busy;
		bytes += tx_ring->stats.bytes;
		packets += tx_ring->stats.packets;
	}
	adapter->restart_queue = restart_queue;
	adapter->tx_busy = tx_busy;
	netdev->stats.tx_bytes = bytes;
	netdev->stats.tx_packets = packets;

	hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		missed_rx += mpc;
		hwstats->mpc[i] += mpc;
		total_mpc += hwstats->mpc[i];
		if (hw->mac.type == ixgbe_mac_82598EB)
			hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
		hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
		if (hw->mac.type == ixgbe_mac_82599EB) {
			hwstats->pxonrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			hwstats->pxoffrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
			hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			hwstats->pxonrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			hwstats->pxoffrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
		hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
	}
	hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* work around hardware counting issue */
	hwstats->gprc -= missed_rx;

	/* 82598 hardware only has a 32 bit counter in the high register */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		u64 tmp;
		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
		tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;
		/* 4 high bits of GORC */
		hwstats->gorc += (tmp << 32);
		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
		tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF;
		/* 4 high bits of GOTC */
		hwstats->gotc += (tmp << 32);
		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
		hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
		hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
#ifdef IXGBE_FCOE
		hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
		hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
#endif /* IXGBE_FCOE */
	} else {
		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	hwstats->bprc += bprc;
	hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		hwstats->mprc -= bprc;
	hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
	hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	hwstats->lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	hwstats->lxofftxc += lxoff;
	hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	/*
	 * 82598 errata - tx of flow control packets is included in tx counters
	 */
	xon_off_tot = lxon + lxoff;
	hwstats->gptc -= xon_off_tot;
	hwstats->mptc -= xon_off_tot;
	hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
	hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	hwstats->ptc64 -= xon_off_tot;
	hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = hwstats->mprc;

	/* Rx Errors */
	netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
	netdev->stats.rx_dropped = 0;
	netdev->stats.rx_length_errors = hwstats->rlec;
	netdev->stats.rx_crc_errors = hwstats->crcerrs;
	netdev->stats.rx_missed_errors = total_mpc;
}
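
/*
 * Counter-width note (illustrative): on 82599 the good-octets counters are
 * 36 bits wide, split across a 32-bit low register and the bottom nibble
 * of the high register. The reconstruction above is effectively
 *
 *	gorc += GORCL;				// low 32 bits
 *	gorc += (u64)(GORCH & 0xF) << 32;	// 4 high bits
 *
 * so hardware values GORCH = 0x5, GORCL = 0x89ABCDEF accumulate as
 * 0x589ABCDEF.
 */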

/**
 * ixgbe_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbe_watchdog(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u64 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/*
		 * for legacy and MSI interrupts don't set any bits
		 * that are enabled for EIAM, because this operation
		 * would set *both* EIMS and EICS for any bit in EIAM
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EICS,
				(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
		goto watchdog_reschedule;
	}

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbe_q_vector *qv = adapter->q_vector[i];
		if (qv->rxr_count || qv->txr_count)
			eics |= ((u64)1 << i);
	}

	/* Cause software interrupt to ensure rx rings are cleaned */
	ixgbe_irq_rearm_queues(adapter, eics);

watchdog_reschedule:
	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}
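
/*
 * EICS sketch (illustrative): with, say, four MSI-X queue vectors that
 * each carry at least one Rx or Tx ring, the loop above builds
 * eics = 0xF, and ixgbe_irq_rearm_queues() then raises a software
 * interrupt on each of those vectors so their rings get polled even if a
 * hardware interrupt was lost.
 */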

/**
 * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_multispeed_fiber_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     multispeed_fiber_task);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg;
	bool negotiation;

	adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
	hw->mac.autotry_restart = false;
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
}

/**
 * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_sfp_config_module_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     sfp_config_module_task);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 err;

	adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;

	/* Time for electrical oscillations to settle down */
	msleep(100);
	err = hw->phy.ops.identify_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		e_dev_err("failed to initialize because an unsupported SFP+ "
			  "module type was detected.\n");
		e_dev_err("Reload the driver after installing a supported "
			  "module.\n");
		unregister_netdev(adapter->netdev);
		return;
	}
	hw->mac.ops.setup_sfp(hw);

	if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
		/* This will also work for DA Twinax connections */
		schedule_work(&adapter->multispeed_fiber_task);
	adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
}

/**
 * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_fdir_reinit_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     fdir_reinit_task);
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_bit(__IXGBE_FDIR_INIT_DONE,
				&(adapter->tx_ring[i]->reinit_state));
	} else {
		e_err(probe, "failed to finish FDIR re-initialization, "
		      "ignored adding FDIR ATR filters\n");
	}
	/* Done FDIR Re-initialization, enable transmits */
	netif_tx_start_all_queues(adapter->netdev);
}

static DEFINE_MUTEX(ixgbe_watchdog_lock);

/**
 * ixgbe_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_watchdog_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed;
	bool link_up;
	int i;
	struct ixgbe_ring *tx_ring;
	int some_tx_pending = 0;

	mutex_lock(&ixgbe_watchdog_lock);

	link_up = adapter->link_up;
	link_speed = adapter->link_speed;

	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
		if (link_up) {
#ifdef CONFIG_DCB
			if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
				for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
					hw->mac.ops.fc_enable(hw, i);
			} else {
				hw->mac.ops.fc_enable(hw, 0);
			}
#else
			hw->mac.ops.fc_enable(hw, 0);
#endif
		}

		if (link_up ||
		    time_after(jiffies, (adapter->link_check_timeout +
					 IXGBE_TRY_LINK_TIMEOUT))) {
			adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
			IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
		}
		adapter->link_up = link_up;
		adapter->link_speed = link_speed;
	}

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			bool flow_rx, flow_tx;

			if (hw->mac.type == ixgbe_mac_82599EB) {
				u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
				u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
				flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
				flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
			} else {
				u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
				u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
				flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
				flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
			}

			e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
				"10 Gbps" :
				(link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
				 "1 Gbps" : "unknown speed")),
			       ((flow_rx && flow_tx) ? "RX/TX" :
				(flow_rx ? "RX" :
				 (flow_tx ? "TX" : "None"))));

			netif_carrier_on(netdev);
		} else {
			/* Force detection of hung controller */
			adapter->detect_tx_hung = true;
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			e_info(drv, "NIC Link is Down\n");
			netif_carrier_off(netdev);
		}
	}

	if (!netif_carrier_ok(netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			tx_ring = adapter->tx_ring[i];
			if (tx_ring->next_to_use != tx_ring->next_to_clean) {
				some_tx_pending = 1;
				break;
			}
		}

		if (some_tx_pending) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			schedule_work(&adapter->reset_task);
		}
	}

	ixgbe_update_stats(adapter);
	mutex_unlock(&ixgbe_watchdog_lock);
}

static int ixgbe_tso(struct ixgbe_adapter *adapter,
		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u8 *hdr_len, __be16 protocol)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
	u32 mss_l4len_idx, l4len;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len += l4len;

		if (protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb_is_gso_v6(skb)) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
		}

		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		vlan_macip_lens |=
		    (skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
		    (skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
				   IXGBE_ADVTXD_DTYP_CTXT);

		if (protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx =
		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		/* use index 1 for TSO */
		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}
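
/*
 * Field-packing sketch (illustrative): for a TSO frame with an MSS of 1448
 * and a 20-byte TCP header, the context descriptor's MSS/L4LEN/IDX word
 * above is built roughly as
 *
 *	mss_l4len_idx = (1448 << IXGBE_ADVTXD_MSS_SHIFT) |
 *			(20 << IXGBE_ADVTXD_L4LEN_SHIFT) |
 *			(1 << IXGBE_ADVTXD_IDX_SHIFT);
 *
 * with context index 1 reserved for TSO and index 0 for plain checksum
 * offload (see ixgbe_tx_csum() below).
 */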

static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
		      __be16 protocol)
{
	u32 rtn = 0;

	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		rtn |= IXGBE_ADVTXD_TUCMD_IPV4;
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_TCP:
			rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			break;
		case IPPROTO_SCTP:
			rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		break;
	case cpu_to_be16(ETH_P_IPV6):
		/* XXX what about other V6 headers?? */
		switch (ipv6_hdr(skb)->nexthdr) {
		case IPPROTO_TCP:
			rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			break;
		case IPPROTO_SCTP:
			rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		break;
	default:
		if (unlikely(net_ratelimit()))
			e_warn(probe, "partial checksum but proto=%x!\n",
			       protocol);
		break;
	}

	return rtn;
}

static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
			  struct ixgbe_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags,
			  __be16 protocol)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
					    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		/* use index zero for tx checksum offload */
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}

static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
			struct ixgbe_ring *tx_ring,
			struct sk_buff *skb, u32 tx_flags,
			unsigned int first, const u8 hdr_len)
{
	struct device *dev = tx_ring->dev;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	unsigned int bytecount = skb->len;
	u16 gso_segs = 1;

	i = tx_ring->next_to_use;

	if (tx_flags & IXGBE_TX_FLAGS_FCOE)
		/* excluding fcoe_crc_eof for FCoE */
		total -= sizeof(struct fcoe_crc_eof);

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->mapped_as_page = false;
		tx_buffer_info->dma = dma_map_single(dev,
						     skb->data + offset,
						     size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buffer_info->dma))
			goto dma_error;
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		total -= size;
		offset += size;
		count++;

		if (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)frag->size, total);
		offset = frag->page_offset;

		while (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;

			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma = dma_map_page(dev,
							   frag->page,
							   offset, size,
							   DMA_TO_DEVICE);
			tx_buffer_info->mapped_as_page = true;
			if (dma_mapping_error(dev, tx_buffer_info->dma))
				goto dma_error;
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			total -= size;
			offset += size;
			count++;
		}
		if (total == 0)
			break;
	}

	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		gso_segs = skb_shinfo(skb)->gso_segs;
#ifdef IXGBE_FCOE
	/* adjust for FCoE Sequence Offload */
	else if (tx_flags & IXGBE_TX_FLAGS_FSO)
		gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
					skb_shinfo(skb)->gso_size);
#endif /* IXGBE_FCOE */
	bytecount += (gso_segs - 1) * hdr_len;

	/* multiply data chunks by size of headers */
	tx_ring->tx_buffer_info[i].bytecount = bytecount;
	tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	e_dev_err("TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed tx_buffer_info map */
	tx_buffer_info->dma = 0;
	tx_buffer_info->time_stamp = 0;
	tx_buffer_info->next_to_watch = 0;
	if (count)
		count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	return 0;
}

static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
			   int tx_flags, int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;
	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
				 IXGBE_ADVTXD_POPTS_SHIFT;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
					 IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
				 IXGBE_ADVTXD_POPTS_SHIFT;

	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
		olinfo_status |= IXGBE_ADVTXD_CC;
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_FSO)
			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
	}

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, tx_ring->tail);
}
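
/*
 * Ordering note (illustrative): the wmb()/writel() pair above is the
 * classic descriptor-ring handoff. All descriptor field writes must be
 * globally visible before the tail register write, otherwise the NIC
 * could fetch a descriptor whose fields are still in a store buffer:
 *
 *	tx_desc->read.cmd_type_len = ...;	// fill descriptors
 *	wmb();					// order stores before doorbell
 *	writel(i, tx_ring->tail);		// doorbell: NIC may DMA now
 */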

static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
		      int queue, u32 tx_flags, __be16 protocol)
{
	struct ixgbe_atr_input atr_input;
	struct tcphdr *th;
	struct iphdr *iph = ip_hdr(skb);
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	u16 vlan_id, src_port, dst_port, flex_bytes;
	u32 src_ipv4_addr, dst_ipv4_addr;
	u8 l4type = 0;

	/* Right now, we support IPv4 only */
	if (protocol != htons(ETH_P_IP))
		return;
	/* check if we're UDP or TCP */
	if (iph->protocol == IPPROTO_TCP) {
		th = tcp_hdr(skb);
		src_port = th->source;
		dst_port = th->dest;
		l4type |= IXGBE_ATR_L4TYPE_TCP;
		/* l4type IPv4 type is 0, no need to assign */
	} else {
		/* Unsupported L4 header, just bail here */
		return;
	}

	memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));

	vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
		   IXGBE_TX_FLAGS_VLAN_SHIFT;
	src_ipv4_addr = iph->saddr;
	dst_ipv4_addr = iph->daddr;
	flex_bytes = eth->h_proto;

	ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
	ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
	ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
	ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
	ixgbe_atr_set_l4type_82599(&atr_input, l4type);
	/* src and dst are inverted, think how the receiver sees them */
	ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
	ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);

	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
	ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
}
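
/*
 * Why the inversion above (illustrative; addresses are made up): ATR
 * programs an Rx filter from a Tx packet. If the local node transmits
 * 10.0.0.1:5000 -> 10.0.0.2:80, the replies it wants steered arrive as
 * 10.0.0.2:80 -> 10.0.0.1:5000, so the filter is keyed on the swapped
 * tuple and directs those replies to the Rx queue paired with the
 * transmitting CPU's Tx queue.
 */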

static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbe_maybe_stop_tx(tx_ring, size);
}
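
/*
 * Usage sketch (illustrative): the xmit path calls this twice. Before
 * queueing it reserves worst-case descriptor space:
 *
 *	if (ixgbe_maybe_stop_tx(tx_ring, count))
 *		return NETDEV_TX_BUSY;	// ring full, stack will retry
 *
 * and after queueing it re-checks with DESC_NEEDED so the queue is stopped
 * early instead of failing the next transmit (see ixgbe_xmit_frame_ring()
 * below).
 */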

static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int txq = smp_processor_id();
#ifdef IXGBE_FCOE
	__be16 protocol;

	protocol = vlan_get_protocol(skb);

	if ((protocol == htons(ETH_P_FCOE)) ||
	    (protocol == htons(ETH_P_FIP))) {
		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
			txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
			txq += adapter->ring_feature[RING_F_FCOE].mask;
			return txq;
#ifdef CONFIG_IXGBE_DCB
		} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			txq = adapter->fcoe.up;
			return txq;
#endif
		}
	}
#endif

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;
		return txq;
	}

	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		if (skb->priority == TC_PRIO_CONTROL)
			txq = adapter->ring_feature[RING_F_DCB].indices-1;
		else
			txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK)
			       >> 13;
		return txq;
	}

	return skb_tx_hash(dev, skb);
}
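
/*
 * Queue-selection sketch (illustrative): with Flow Director hashing
 * enabled, the Tx queue simply tracks the submitting CPU, e.g. CPU 5 with
 * 4 real Tx queues maps to txq = 5 - 4 = 1. Keeping a flow's Tx queue
 * stable per CPU is what lets ixgbe_atr() above steer that flow's return
 * traffic to the matching Rx queue.
 */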

netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
				  struct ixgbe_adapter *adapter,
				  struct ixgbe_ring *tx_ring)
{
	struct net_device *netdev = tx_ring->netdev;
	struct netdev_queue *txq;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int tso;
	int count = 0;
	unsigned int f;
	__be16 protocol;

	protocol = vlan_get_protocol(skb);

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
			tx_flags |= ((skb->queue_mapping & 0x7) << 13);
		}
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
		   skb->priority != TC_PRIO_CONTROL) {
		tx_flags |= ((skb->queue_mapping & 0x7) << 13);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

#ifdef IXGBE_FCOE
	/* for FCoE with DCB, we force the priority to what
	 * was specified by the switch */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
	    (protocol == htons(ETH_P_FCOE) ||
	     protocol == htons(ETH_P_FIP))) {
#ifdef CONFIG_IXGBE_DCB
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
				      << IXGBE_TX_FLAGS_VLAN_SHIFT);
			tx_flags |= ((adapter->fcoe.up << 13)
				     << IXGBE_TX_FLAGS_VLAN_SHIFT);
		}
#endif
		/* flag for FCoE offloads */
		if (protocol == htons(ETH_P_FCOE))
			tx_flags |= IXGBE_TX_FLAGS_FCOE;
	}
#endif

	/* four things can cause us to need a context descriptor */
	if (skb_is_gso(skb) ||
	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
	    (tx_flags & IXGBE_TX_FLAGS_FCOE))
		count++;

	count += TXD_USE_COUNT(skb_headlen(skb));
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (ixgbe_maybe_stop_tx(tx_ring, count)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	first = tx_ring->next_to_use;
	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
#ifdef IXGBE_FCOE
		/* setup tx offload for FCoE */
		tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		if (tso)
			tx_flags |= IXGBE_TX_FLAGS_FSO;
#endif /* IXGBE_FCOE */
	} else {
		if (protocol == htons(ETH_P_IP))
			tx_flags |= IXGBE_TX_FLAGS_IPV4;
		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
				protocol);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (tso)
			tx_flags |= IXGBE_TX_FLAGS_TSO;
		else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
				       protocol) &&
			 (skb->ip_summed == CHECKSUM_PARTIAL))
			tx_flags |= IXGBE_TX_FLAGS_CSUM;
	}

	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
	if (count) {
		/* add the ATR filter if ATR is on */
		if (tx_ring->atr_sample_rate) {
			++tx_ring->atr_count;
			if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
			    test_bit(__IXGBE_FDIR_INIT_DONE,
				     &tx_ring->reinit_state)) {
				ixgbe_atr(adapter, skb, tx_ring->queue_index,
					  tx_flags, protocol);
				tx_ring->atr_count = 0;
			}
		}
		txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
		txq->tx_bytes += skb->len;
		txq->tx_packets++;
		ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
		ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);

	} else {
		dev_kfree_skb_any(skb);
		tx_ring->tx_buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}

static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;

	tx_ring = adapter->tx_ring[skb->queue_mapping];
	return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
}

/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
			    IXGBE_RAH_AV);

	return 0;
}

static int
ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 value;
	int rc;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
	if (!rc)
		rc = value;
	return rc;
}

static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	return hw->phy.ops.write_reg(hw, addr, devad, value);
}

static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}
6486
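/*
 * Usage sketch for the MDIO hooks above: the mdio45 layer dispatches
 * SIOCGMIIREG/SIOCSMIIREG ioctls through ixgbe_ioctl(), but the hooks can
 * also be exercised directly.  The helper below is hypothetical and only
 * illustrates a clause-45 read; MDIO_MMD_PMAPMD and MDIO_STAT1 come from
 * <linux/mdio.h>.
 */
#if 0	/* illustrative sketch only, not compiled */
static int example_read_pma_status(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* read PMA/PMD status register 1 from the PHY's own port address */
	return ixgbe_mdio_read(netdev, adapter->hw.phy.mdio.prtad,
			       MDIO_MMD_PMAPMD, MDIO_STAT1);
}
#endif
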
/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}

/**
 * ixgbe_del_sanmac_netdev - Remove the SAN MAC address from netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_del_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
		for (i = 0; i < num_q_vectors; i++) {
			struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
			ixgbe_msix_clean_many(0, q_vector);
		}
	} else {
		ixgbe_intr(adapter->pdev->irq, netdev);
	}
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
}
#endif

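/*
 * Usage note for the netpoll hook above: netconsole is the typical consumer,
 * e.g. (interface name and addresses below are placeholders):
 *
 *	modprobe netconsole netconsole=@/eth0,6666@10.0.0.2/
 *
 * which logs kernel messages over this NIC even with interrupts disabled.
 */
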
static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	/* accurate rx/tx bytes/packets stats */
	dev_txq_stats_fold(netdev, stats);
	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
		u64 bytes, packets;
		unsigned int start;

		if (ring) {
			do {
				start = u64_stats_fetch_begin_bh(&ring->syncp);
				packets = ring->stats.packets;
				bytes = ring->stats.bytes;
			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
			stats->rx_packets += packets;
			stats->rx_bytes += bytes;
		}
	}
	rcu_read_unlock();
	/* following stats updated by ixgbe_watchdog_task() */
	stats->multicast = netdev->stats.multicast;
	stats->rx_errors = netdev->stats.rx_errors;
	stats->rx_length_errors = netdev->stats.rx_length_errors;
	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
	return stats;
}

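/*
 * The fetch/retry loop above is the reader half of the u64_stats seqcount
 * protocol, which lets 32-bit hosts read 64-bit counters without locks.
 * For reference, a minimal sketch of the matching writer side follows; the
 * real updates live in the rx clean path elsewhere in this file, and the
 * function name here is hypothetical.
 */
#if 0	/* illustrative sketch only, not compiled */
static void example_rx_stats_update(struct ixgbe_ring *ring,
				    unsigned int packets, unsigned int bytes)
{
	u64_stats_update_begin(&ring->syncp);	/* bump seqcount; readers retry */
	ring->stats.packets += packets;
	ring->stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);
}
#endif
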
static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbe_open,
	.ndo_stop		= ixgbe_close,
	.ndo_start_xmit		= ixgbe_xmit_frame,
	.ndo_select_queue	= ixgbe_select_queue,
	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
	.ndo_set_multicast_list	= ixgbe_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbe_set_mac,
	.ndo_change_mtu		= ixgbe_change_mtu,
	.ndo_tx_timeout		= ixgbe_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
	.ndo_do_ioctl		= ixgbe_ioctl,
	.ndo_set_vf_mac		= ixgbe_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= ixgbe_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate	= ixgbe_ndo_set_vf_bw,
	.ndo_get_vf_config	= ixgbe_ndo_get_vf_config,
	.ndo_get_stats64	= ixgbe_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbe_netpoll,
#endif
#ifdef IXGBE_FCOE
	.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,
	.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
	.ndo_fcoe_enable	= ixgbe_fcoe_enable,
	.ndo_fcoe_disable	= ixgbe_fcoe_disable,
	.ndo_fcoe_get_wwn	= ixgbe_fcoe_get_wwn,
#endif /* IXGBE_FCOE */
};

static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
				     const struct ixgbe_info *ii)
{
#ifdef CONFIG_PCI_IOV
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
		return;

	/* The 82599 supports up to 64 VFs per physical function
	 * but this implementation limits allocation to 63 so that
	 * basic networking resources are still available to the
	 * physical function
	 */
	adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
	err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
	if (err) {
		e_err(probe, "Failed to enable PCI SR-IOV: %d\n", err);
		goto err_novfs;
	}
	/* If the call to enable VFs succeeded then allocate memory
	 * for per-VF control structures.
	 */
	adapter->vfinfo =
		kcalloc(adapter->num_vfs,
			sizeof(struct vf_data_storage), GFP_KERNEL);
	if (adapter->vfinfo) {
		/* Now that we're sure SR-IOV is enabled
		 * and memory allocated set up the mailbox parameters
		 */
		ixgbe_init_mbx_params_pf(hw);
		memcpy(&hw->mbx.ops, ii->mbx_ops,
		       sizeof(hw->mbx.ops));

		/* Disable RSC when in SR-IOV mode */
		adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
				     IXGBE_FLAG2_RSC_ENABLED);
		return;
	}

	/* Allocation failed: unwind and run without SR-IOV */
	e_err(probe, "Unable to allocate memory for VF Data Storage - "
	      "SRIOV disabled\n");
	pci_disable_sriov(adapter->pdev);

err_novfs:
	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
	adapter->num_vfs = 0;
#endif /* CONFIG_PCI_IOV */
}

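/*
 * Usage note: the allocation above is driven by the driver's "max_vfs"
 * module parameter, e.g.
 *
 *	modprobe ixgbe max_vfs=8
 *
 * Requests above 63 are clamped so the physical function keeps resources
 * for its own networking.
 */
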
/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * It performs the OS initialization, configures the adapter private
 * structure, and resets the hardware.
 **/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	static int cards_found;
	int i, err, pci_using_dac;
	unsigned int indices = num_possible_cpus();
#ifdef IXGBE_FCOE
	u16 device_caps;
#endif
	u32 part_num, eec;

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev,
					"No usable DMA configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}
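
	/*
	 * Note: pci_using_dac records whether the device got a 64-bit DMA
	 * mask; it is consulted further down to decide whether to advertise
	 * NETIF_F_HIGHDMA on the netdev (and its vlan_features).
	 */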

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM), ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	if (ii->mac == ixgbe_mac_82598EB)
		indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
	else
		indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);

	indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
#ifdef IXGBE_FCOE
	indices += min_t(unsigned int, num_possible_cpus(),
			 IXGBE_MAX_FCOE_INDICES);
#endif
	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	pci_set_drvdata(pdev, adapter);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
	}

	netdev->netdev_ops = &ixgbe_netdev_ops;
	ixgbe_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strcpy(netdev->name, pci_name(pdev));

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	/* EEPROM */
	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
	/* If the EEPROM is valid (bit 8 = 1) use the default read routine,
	 * otherwise fall back to bit-banging the interface */
	if (!(eec & (1 << 8)))
		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

	/* PHY */
	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
	hw->phy.mdio.mmds = 0;
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	hw->phy.mdio.dev = netdev;
	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
	hw->phy.mdio.mdio_write = ixgbe_mdio_write;

	/* set up this timer and work struct before calling get_invariants
	 * which might start the timer
	 */
	init_timer(&adapter->sfp_timer);
	adapter->sfp_timer.function = ixgbe_sfp_timer;
	adapter->sfp_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);

	/* multispeed fiber has its own tasklet, called from GPI SDP1 context */
	INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);

	/* a new SFP+ module arrival is handled from GPI SDP2 context */
	INIT_WORK(&adapter->sfp_config_module_task,
		  ixgbe_sfp_config_module_task);

	ii->get_invariants(hw);

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* Make it possible for the adapter to be woken up via WOL */
	if (adapter->hw.mac.type == ixgbe_mac_82599EB)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);

	/*
	 * If there is a fan on this device and it has failed, log the
	 * failure.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			e_crit(probe, "Fan has stopped, replace the adapter\n");
	}

	/* reset_hw fills in the perm_addr as well */
	hw->phy.reset_if_overtemp = true;
	err = hw->mac.ops.reset_hw(hw);
	hw->phy.reset_if_overtemp = false;
	if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
	    hw->mac.type == ixgbe_mac_82598EB) {
		/*
		 * Arm the SFP timer to poll for a module to arrive.
		 * Only do this for 82598, since 82599 will generate
		 * interrupts on module arrival.
		 */
		set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
		mod_timer(&adapter->sfp_timer,
			  round_jiffies(jiffies + (2 * HZ)));
		err = 0;
	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		e_dev_err("failed to initialize because an unsupported SFP+ "
			  "module type was detected.\n");
		e_dev_err("Reload the driver after installing a supported "
			  "module.\n");
		goto err_sw_init;
	} else if (err) {
		e_dev_err("HW Init failed: %d\n", err);
		goto err_sw_init;
	}

	ixgbe_probe_vf(adapter, ii);

	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_GRO;

	if (adapter->hw.mac.type == ixgbe_mac_82599EB)
		netdev->features |= NETIF_F_SCTP_CSUM;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
				    IXGBE_FLAG_DCB_ENABLED);
	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;

#ifdef CONFIG_IXGBE_DCB
	netdev->dcbnl_ops = &dcbnl_ops;
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		if (hw->mac.ops.get_device_caps) {
			hw->mac.ops.get_device_caps(hw, &device_caps);
			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
		}
	}
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		netdev->vlan_features |= NETIF_F_FCOE_CRC;
		netdev->vlan_features |= NETIF_F_FSO;
		netdev->vlan_features |= NETIF_F_FCOE_MTU;
	}
#endif /* IXGBE_FCOE */
	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
		netdev->features |= NETIF_F_LRO;

	/* make sure the EEPROM is good */
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		e_dev_err("The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);

	if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
		e_dev_err("invalid MAC address\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* power down the optics */
	if (hw->phy.multispeed_fiber)
		hw->mac.ops.disable_tx_laser(hw);

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgbe_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	switch (pdev->device) {
	case IXGBE_DEV_ID_82599_KX4:
		adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
				IXGBE_WUFC_MC | IXGBE_WUFC_BC);
		break;
	default:
		adapter->wol = 0;
		break;
	}
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* pick up the PCI bus settings for reporting later */
	hw->mac.ops.get_bus_info(hw);

	/* print bus type/speed/width info */
	e_dev_info("(PCI Express:%s:%s) %pM\n",
		   (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0Gb/s" :
		    hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5Gb/s" :
		    "Unknown"),
		   (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
		    hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
		    hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
		    "Unknown"),
		   netdev->dev_addr);
	ixgbe_read_pba_num_generic(hw, &part_num);
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, "
			   "PBA No: %06x-%03x\n",
			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
			   (part_num >> 8), (part_num & 0xff));
	else
		e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
			   hw->mac.type, hw->phy.type,
			   (part_num >> 8), (part_num & 0xff));

	if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
		e_dev_warn("PCI-Express bandwidth available for this card is "
			   "not sufficient for optimal performance.\n");
		e_dev_warn("For optimal performance a x8 PCI-Express slot "
			   "is required.\n");
	}

	/* save off EEPROM version number */
	hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);

	/* reset the hardware with the new settings */
	err = hw->mac.ops.start_hw(hw);

	if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		e_dev_warn("This device is a pre-production adapter/LOM. "
			   "Please be aware there may be issues associated "
			   "with your hardware. If you are experiencing "
			   "problems please contact your Intel or hardware "
			   "representative who provided you with this "
			   "hardware.\n");
	}
	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);

	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
		INIT_WORK(&adapter->check_overtemp_task,
			  ixgbe_check_overtemp_task);
#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		ixgbe_setup_dca(adapter);
	}
#endif
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
		for (i = 0; i < adapter->num_vfs; i++)
			ixgbe_vf_configuration(pdev, (i | 0x10000000));
	}

	/* add san mac addr to netdev */
	ixgbe_add_sanmac_netdev(netdev);

	e_dev_info("Intel(R) 10 Gigabit Network Connection\n");
	cards_found++;
	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
err_eeprom:
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		ixgbe_disable_sriov(adapter);
	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->sfp_timer);
	cancel_work_sync(&adapter->sfp_task);
	cancel_work_sync(&adapter->multispeed_fiber_task);
	cancel_work_sync(&adapter->sfp_config_module_task);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	set_bit(__IXGBE_DOWN, &adapter->state);
	/* clear the module not found bit to make sure the worker won't
	 * reschedule
	 */
	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	del_timer_sync(&adapter->sfp_timer);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->sfp_task);
	cancel_work_sync(&adapter->multispeed_fiber_task);
	cancel_work_sync(&adapter->sfp_config_module_task);
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		cancel_work_sync(&adapter->fdir_reinit_task);
	flush_scheduled_work();

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}

#endif
#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		ixgbe_cleanup_fcoe(adapter);

#endif /* IXGBE_FCOE */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		ixgbe_disable_sriov(adapter);

	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
				     IORESOURCE_MEM));

	e_dev_info("complete\n");

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		e_err(probe, "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		e_dev_err("pci_cleanup_aer_uncorrect_error_status "
			  "failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			e_info(probe, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

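/*
 * Note on the handlers above: the PCI AER core drives them in order --
 * error_detected() (detach and request a reset), then slot_reset()
 * (re-enable and reinitialize the device), then resume() (bring traffic
 * back up).  See Documentation/PCI/pci-error-recovery.txt for the full
 * state machine.
 */
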
static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
	pr_info("%s\n", ixgbe_copyright);

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */

/**
 * ixgbe_get_hw_dev - return the net_device backing a hw struct
 * @hw: pointer to the hardware structure
 *
 * Used by the hardware layer to print debugging information.
 **/
struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	return adapter->netdev;
}

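/*
 * Usage note: the hardware layer's debug logging resolves its net_device
 * through the helper above.  A minimal sketch of such a wrapper (an assumed
 * form for illustration -- the driver's real macros live in its headers):
 *
 *	#define example_hw_dbg(hw, format, arg...) \
 *		netdev_dbg(ixgbe_get_hw_dev(hw), format, ## arg)
 */
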
module_exit(ixgbe_exit_module);

/* ixgbe_main.c */