/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2010 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "2.0.84-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
	[board_82599] = &ixgbe_82599_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
	 board_82599 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
		 "per physical function");
#endif /* CONFIG_PCI_IOV */
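/*
 * Usage sketch: max_vfs is an ordinary module parameter, so it is set at
 * load time, e.g. "modprobe ixgbe max_vfs=4" to request four VFs per port.
 */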

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gcr;
	u32 gpie;
	u32 vmdctl;

#ifdef CONFIG_PCI_IOV
	/* disable iov and allow time for transactions to clear */
	pci_disable_sriov(adapter->pdev);
#endif

	/* turn off device IOV mode */
	gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr &= ~(IXGBE_GCR_EXT_SRIOV);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* set default pool back to 0 */
	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);

	/* take a breather then clean up driver data */
	msleep(100);
	kfree(adapter->vfinfo);	/* kfree(NULL) is a no-op */
	adapter->vfinfo = NULL;

	adapter->num_vfs = 0;
	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}

struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{}
};

/*
 * ixgbe_regdump - register printout routine
 */
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
	int i = 0, j = 0;
	char rname[16];
	u32 regs[64];

	switch (reginfo->ofs) {
	case IXGBE_SRRCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		break;
	case IXGBE_DCA_RXCTRL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		break;
	case IXGBE_RDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
		break;
	case IXGBE_RDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
		break;
	case IXGBE_RDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
		break;
	case IXGBE_RXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		break;
	case IXGBE_RDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
		break;
	case IXGBE_RDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
		break;
	case IXGBE_TDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
		break;
	case IXGBE_TDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
		break;
	case IXGBE_TDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
		break;
	case IXGBE_TDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
		break;
	case IXGBE_TDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
		break;
	case IXGBE_TXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		break;
	default:
		printk(KERN_INFO "%-15s %08x\n", reginfo->name,
		       IXGBE_READ_REG(hw, reginfo->ofs));
		return;
	}

	for (i = 0; i < 8; i++) {
		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
		printk(KERN_ERR "%-15s ", rname);
		for (j = 0; j < 8; j++)
			printk(KERN_CONT "%08x ", regs[i*8+j]);
		printk(KERN_CONT "\n");
	}
}

/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_reg_info *reginfo;
	int n = 0;
	struct ixgbe_ring *tx_ring;
	struct ixgbe_tx_buffer *tx_buffer_info;
	union ixgbe_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct ixgbe_ring *rx_ring;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		printk(KERN_INFO "Device Name     state            "
		       "trans_start      last_rx\n");
		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
		       netdev->name,
		       netdev->state,
		       netdev->trans_start,
		       netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	printk(KERN_INFO " Register Name   Value\n");
	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
	     reginfo->name; reginfo++) {
		ixgbe_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ] "
	       "leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		tx_buffer_info =
			&tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
		       n, tx_ring->next_to_use, tx_ring->next_to_clean,
		       (u64)tx_buffer_info->dma,
		       tx_buffer_info->length,
		       tx_buffer_info->next_to_watch,
		       (u64)tx_buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN | PORTS | IDX | STA | DCMD | DTYP | RSV |    DTALEN    |
	 *   +--------------------------------------------------------------+
	 *   63     46 45   40 39 36 35 32 31  24 23  20 19                0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "T [desc]     [address 63:0  ] "
		       "[PlPOIdStDDt Ln] [bi->dma       ] "
		       "leng ntw timestamp bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
			       " %04X %3X %016llX %p", i,
			       le64_to_cpu(u0->a),
			       le64_to_cpu(u0->b),
			       (u64)tx_buffer_info->dma,
			       tx_buffer_info->length,
			       tx_buffer_info->next_to_watch,
			       (u64)tx_buffer_info->time_stamp,
			       tx_buffer_info->skb);
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC/U\n");
			else if (i == tx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

			if (netif_msg_pktdata(adapter) &&
			    tx_buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					       DUMP_PREFIX_ADDRESS, 16, 1,
					       phys_to_virt(tx_buffer_info->dma),
					       tx_buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO "%5d %5X %5X\n", n,
		       rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 16 15   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet   IP     |SPH| HDR_LEN   | RSV|Packet|  RSS   |
	 *   | Checksum Ident  |   |           |    | Type | Type   |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
		       "[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
		       "<-- Adv Rx Read format\n");
		printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
		       "[vl er S cks ln] ---------------- [bi->skb] "
		       "<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			rx_buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & IXGBE_RXD_STAT_DD) {
				/* Descriptor Done */
				printk(KERN_INFO "RWB[0x%03X]     %016llX "
				       "%016llX ---------------- %p", i,
				       le64_to_cpu(u0->a),
				       le64_to_cpu(u0->b),
				       rx_buffer_info->skb);
			} else {
				printk(KERN_INFO "R  [0x%03X]     %016llX "
				       "%016llX %016llX %p", i,
				       le64_to_cpu(u0->a),
				       le64_to_cpu(u0->b),
				       (u64)rx_buffer_info->dma,
				       rx_buffer_info->skb);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16, 1,
						       phys_to_virt(rx_buffer_info->dma),
						       rx_ring->rx_buf_len, true);

					if (rx_ring->rx_buf_len
					    < IXGBE_RXBUFFER_2048)
						print_hex_dump(KERN_INFO, "",
							       DUMP_PREFIX_ADDRESS, 16, 1,
							       phys_to_virt(
								 rx_buffer_info->page_dma +
								 rx_buffer_info->page_offset),
							       PAGE_SIZE/2, true);
				}
			}

			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");
		}
	}

exit:
	return;
}

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/*
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
			   u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
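	/*
	 * On 82599 each IVAR register holds four 8-bit entries: the Rx and
	 * Tx causes of two adjacent queues.  IVAR(queue >> 1) selects the
	 * register and 16*(queue & 1) + 8*direction selects the byte lane;
	 * the "other" causes have a dedicated IVAR_MISC register.
	 */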
	case ixgbe_mac_82599EB:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}

static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
					  u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
	}
}

void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
				      struct ixgbe_tx_buffer
				      *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

/**
 * ixgbe_tx_xon_state - check the tx ring xon state
 * @adapter: the ixgbe adapter
 * @tx_ring: the corresponding tx_ring
 *
 * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
 * corresponding TC of this tx_ring when checking TFCS.
 *
 * Returns : true if in xon state (currently not paused)
 */
static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
				      struct ixgbe_ring *tx_ring)
{
	u32 txoff = IXGBE_TFCS_TXOFF;

#ifdef CONFIG_IXGBE_DCB
	if (adapter->dcb_cfg.pfc_mode_enable) {
		int tc;
		int reg_idx = tx_ring->reg_idx;
		int dcb_i = adapter->ring_feature[RING_F_DCB].indices;

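		/*
		 * Recover the traffic class from the ring's register index;
		 * the shifts below mirror the 82598/82599 queue-to-TC
		 * layouts, where each TC owns a differently sized group of
		 * queues depending on whether 4 or 8 TCs are configured.
		 */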
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82598EB:
			tc = reg_idx >> 2;
			txoff = IXGBE_TFCS_TXOFF0;
			break;
		case ixgbe_mac_82599EB:
			tc = 0;
			txoff = IXGBE_TFCS_TXOFF;
			if (dcb_i == 8) {
				/* TC0, TC1 */
				tc = reg_idx >> 5;
				if (tc == 2) /* TC2, TC3 */
					tc += (reg_idx - 64) >> 4;
				else if (tc == 3) /* TC4, TC5, TC6, TC7 */
					tc += 1 + ((reg_idx - 96) >> 3);
			} else if (dcb_i == 4) {
				/* TC0, TC1 */
				tc = reg_idx >> 6;
				if (tc == 1) {
					tc += (reg_idx - 64) >> 5;
					if (tc == 2) /* TC2, TC3 */
						tc += (reg_idx - 96) >> 4;
				}
			}
			break;
		default:
			tc = 0;
		}
		txoff <<= tc;
	}
#endif
	return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
				       struct ixgbe_ring *tx_ring,
				       unsigned int eop)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* Detect a transmit hang in hardware, this serializes the
	 * check with the clearing of time_stamp and movement of eop */
	adapter->detect_tx_hung = false;
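	/*
	 * Report a hang only when the eop descriptor has been pending for
	 * more than a second (time_stamp + HZ) while the ring is in XON
	 * state, i.e. the stall cannot be explained by flow control.
	 */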
	if (tx_ring->tx_buffer_info[eop].time_stamp &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
	    ixgbe_tx_xon_state(adapter, tx_ring)) {
		/* detected Tx unit hang */
		union ixgbe_adv_tx_desc *tx_desc;
		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
		e_err(drv, "Detected Tx Unit Hang\n"
		      "  Tx Queue             <%d>\n"
		      "  TDH, TDT             <%x>, <%x>\n"
		      "  next_to_use          <%x>\n"
		      "  next_to_clean        <%x>\n"
		      "tx_buffer_info[next_to_clean]\n"
		      "  time_stamp           <%lx>\n"
		      "  jiffies              <%lx>\n",
		      tx_ring->queue_index,
		      IXGBE_READ_REG(hw, tx_ring->head),
		      IXGBE_READ_REG(hw, tx_ring->tail),
		      tx_ring->next_to_use, eop,
		      tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
		return true;
	}

	return false;
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
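/*
 * TXD_USE_COUNT(S) is ceil(S / IXGBE_MAX_DATA_PER_TXD): one descriptor can
 * carry at most 2^14 = 16384 bytes.  DESC_NEEDED is therefore the worst case
 * for one skb: one descriptor for the linear data, one per page fragment
 * (a page fits in a single descriptor), plus one context descriptor.
 */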

static void ixgbe_tx_timeout(struct net_device *netdev);

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *tx_ring)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct net_device *netdev = adapter->netdev;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;
				unsigned int hlen = skb_headlen(skb);

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
#ifdef IXGBE_FCOE
				/* adjust for FCoE Sequence Offload */
				if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
				    && (skb->protocol == htons(ETH_P_FCOE)) &&
				    skb_is_gso(skb)) {
					hlen = skb_transport_offset(skb) +
					       sizeof(struct fc_frame_header) +
					       sizeof(struct fcoe_crc_eof);
					segs = DIV_ROUND_UP(skb->len - hlen,
						skb_shinfo(skb)->gso_size);
				}
#endif /* IXGBE_FCOE */
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * hlen) + skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbe_unmap_and_free_tx_resource(adapter,
							 tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++tx_ring->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
			/* schedule immediate reset if we believe we hung */
			e_info(probe, "tx hang %d detected, resetting "
			       "adapter\n", adapter->tx_timeout_count + 1);
			ixgbe_tx_timeout(adapter->netdev);
		}
	}

	/* re-arm the interrupt */
	if (count >= tx_ring->work_limit)
		ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));

	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->stats.packets += total_packets;
	tx_ring->stats.bytes += total_bytes;
	return (count < tx_ring->work_limit);
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring)
{
	u32 rxctrl;
	int cpu = get_cpu();
	int q = rx_ring->reg_idx;

	if (rx_ring->cpu != cpu) {
		rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
			rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
			rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
				   IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
		}
		rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
		rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
			    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
		rx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring)
{
	u32 txctrl;
	int cpu = get_cpu();
	int q = tx_ring->reg_idx;
	struct ixgbe_hw *hw = &adapter->hw;

	if (tx_ring->cpu != cpu) {
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
			txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
			txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
			txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
				   IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
			txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
		}
		tx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	/* always use CB2 mode, difference is masked in the CB driver */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i]->cpu = -1;
		ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i]->cpu = -1;
		ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
			      struct sk_buff *skb, u8 status,
			      struct ixgbe_ring *ring,
			      union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct napi_struct *napi = &q_vector->napi;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
		if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
			vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
		else
			napi_gro_receive(napi, skb);
	} else {
		if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
			vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
		else
			netif_rx(skb);
	}
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @rx_desc: rx descriptor carrying the hardware status of the receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);

	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
		if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
		    (adapter->hw.mac.type == ixgbe_mac_82599EB))
			return;

		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
					 struct ixgbe_ring *rx_ring, u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
			    struct ixgbe_ring *rx_ring,
			    int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	unsigned int i;
	unsigned int bufsz = rx_ring->rx_buf_len;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);

		if (!bi->page_dma &&
		    (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
			if (!bi->page) {
				bi->page = netdev_alloc_page(netdev);
				if (!bi->page) {
					adapter->alloc_rx_page_failed++;
					goto no_buffers;
				}
				bi->page_offset = 0;
			} else {
				/* use a half page if we're re-using */
				bi->page_offset ^= (PAGE_SIZE / 2);
			}

			bi->page_dma = dma_map_page(&pdev->dev, bi->page,
						    bi->page_offset,
						    (PAGE_SIZE / 2),
						    DMA_FROM_DEVICE);
		}

		if (!bi->skb) {
			struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
									bufsz);
			bi->skb = skb;

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}
			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
		}

		if (!bi->dma) {
			bi->dma = dma_map_single(&pdev->dev,
						 bi->skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
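		/*
		 * The hardware tail (RDT) must point at the last descriptor
		 * actually initialized, so back i up by one, wrapping to the
		 * end of the ring when i was 0.
		 */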
		if (i-- == 0)
			i = (rx_ring->count - 1);

		ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}

static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
{
	return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
		IXGBE_RXDADV_RSCCNT_MASK) >>
		IXGBE_RXDADV_RSCCNT_SHIFT;
}

/**
 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
 * @skb: pointer to the last skb in the rsc queue
 * @count: pointer to number of packets coalesced in this context
 *
 * This function changes a queue full of hw rsc buffers into a completed
 * packet.  It uses the ->prev pointers to find the first packet and then
 * turns it into the frag list owner.
 **/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
							u64 *count)
{
	unsigned int frag_list_size = 0;

	while (skb->prev) {
		struct sk_buff *prev = skb->prev;
		frag_list_size += skb->len;
		skb->prev = NULL;
		skb = prev;
		*count += 1;
	}

	skb_shinfo(skb)->frag_list = skb->next;
	skb->next = NULL;
	skb->len += frag_list_size;
	skb->data_len += frag_list_size;
	skb->truesize += frag_list_size;
	return skb;
}

struct ixgbe_rsc_cb {
	dma_addr_t dma;
	bool delay_unmap;
};

#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
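/*
 * Note: skb->cb is a scratch area owned by whichever layer currently holds
 * the skb, so stashing the deferred DMA address and unmap flag there (via
 * IXGBE_RSC_CB above) is safe until the packet is handed up the stack.
 */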

static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i, rsc_count = 0;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
	int ddp_bytes = 0;
#endif /* IXGBE_FCOE */

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
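			/*
			 * If the reported header length is implausible, or
			 * data arrived without the split-header (SPH) flag
			 * set, fall back to treating IXGBE_RX_HDR_SIZE bytes
			 * as header data.
			 */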
			if ((len > IXGBE_RX_HDR_SIZE) ||
			    (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
				len = IXGBE_RX_HDR_SIZE;
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}

		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
			    (!(staterr & IXGBE_RXD_STAT_EOP)) &&
			    (!(skb->prev))) {
				/*
				 * When HWRSC is enabled, delay unmapping
				 * of the first packet. It carries the
				 * header information, HW may still
				 * access the header after the writeback.
				 * Only unmap it when EOP is reached
				 */
				IXGBE_RSC_CB(skb)->delay_unmap = true;
				IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
			} else {
				dma_unmap_single(&pdev->dev,
						 rx_buffer_info->dma,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			}
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		if (upper_len) {
			dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buffer_info->page,
					   rx_buffer_info->page_offset,
					   upper_len);

			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
			rsc_count = ixgbe_get_rsc_count(rx_desc);

		if (rsc_count) {
			u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
				    IXGBE_RXDADV_NEXTP_SHIFT;
			next_buffer = &rx_ring->rx_buffer_info[nextp];
		} else {
			next_buffer = &rx_ring->rx_buffer_info[i];
		}

		if (staterr & IXGBE_RXD_STAT_EOP) {
			if (skb->prev)
				skb = ixgbe_transform_rsc_queue(skb,
						&(rx_ring->rsc_count));
			if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
				if (IXGBE_RSC_CB(skb)->delay_unmap) {
					dma_unmap_single(&pdev->dev,
							 IXGBE_RSC_CB(skb)->dma,
							 rx_ring->rx_buf_len,
							 DMA_FROM_DEVICE);
					IXGBE_RSC_CB(skb)->dma = 0;
					IXGBE_RSC_CB(skb)->delay_unmap = false;
				}
				if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
					rx_ring->rsc_count +=
						skb_shinfo(skb)->nr_frags;
				else
					rx_ring->rsc_count++;
				rx_ring->rsc_flush++;
			}
			rx_ring->stats.packets++;
			rx_ring->stats.bytes += skb->len;
		} else {
			if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
				rx_buffer_info->skb = next_buffer->skb;
				rx_buffer_info->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			} else {
				skb->next = next_buffer->skb;
				skb->next->prev = skb;
			}
			rx_ring->non_eop_descs++;
			goto next_desc;
		}

		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbe_rx_checksum(adapter, rx_desc, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, adapter->netdev);
#ifdef IXGBE_FCOE
		/* if ddp, not passing to ULD unless for FCP_RSP or error */
		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
			if (!ddp_bytes)
				goto next_desc;
		}
#endif /* IXGBE_FCOE */
		ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

#ifdef IXGBE_FCOE
	/* include DDPed FCoE data */
	if (ddp_bytes > 0) {
		unsigned int mss;

		mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
		      sizeof(struct fc_frame_header) -
		      sizeof(struct fcoe_crc_eof);
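		/* round the estimated per-frame payload down to a multiple
		 * of 512 so the packet count credited below stays
		 * conservative */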
		if (mss > 512)
			mss &= ~511;
		total_rx_bytes += ddp_bytes;
		total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
	}
#endif /* IXGBE_FCOE */

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;

	return cleaned;
}

static int ixgbe_clean_rxonly(struct napi_struct *, int);

/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		/* XXX for_each_set_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
				       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx]->reg_idx;
			ixgbe_set_ivar(adapter, 0, j, v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
					      adapter->num_rx_queues,
					      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
				       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx]->reg_idx;
			ixgbe_set_ivar(adapter, 1, j, v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
					      adapter->num_tx_queues,
					      r_idx + 1);
		}

		if (q_vector->txr_count && !q_vector->rxr_count)
			/* tx only */
			q_vector->eitr = adapter->tx_eitr_param;
		else if (q_vector->rxr_count)
			/* rx or mixed */
			q_vector->eitr = adapter->rx_eitr_param;

		ixgbe_write_eitr(q_vector);
	}

	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
			       v_idx);
	else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
		ixgbe_set_ivar(adapter, -1, 1, v_idx);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	if (adapter->num_vfs)
		mask &= ~(IXGBE_EIMS_OTHER |
			  IXGBE_EIMS_MAILBOX |
			  IXGBE_EIMS_LSC);
	else
		mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * this functionality is controlled by the InterruptThrottleRate module
 * parameter (see ixgbe_param.c)
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
			   u32 eitr, u8 itr_setting,
			   int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000/eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */
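	/*
	 * Example: at eitr = 20000 ints/s the timeslice is 50 us, so moving
	 * 100000 bytes in one slice yields bytes_perint = 2000 (2 bytes/ns);
	 * eitr_low/eitr_high are compared against values in these units.
	 */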

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}

/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* must write high and low 16 bits to reset counter */
		itr_reg |= (itr_reg << 16);
	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		/*
		 * 82599 can support a value of zero, so allow it for
		 * max interrupt rate, but there is an errata where it
		 * cannot be zero with RSC
		 */
		if (itr_reg == 8 &&
		    !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
			itr_reg = 0;

		/*
		 * set the WDIS bit to not clear the timer bits and cause an
		 * immediate assertion of the interrupt
		 */
		itr_reg |= IXGBE_EITR_CNT_WDIS;
	}
	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}

static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u32 new_itr;
	u8 current_itr, ret_itr;
	int i, r_idx;
	struct ixgbe_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = adapter->tx_ring[r_idx];
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
					   q_vector->tx_itr,
					   tx_ring->total_packets,
					   tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
				    q_vector->tx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = adapter->rx_ring[r_idx];
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
					   q_vector->rx_itr,
					   rx_ring->total_packets,
					   rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
				    q_vector->rx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);

		/* save the algorithm value here, not the smoothed one */
		q_vector->eitr = new_itr;

		ixgbe_write_eitr(q_vector);
	}
}

/**
 * ixgbe_check_overtemp_task - worker thread to check over temperature
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_check_overtemp_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     check_overtemp_task);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr = adapter->interrupt_event;

	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
		switch (hw->device_id) {
		case IXGBE_DEV_ID_82599_T3_LOM: {
			u32 autoneg;
			bool link_up = false;

			if (hw->mac.ops.check_link)
				hw->mac.ops.check_link(hw, &autoneg, &link_up, false);

			if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
			    (eicr & IXGBE_EICR_LSC))
				/* Check if this is due to overtemp */
				if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP)
					break;
			}
			return;
		default:
			if (!(eicr & IXGBE_EICR_GPI_SDP0))
				return;
			break;
		}
		e_crit(drv, "Network adapter has been stopped because it has "
		       "overheated. Restart the computer. If the problem "
		       "persists, power off the system and replace the "
		       "adapter\n");
		/* write to clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
	}
}
1665
1666static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
1667{
1668 struct ixgbe_hw *hw = &adapter->hw;
1669
1670 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
1671 (eicr & IXGBE_EICR_GPI_SDP1)) {
396e799c 1672 e_crit(probe, "Fan has stopped, replace the adapter\n");
1673 /* write to clear the interrupt */
1674 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1675 }
1676}
cf8280ee 1677
1678static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
1679{
1680 struct ixgbe_hw *hw = &adapter->hw;
1681
1682 if (eicr & IXGBE_EICR_GPI_SDP1) {
1683 /* Clear the interrupt */
1684 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1685 schedule_work(&adapter->multispeed_fiber_task);
1686 } else if (eicr & IXGBE_EICR_GPI_SDP2) {
1687 /* Clear the interrupt */
1688 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1689 schedule_work(&adapter->sfp_config_module_task);
1690 } else {
1691 /* Interrupt isn't for us... */
1692 return;
1693 }
1694}
1695
1696static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
1697{
1698 struct ixgbe_hw *hw = &adapter->hw;
1699
1700 adapter->lsc_int++;
1701 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1702 adapter->link_check_timeout = jiffies;
1703 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1704 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
8a0717f3 1705 IXGBE_WRITE_FLUSH(hw);
1706 schedule_work(&adapter->watchdog_task);
1707 }
1708}
1709
1710static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1711{
1712 struct net_device *netdev = data;
1713 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1714 struct ixgbe_hw *hw = &adapter->hw;
1715 u32 eicr;
1716
1717 /*
1718 * Workaround for Silicon errata. Use clear-by-write instead
1719 * of clear-by-read. Reading with EICS will return the
1720 * interrupt causes without clearing, which will later be done
1721 * with the write to EICR.
1722 */
1723 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1724 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
9a799d71 1725
1726 if (eicr & IXGBE_EICR_LSC)
1727 ixgbe_check_lsc(adapter);
d4f80882 1728
1729 if (eicr & IXGBE_EICR_MAILBOX)
1730 ixgbe_msg_task(adapter);
1731
1732 if (hw->mac.type == ixgbe_mac_82598EB)
1733 ixgbe_check_fan_failure(adapter, eicr);
0befdb3e 1734
c4cf55e5 1735 if (hw->mac.type == ixgbe_mac_82599EB) {
e8e26350 1736 ixgbe_check_sfp_event(adapter, eicr);
1737 adapter->interrupt_event = eicr;
1738 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1739 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
1740 schedule_work(&adapter->check_overtemp_task);
1741
1742 /* Handle Flow Director Full threshold interrupt */
1743 if (eicr & IXGBE_EICR_FLOW_DIR) {
1744 int i;
1745 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
1746 /* Disable transmits before FDIR Re-initialization */
1747 netif_tx_stop_all_queues(netdev);
1748 for (i = 0; i < adapter->num_tx_queues; i++) {
1749 struct ixgbe_ring *tx_ring =
4a0b9ca0 1750 adapter->tx_ring[i];
1751 if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
1752 &tx_ring->reinit_state))
1753 schedule_work(&adapter->fdir_reinit_task);
1754 }
1755 }
1756 }
1757 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1758 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1759
1760 return IRQ_HANDLED;
1761}
1762
1763static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
1764 u64 qmask)
1765{
1766 u32 mask;
1767
1768 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1769 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1770 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1771 } else {
1772 mask = (qmask & 0xFFFFFFFF);
1773 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
1774 mask = (qmask >> 32);
1775 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
1776 }
1777 /* skip the flush */
1778}
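/*
 * Callers re-arm a single vector with a one-hot queue mask, e.g.
 * ixgbe_irq_enable_queues(adapter, (u64)1 << q_vector->v_idx), as the
 * NAPI poll routines below do when their work is complete.
 */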
1779
1780static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
1781 u64 qmask)
1782{
1783 u32 mask;
1784
1785 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1786 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1787 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
1788 } else {
1789 mask = (qmask & 0xFFFFFFFF);
1790 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
1791 mask = (qmask >> 32);
1792 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
1793 }
1794 /* skip the flush */
1795}
1796
1797static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
1798{
1799 struct ixgbe_q_vector *q_vector = data;
1800 struct ixgbe_adapter *adapter = q_vector->adapter;
3a581073 1801 struct ixgbe_ring *tx_ring;
1802 int i, r_idx;
1803
1804 if (!q_vector->txr_count)
1805 return IRQ_HANDLED;
1806
1807 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1808 for (i = 0; i < q_vector->txr_count; i++) {
4a0b9ca0 1809 tx_ring = adapter->tx_ring[r_idx];
1810 tx_ring->total_bytes = 0;
1811 tx_ring->total_packets = 0;
021230d4 1812 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
b4617240 1813 r_idx + 1);
021230d4 1814 }
9a799d71 1815
9b471446 1816 /* EIAM disabled interrupts (on this vector) for us */
1817 napi_schedule(&q_vector->napi);
1818
1819 return IRQ_HANDLED;
1820}
1821
1822/**
1823 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
1824 * @irq: unused
1825 * @data: pointer to our q_vector struct for this interrupt vector
1826 **/
1827static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1828{
1829 struct ixgbe_q_vector *q_vector = data;
1830 struct ixgbe_adapter *adapter = q_vector->adapter;
3a581073 1831 struct ixgbe_ring *rx_ring;
021230d4 1832 int r_idx;
30efa5a3 1833 int i;
1834
1835 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
30efa5a3 1836 for (i = 0; i < q_vector->rxr_count; i++) {
4a0b9ca0 1837 rx_ring = adapter->rx_ring[r_idx];
1838 rx_ring->total_bytes = 0;
1839 rx_ring->total_packets = 0;
1840 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1841 r_idx + 1);
1842 }
1843
1844 if (!q_vector->rxr_count)
1845 return IRQ_HANDLED;
1846
021230d4 1847 /* disable interrupts on this vector only */
9b471446 1848 /* EIAM disabled interrupts (on this vector) for us */
288379f0 1849 napi_schedule(&q_vector->napi);
1850
1851 return IRQ_HANDLED;
1852}
1853
1854static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
1855{
1856 struct ixgbe_q_vector *q_vector = data;
1857 struct ixgbe_adapter *adapter = q_vector->adapter;
1858 struct ixgbe_ring *ring;
1859 int r_idx;
1860 int i;
1861
1862 if (!q_vector->txr_count && !q_vector->rxr_count)
1863 return IRQ_HANDLED;
1864
1865 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1866 for (i = 0; i < q_vector->txr_count; i++) {
4a0b9ca0 1867 ring = adapter->tx_ring[r_idx];
1868 ring->total_bytes = 0;
1869 ring->total_packets = 0;
1870 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1871 r_idx + 1);
1872 }
1873
1874 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1875 for (i = 0; i < q_vector->rxr_count; i++) {
4a0b9ca0 1876 ring = adapter->rx_ring[r_idx];
1877 ring->total_bytes = 0;
1878 ring->total_packets = 0;
1879 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1880 r_idx + 1);
1881 }
1882
9b471446 1883 /* EIAM disabled interrupts (on this vector) for us */
91281fd3 1884 napi_schedule(&q_vector->napi);
9a799d71 1885
1886 return IRQ_HANDLED;
1887}
1888
1889/**
1890 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
1891 * @napi: napi struct with our devices info in it
1892 * @budget: amount of work driver is allowed to do this pass, in packets
1893 *
1894 * This function is optimized for cleaning one queue only on a single
1895 * q_vector!!!
021230d4 1896 **/
1897static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1898{
021230d4 1899 struct ixgbe_q_vector *q_vector =
b4617240 1900 container_of(napi, struct ixgbe_q_vector, napi);
021230d4 1901 struct ixgbe_adapter *adapter = q_vector->adapter;
f0848276 1902 struct ixgbe_ring *rx_ring = NULL;
9a799d71 1903 int work_done = 0;
021230d4 1904 long r_idx;
9a799d71 1905
021230d4 1906 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
4a0b9ca0 1907 rx_ring = adapter->rx_ring[r_idx];
5dd2d332 1908#ifdef CONFIG_IXGBE_DCA
bd0362dd 1909 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
3a581073 1910 ixgbe_update_rx_dca(adapter, rx_ring);
bd0362dd 1911#endif
9a799d71 1912
78b6f4ce 1913 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
9a799d71 1914
1915 /* If all Rx work done, exit the polling mode */
1916 if (work_done < budget) {
288379f0 1917 napi_complete(napi);
f7554a2b 1918 if (adapter->rx_itr_setting & 1)
f494e8fa 1919 ixgbe_set_itr_msix(q_vector);
9a799d71 1920 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1921 ixgbe_irq_enable_queues(adapter,
1922 ((u64)1 << q_vector->v_idx));
1923 }
1924
1925 return work_done;
1926}
1927
f0848276 1928/**
91281fd3 1929 * ixgbe_clean_rxtx_many - msix (aka one shot) rx clean routine
1930 * @napi: napi struct with our devices info in it
1931 * @budget: amount of work driver is allowed to do this pass, in packets
1932 *
1933 * This function will clean more than one rx/tx queue associated with a
1934 * q_vector.
1935 **/
91281fd3 1936static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
1937{
1938 struct ixgbe_q_vector *q_vector =
1939 container_of(napi, struct ixgbe_q_vector, napi);
1940 struct ixgbe_adapter *adapter = q_vector->adapter;
91281fd3 1941 struct ixgbe_ring *ring = NULL;
f0848276
JB
1942 int work_done = 0, i;
1943 long r_idx;
1944 bool tx_clean_complete = true;
1945
1946 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1947 for (i = 0; i < q_vector->txr_count; i++) {
4a0b9ca0 1948 ring = adapter->tx_ring[r_idx];
1949#ifdef CONFIG_IXGBE_DCA
1950 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1951 ixgbe_update_tx_dca(adapter, ring);
1952#endif
1953 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
1954 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1955 r_idx + 1);
1956 }
1957
1958 /* attempt to distribute budget to each queue fairly, but don't allow
1959 * the budget to go below 1 because we'll exit polling */
1960 budget /= (q_vector->rxr_count ?: 1);
1961 budget = max(budget, 1);
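/*
 * e.g. a NAPI budget of 64 spread over 4 rx rings allows each ring
 * up to 16 packets of cleaning in this poll.
 */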
1962 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1963 for (i = 0; i < q_vector->rxr_count; i++) {
4a0b9ca0 1964 ring = adapter->rx_ring[r_idx];
5dd2d332 1965#ifdef CONFIG_IXGBE_DCA
f0848276 1966 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
91281fd3 1967 ixgbe_update_rx_dca(adapter, ring);
f0848276 1968#endif
91281fd3 1969 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
1970 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1971 r_idx + 1);
1972 }
1973
1974 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
4a0b9ca0 1975 ring = adapter->rx_ring[r_idx];
f0848276 1976 /* If all Rx work done, exit the polling mode */
7f821875 1977 if (work_done < budget) {
288379f0 1978 napi_complete(napi);
f7554a2b 1979 if (adapter->rx_itr_setting & 1)
1980 ixgbe_set_itr_msix(q_vector);
1981 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1982 ixgbe_irq_enable_queues(adapter,
1983 ((u64)1 << q_vector->v_idx));
1984 return 0;
1985 }
1986
1987 return work_done;
1988}
1989
1990/**
1991 * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
1992 * @napi: napi struct with our devices info in it
1993 * @budget: amount of work driver is allowed to do this pass, in packets
1994 *
1995 * This function is optimized for cleaning one queue only on a single
1996 * q_vector!!!
1997 **/
1998static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
1999{
2000 struct ixgbe_q_vector *q_vector =
2001 container_of(napi, struct ixgbe_q_vector, napi);
2002 struct ixgbe_adapter *adapter = q_vector->adapter;
2003 struct ixgbe_ring *tx_ring = NULL;
2004 int work_done = 0;
2005 long r_idx;
2006
2007 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
4a0b9ca0 2008 tx_ring = adapter->tx_ring[r_idx];
2009#ifdef CONFIG_IXGBE_DCA
2010 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2011 ixgbe_update_tx_dca(adapter, tx_ring);
2012#endif
2013
2014 if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
2015 work_done = budget;
2016
f7554a2b 2017 /* If all Tx work done, exit the polling mode */
2018 if (work_done < budget) {
2019 napi_complete(napi);
f7554a2b 2020 if (adapter->tx_itr_setting & 1)
2021 ixgbe_set_itr_msix(q_vector);
2022 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2023 ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
2024 }
2025
2026 return work_done;
2027}
2028
021230d4 2029static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
b4617240 2030 int r_idx)
021230d4 2031{
2032 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2033
2034 set_bit(r_idx, q_vector->rxr_idx);
2035 q_vector->rxr_count++;
2036}
2037
2038static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
7a921c93 2039 int t_idx)
021230d4 2040{
2041 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2042
2043 set_bit(t_idx, q_vector->txr_idx);
2044 q_vector->txr_count++;
2045}
2046
9a799d71 2047/**
2048 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
2049 * @adapter: board private structure to initialize
2050 * @vectors: allotted vector count for descriptor rings
9a799d71 2051 *
2052 * This function maps descriptor rings to the queue-specific vectors
2053 * we were allotted through the MSI-X enabling code. Ideally, we'd have
2054 * one vector per ring/queue, but on a constrained vector budget, we
2055 * group the rings as "efficiently" as possible. You would add new
2056 * mapping configurations in here.
9a799d71 2057 **/
021230d4 2058static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
b4617240 2059 int vectors)
2060{
2061 int v_start = 0;
2062 int rxr_idx = 0, txr_idx = 0;
2063 int rxr_remaining = adapter->num_rx_queues;
2064 int txr_remaining = adapter->num_tx_queues;
2065 int i, j;
2066 int rqpv, tqpv;
2067 int err = 0;
2068
2069 /* No mapping required if MSI-X is disabled. */
2070 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2071 goto out;
9a799d71 2072
2073 /*
2074 * The ideal configuration...
2075 * We have enough vectors to map one per queue.
2076 */
2077 if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
2078 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
2079 map_vector_to_rxq(adapter, v_start, rxr_idx);
9a799d71 2080
2081 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
2082 map_vector_to_txq(adapter, v_start, txr_idx);
9a799d71 2083
9a799d71 2084 goto out;
021230d4 2085 }
9a799d71 2086
2087 /*
2088 * If we don't have enough vectors for a 1-to-1
2089 * mapping, we'll have to group them so there are
2090 * multiple queues per vector.
2091 */
2092 /* Re-adjusting *qpv takes care of the remainder. */
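/*
 * e.g. 16 rx and 16 tx rings on 8 vectors: DIV_ROUND_UP(16, 8) = 2,
 * so each vector ends up servicing 2 rx and 2 tx rings.
 */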
2093 for (i = v_start; i < vectors; i++) {
2094 rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
2095 for (j = 0; j < rqpv; j++) {
2096 map_vector_to_rxq(adapter, i, rxr_idx);
2097 rxr_idx++;
2098 rxr_remaining--;
2099 }
2100 }
2101 for (i = v_start; i < vectors; i++) {
2102 tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
2103 for (j = 0; j < tqpv; j++) {
2104 map_vector_to_txq(adapter, i, txr_idx);
2105 txr_idx++;
2106 txr_remaining--;
9a799d71 2107 }
2108 }
2109
2110out:
2111 return err;
2112}
2113
2114/**
2115 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
2116 * @adapter: board private structure
2117 *
2118 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
2119 * interrupts from the kernel.
2120 **/
2121static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2122{
2123 struct net_device *netdev = adapter->netdev;
2124 irqreturn_t (*handler)(int, void *);
2125 int i, vector, q_vectors, err;
cb13fc20 2126 int ri = 0, ti = 0;
2127
2128 /* Decrement for Other and TCP Timer vectors */
2129 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2130
2131 /* Map the Tx/Rx rings to the vectors we were allotted. */
2132 err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
2133 if (err)
2134 goto out;
2135
2136#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
2137 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
2138 &ixgbe_msix_clean_many)
021230d4 2139 for (vector = 0; vector < q_vectors; vector++) {
7a921c93 2140 handler = SET_HANDLER(adapter->q_vector[vector]);
2141
2142 if (handler == &ixgbe_msix_clean_rx) {
2143 sprintf(adapter->name[vector], "%s-%s-%d",
2144 netdev->name, "rx", ri++);
2145 } else if (handler == &ixgbe_msix_clean_tx) {
2146 sprintf(adapter->name[vector], "%s-%s-%d",
2147 netdev->name, "tx", ti++);
2148 } else {
2149 sprintf(adapter->name[vector], "%s-%s-%d",
2150 netdev->name, "TxRx", vector);
2151 }
2153
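/*
 * The names built above are what show up in /proc/interrupts,
 * e.g. "eth0-rx-0", "eth0-tx-1" or "eth0-TxRx-2" ("eth0" here is
 * an illustrative device name).
 */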
021230d4 2154 err = request_irq(adapter->msix_entries[vector].vector,
b4617240 2155 handler, 0, adapter->name[vector],
7a921c93 2156 adapter->q_vector[vector]);
9a799d71 2157 if (err) {
396e799c 2158 e_err(probe, "request_irq failed for MSIX interrupt "
849c4542 2159 "Error: %d\n", err);
021230d4 2160 goto free_queue_irqs;
9a799d71 2161 }
2162 }
2163
2164 sprintf(adapter->name[vector], "%s:lsc", netdev->name);
2165 err = request_irq(adapter->msix_entries[vector].vector,
a0607fd3 2166 ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
9a799d71 2167 if (err) {
396e799c 2168 e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
021230d4 2169 goto free_queue_irqs;
2170 }
2171
2172 return 0;
2173
2174free_queue_irqs:
2175 for (i = vector - 1; i >= 0; i--)
2176 free_irq(adapter->msix_entries[--vector].vector,
7a921c93 2177 adapter->q_vector[i]);
2178 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2179 pci_disable_msix(adapter->pdev);
2180 kfree(adapter->msix_entries);
2181 adapter->msix_entries = NULL;
021230d4 2182out:
2183 return err;
2184}
2185
2186static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
2187{
7a921c93 2188 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2189 u8 current_itr;
2190 u32 new_itr = q_vector->eitr;
2191 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
2192 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
f494e8fa 2193
30efa5a3 2194 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
2195 q_vector->tx_itr,
2196 tx_ring->total_packets,
2197 tx_ring->total_bytes);
30efa5a3 2198 q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
2199 q_vector->rx_itr,
2200 rx_ring->total_packets,
2201 rx_ring->total_bytes);
f494e8fa 2202
30efa5a3 2203 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
2204
2205 switch (current_itr) {
2206 /* counts and packets in update_itr are dependent on these numbers */
2207 case lowest_latency:
2208 new_itr = 100000;
2209 break;
2210 case low_latency:
2211 new_itr = 20000; /* aka hwitr = ~200 */
2212 break;
2213 case bulk_latency:
2214 new_itr = 8000;
2215 break;
2216 default:
2217 break;
2218 }
2219
2220 if (new_itr != q_vector->eitr) {
2221 /* do an exponential smoothing */
2222 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
2223
2224 /* save the algorithm value here, not the smoothed one */
2225 q_vector->eitr = new_itr;
2226
2227 ixgbe_write_eitr(q_vector);
f494e8fa 2228 }
2229}
2230
2231/**
2232 * ixgbe_irq_enable - Enable default interrupt generation settings
2233 * @adapter: board private structure
2234 **/
2235static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
2236{
2237 u32 mask;
2238
2239 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2240 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2241 mask |= IXGBE_EIMS_GPI_SDP0;
2242 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2243 mask |= IXGBE_EIMS_GPI_SDP1;
e8e26350 2244 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
2a41ff81 2245 mask |= IXGBE_EIMS_ECC;
2246 mask |= IXGBE_EIMS_GPI_SDP1;
2247 mask |= IXGBE_EIMS_GPI_SDP2;
2248 if (adapter->num_vfs)
2249 mask |= IXGBE_EIMS_MAILBOX;
e8e26350 2250 }
2251 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
2252 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
2253 mask |= IXGBE_EIMS_FLOW_DIR;
e8e26350 2254
79aefa45 2255 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
835462fc 2256 ixgbe_irq_enable_queues(adapter, ~0);
79aefa45 2257 IXGBE_WRITE_FLUSH(&adapter->hw);
2258
2259 if (adapter->num_vfs > 32) {
2260 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
2261 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2262 }
79aefa45 2263}
021230d4 2264
9a799d71 2265/**
021230d4 2266 * ixgbe_intr - legacy mode Interrupt Handler
2267 * @irq: interrupt number
2268 * @data: pointer to a network interface device structure
2269 **/
2270static irqreturn_t ixgbe_intr(int irq, void *data)
2271{
2272 struct net_device *netdev = data;
2273 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2274 struct ixgbe_hw *hw = &adapter->hw;
7a921c93 2275 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2276 u32 eicr;
2277
2278 /*
2279 * Workaround for silicon errata. Mask the interrupts
2280 * before the read of EICR.
2281 */
2282 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
2283
2284 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read,
2285 * therefore no explicit interrupt disable is necessary */
2286 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2287 if (!eicr) {
2288 /* shared interrupt alert!
2289 * make sure interrupts are enabled because the read will
2290 * have disabled interrupts due to EIAM */
2291 ixgbe_irq_enable(adapter);
9a799d71 2292 return IRQ_NONE; /* Not our interrupt */
f47cf66e 2293 }
9a799d71 2294
2295 if (eicr & IXGBE_EICR_LSC)
2296 ixgbe_check_lsc(adapter);
021230d4 2297
2298 if (hw->mac.type == ixgbe_mac_82599EB)
2299 ixgbe_check_sfp_event(adapter, eicr);
2300
0befdb3e 2301 ixgbe_check_fan_failure(adapter, eicr);
2302 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2303 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
2304 schedule_work(&adapter->check_overtemp_task);
0befdb3e 2305
7a921c93 2306 if (napi_schedule_prep(&(q_vector->napi))) {
2307 adapter->tx_ring[0]->total_packets = 0;
2308 adapter->tx_ring[0]->total_bytes = 0;
2309 adapter->rx_ring[0]->total_packets = 0;
2310 adapter->rx_ring[0]->total_bytes = 0;
021230d4 2311 /* would disable interrupts here but EIAM disabled it */
7a921c93 2312 __napi_schedule(&(q_vector->napi));
2313 }
2314
2315 return IRQ_HANDLED;
2316}
2317
2318static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
2319{
2320 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2321
2322 for (i = 0; i < q_vectors; i++) {
7a921c93 2323 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
2324 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
2325 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
2326 q_vector->rxr_count = 0;
2327 q_vector->txr_count = 0;
2328 }
2329}
2330
2331/**
2332 * ixgbe_request_irq - initialize interrupts
2333 * @adapter: board private structure
2334 *
2335 * Attempts to configure interrupts using the best available
2336 * capabilities of the hardware and kernel.
2337 **/
021230d4 2338static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
2339{
2340 struct net_device *netdev = adapter->netdev;
021230d4 2341 int err;
9a799d71 2342
2343 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2344 err = ixgbe_request_msix_irqs(adapter);
2345 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
a0607fd3 2346 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
b4617240 2347 netdev->name, netdev);
021230d4 2348 } else {
a0607fd3 2349 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
b4617240 2350 netdev->name, netdev);
2351 }
2352
9a799d71 2353 if (err)
396e799c 2354 e_err(probe, "request_irq failed, Error %d\n", err);
9a799d71 2355
2356 return err;
2357}
2358
2359static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2360{
2361 struct net_device *netdev = adapter->netdev;
2362
2363 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
021230d4 2364 int i, q_vectors;
9a799d71 2365
2366 q_vectors = adapter->num_msix_vectors;
2367
2368 i = q_vectors - 1;
9a799d71 2369 free_irq(adapter->msix_entries[i].vector, netdev);
9a799d71 2370
2371 i--;
2372 for (; i >= 0; i--) {
2373 free_irq(adapter->msix_entries[i].vector,
7a921c93 2374 adapter->q_vector[i]);
2375 }
2376
2377 ixgbe_reset_q_vectors(adapter);
2378 } else {
2379 free_irq(adapter->pdev->irq, netdev);
2380 }
2381}
2382
2383/**
2384 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
2385 * @adapter: board private structure
2386 **/
2387static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
2388{
2389 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2390 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
2391 } else {
2392 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
2393 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
22d5a71b 2394 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
2395 if (adapter->num_vfs > 32)
2396 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
2397 }
2398 IXGBE_WRITE_FLUSH(&adapter->hw);
2399 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2400 int i;
2401 for (i = 0; i < adapter->num_msix_vectors; i++)
2402 synchronize_irq(adapter->msix_entries[i].vector);
2403 } else {
2404 synchronize_irq(adapter->pdev->irq);
2405 }
2406}
2407
2408/**
2409 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
2410 *
2411 **/
2412static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2413{
2414 struct ixgbe_hw *hw = &adapter->hw;
2415
021230d4 2416 IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
f7554a2b 2417 EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
9a799d71 2418
2419 ixgbe_set_ivar(adapter, 0, 0, 0);
2420 ixgbe_set_ivar(adapter, 1, 0, 0);
2421
2422 map_vector_to_rxq(adapter, 0, 0);
2423 map_vector_to_txq(adapter, 0, 0);
2424
396e799c 2425 e_info(hw, "Legacy interrupt IVAR setup done\n");
2426}
2427
2428/**
2429 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
2430 * @adapter: board private structure
2431 * @ring: structure containing ring specific data
2432 *
2433 * Configure the Tx descriptor ring after a reset.
2434 **/
2435void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2436 struct ixgbe_ring *ring)
2437{
2438 struct ixgbe_hw *hw = &adapter->hw;
2439 u64 tdba = ring->dma;
2440 int wait_loop = 10;
2441 u32 txdctl;
2442 u16 reg_idx = ring->reg_idx;
2443
2444 /* disable queue to avoid issues while updating state */
2445 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2446 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
2447 txdctl & ~IXGBE_TXDCTL_ENABLE);
2448 IXGBE_WRITE_FLUSH(hw);
2449
2450 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
2451 (tdba & DMA_BIT_MASK(32)));
2452 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
2453 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
2454 ring->count * sizeof(union ixgbe_adv_tx_desc));
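/* e.g. a 512-descriptor ring programs TDLEN = 512 * 16 = 8192 bytes,
 * since each union ixgbe_adv_tx_desc is 16 bytes */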
2455 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
2456 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
2457 ring->head = IXGBE_TDH(reg_idx);
2458 ring->tail = IXGBE_TDT(reg_idx);
2459
2460 /* configure fetching thresholds */
2461 if (adapter->rx_itr_setting == 0) {
2462 /* cannot set wthresh when itr==0 */
2463 txdctl &= ~0x007F0000;
2464 } else {
2465 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
2466 txdctl |= (8 << 16);
2467 }
2468 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
2469 /* PThresh workaround for Tx hang with DFP enabled. */
2470 txdctl |= 32;
2471 }
2472
2473 /* reinitialize flowdirector state */
2474 set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
2475
2476 /* enable queue */
2477 txdctl |= IXGBE_TXDCTL_ENABLE;
2478 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
2479
2480 /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2481 if (hw->mac.type == ixgbe_mac_82598EB &&
2482 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
2483 return;
2484
2485 /* poll to verify queue is enabled */
2486 do {
2487 msleep(1);
2488 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2489 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
2490 if (!wait_loop)
2491 e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
2492}
2493
2494static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
2495{
2496 struct ixgbe_hw *hw = &adapter->hw;
2497 u32 rttdcs;
2498 u32 mask;
2499
2500 if (hw->mac.type == ixgbe_mac_82598EB)
2501 return;
2502
2503 /* disable the arbiter while setting MTQC */
2504 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2505 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2506 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2507
2508 /* set transmit pool layout */
2509 mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
2510 switch (adapter->flags & mask) {
2511
2512 case (IXGBE_FLAG_SRIOV_ENABLED):
2513 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2514 (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
2515 break;
2516
2517 case (IXGBE_FLAG_DCB_ENABLED):
2518 /* We enable 8 traffic classes, DCB only */
2519 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2520 (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
2521 break;
2522
2523 default:
2524 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2525 break;
2526 }
2527
2528 /* re-enable the arbiter */
2529 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2530 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2531}
2532
9a799d71 2533/**
3a581073 2534 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
2535 * @adapter: board private structure
2536 *
2537 * Configure the Tx unit of the MAC after a reset.
2538 **/
2539static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
2540{
2541 struct ixgbe_hw *hw = &adapter->hw;
2542 u32 dmatxctl;
43e69bf0 2543 u32 i;
9a799d71 2544
2545 ixgbe_setup_mtqc(adapter);
2546
2547 if (hw->mac.type != ixgbe_mac_82598EB) {
2548 /* DMATXCTL.EN must be before Tx queues are enabled */
2549 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2550 dmatxctl |= IXGBE_DMATXCTL_TE;
2551 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2552 }
2553
9a799d71 2554 /* Setup the HW Tx Head and Tail descriptor pointers */
2555 for (i = 0; i < adapter->num_tx_queues; i++)
2556 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
2557}
2558
e8e26350 2559#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
cc41ac7c 2560
2561static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2562 struct ixgbe_ring *rx_ring)
cc41ac7c 2563{
cc41ac7c 2564 u32 srrctl;
a6616b42 2565 int index;
0cefafad 2566 struct ixgbe_ring_feature *feature = adapter->ring_feature;
3be1adfb 2567
2568 index = rx_ring->reg_idx;
2569 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2570 unsigned long mask;
0cefafad 2571 mask = (unsigned long) feature[RING_F_RSS].mask;
3be1adfb 2572 index = index & mask;
cc41ac7c 2573 }
2574 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
2575
2576 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2577 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2578 if (adapter->num_vfs)
2579 srrctl |= IXGBE_SRRCTL_DROP_EN;
cc41ac7c 2580
2581 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
2582 IXGBE_SRRCTL_BSIZEHDR_MASK;
2583
6e455b89 2584 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
2585#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
2586 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2587#else
2588 srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2589#endif
cc41ac7c 2590 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
cc41ac7c 2591 } else {
2592 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
2593 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
cc41ac7c 2594 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
cc41ac7c 2595 }
e8e26350 2596
2597 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
2598}
9a799d71 2599
05abb126 2600static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
0cefafad 2601{
2602 struct ixgbe_hw *hw = &adapter->hw;
2603 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
2604 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
2605 0x6A3E67EA, 0x14364D17, 0x3BED200D};
2606 u32 mrqc = 0, reta = 0;
2607 u32 rxcsum;
2608 int i, j;
2609 int mask;
2610
2611 /* Fill out hash function seeds */
2612 for (i = 0; i < 10; i++)
2613 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
2614
2615 /* Fill out redirection table */
2616 for (i = 0, j = 0; i < 128; i++, j++) {
2617 if (j == adapter->ring_feature[RING_F_RSS].indices)
2618 j = 0;
2619 /* reta = 4-byte sliding window of
2620 * 0x00..(indices-1)(indices-1)00..etc. */
2621 reta = (reta << 8) | (j * 0x11);
2622 if ((i & 3) == 3)
2623 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2624 }
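/*
 * e.g. with 4 RSS indices each 32-bit RETA write is 0x00112233,
 * spreading the 128 table entries round-robin across queues 0-3.
 */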
0cefafad 2625
2626 /* Disable indicating checksum in descriptor, enables RSS hash */
2627 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2628 rxcsum |= IXGBE_RXCSUM_PCSD;
2629 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2630
2631 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2632 mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED;
2633 else
2634 mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
0cefafad 2635#ifdef CONFIG_IXGBE_DCB
05abb126 2636 | IXGBE_FLAG_DCB_ENABLED
0cefafad 2637#endif
2638 | IXGBE_FLAG_SRIOV_ENABLED
2639 );
2640
2641 switch (mask) {
2642 case (IXGBE_FLAG_RSS_ENABLED):
2643 mrqc = IXGBE_MRQC_RSSEN;
2644 break;
2645 case (IXGBE_FLAG_SRIOV_ENABLED):
2646 mrqc = IXGBE_MRQC_VMDQEN;
2647 break;
2648#ifdef CONFIG_IXGBE_DCB
2649 case (IXGBE_FLAG_DCB_ENABLED):
2650 mrqc = IXGBE_MRQC_RT8TCEN;
2651 break;
2652#endif /* CONFIG_IXGBE_DCB */
2653 default:
2654 break;
2655 }
2656
2657 /* Perform hash on these packet types */
2658 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2659 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2660 | IXGBE_MRQC_RSS_FIELD_IPV6
2661 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2662
2663 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2664}
2665
2666/**
2667 * ixgbe_configure_rscctl - enable RSC for the indicated ring
2668 * @adapter: address of board private structure
2669 * @ring: structure containing ring specific data
bb5a9ad2 2670 **/
2671static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
2672 struct ixgbe_ring *ring)
bb5a9ad2 2673{
bb5a9ad2 2674 struct ixgbe_hw *hw = &adapter->hw;
bb5a9ad2 2675 u32 rscctrl;
edd2ea55 2676 int rx_buf_len;
2677 u16 reg_idx = ring->reg_idx;
2678
2679 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
2680 return;
bb5a9ad2 2681
2682 rx_buf_len = ring->rx_buf_len;
2683 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
2684 rscctrl |= IXGBE_RSCCTL_RSCEN;
2685 /*
2686 * we must limit the number of descriptors so that the
2687 * total size of max desc * buf_len is not greater
2688 * than 65535
2689 */
7367096a 2690 if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
2691#if (MAX_SKB_FRAGS > 16)
2692 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2693#elif (MAX_SKB_FRAGS > 8)
2694 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2695#elif (MAX_SKB_FRAGS > 4)
2696 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2697#else
2698 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
2699#endif
2700 } else {
2701 if (rx_buf_len < IXGBE_RXBUFFER_4096)
2702 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2703 else if (rx_buf_len < IXGBE_RXBUFFER_8192)
2704 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2705 else
2706 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2707 }
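/*
 * e.g. a 4K buffer with MAXDESC_16 could chain 16 * 4096 = 65536
 * bytes and exceed the 65535 limit, so MAXDESC_8 is used instead
 * for that buffer size.
 */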
7367096a 2708 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
2709}
2710
2711/**
2712 * ixgbe_set_uta - Set unicast filter table address
2713 * @adapter: board private structure
2714 *
2715 * The unicast table address is a register array of 32-bit registers.
2716 * The table is meant to be used in a way similar to how the MTA is used;
2717 * however, due to certain limitations in the hardware it is necessary to
2718 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
2719 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
2720 **/
2721static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
2722{
2723 struct ixgbe_hw *hw = &adapter->hw;
2724 int i;
2725
2726 /* The UTA table only exists on 82599 hardware and newer */
2727 if (hw->mac.type < ixgbe_mac_82599EB)
2728 return;
2729
2730 /* we only need to do this if VMDq is enabled */
2731 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2732 return;
2733
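/* 128 registers x 32 bits = all 4096 UTA hash bits set */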
2734 for (i = 0; i < 128; i++)
2735 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
2736}
2737
2738#define IXGBE_MAX_RX_DESC_POLL 10
2739static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2740 struct ixgbe_ring *ring)
2741{
2742 struct ixgbe_hw *hw = &adapter->hw;
2743 int reg_idx = ring->reg_idx;
2744 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
2745 u32 rxdctl;
2746
2747 /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2748 if (hw->mac.type == ixgbe_mac_82598EB &&
2749 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
2750 return;
2751
2752 do {
2753 msleep(1);
2754 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
2755 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
2756
2757 if (!wait_loop) {
2758 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
2759 "the polling period\n", reg_idx);
2760 }
2761}
2762
2763void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
2764 struct ixgbe_ring *ring)
2765{
2766 struct ixgbe_hw *hw = &adapter->hw;
2767 u64 rdba = ring->dma;
9e10e045 2768 u32 rxdctl;
2769 u16 reg_idx = ring->reg_idx;
2770
2771 /* disable queue to avoid issues while updating state */
2772 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
2773 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
2774 rxdctl & ~IXGBE_RXDCTL_ENABLE);
2775 IXGBE_WRITE_FLUSH(hw);
2776
2777 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
2778 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
2779 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
2780 ring->count * sizeof(union ixgbe_adv_rx_desc));
2781 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
2782 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
2783 ring->head = IXGBE_RDH(reg_idx);
2784 ring->tail = IXGBE_RDT(reg_idx);
2785
2786 ixgbe_configure_srrctl(adapter, ring);
2787 ixgbe_configure_rscctl(adapter, ring);
2788
2789 if (hw->mac.type == ixgbe_mac_82598EB) {
2790 /*
2791 * enable cache line friendly hardware writes:
2792 * PTHRESH=32 descriptors (half the internal cache),
2793 * this also removes ugly rx_no_buffer_count increment
2794 * HTHRESH=4 descriptors (to minimize latency on fetch)
2795 * WTHRESH=8 burst writeback up to two cache lines
2796 */
2797 rxdctl &= ~0x3FFFFF;
2798 rxdctl |= 0x080420;
2799 }
2800
2801 /* enable receive descriptor ring */
2802 rxdctl |= IXGBE_RXDCTL_ENABLE;
2803 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
2804
2805 ixgbe_rx_desc_queue_enable(adapter, ring);
2806 ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring));
2807}
2808
2809static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
2810{
2811 struct ixgbe_hw *hw = &adapter->hw;
2812 int p;
2813
2814 /* PSRTYPE must be initialized in non 82598 adapters */
2815 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2816 IXGBE_PSRTYPE_UDPHDR |
2817 IXGBE_PSRTYPE_IPV4HDR |
2818 IXGBE_PSRTYPE_L2HDR |
2819 IXGBE_PSRTYPE_IPV6HDR;
2820
2821 if (hw->mac.type == ixgbe_mac_82598EB)
2822 return;
2823
2824 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
2825 psrtype |= (adapter->num_rx_queues_per_pool << 29);
2826
2827 for (p = 0; p < adapter->num_rx_pools; p++)
2828 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
2829 psrtype);
2830}
2831
2832static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
2833{
2834 struct ixgbe_hw *hw = &adapter->hw;
2835 u32 gcr_ext;
2836 u32 vt_reg_bits;
2837 u32 reg_offset, vf_shift;
2838 u32 vmdctl;
2839
2840 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2841 return;
2842
2843 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2844 vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
2845 vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
2846 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
2847
2848 vf_shift = adapter->num_vfs % 32;
2849 reg_offset = (adapter->num_vfs > 32) ? 1 : 0;
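/*
 * e.g. with 40 VFs: vf_shift = 8 and reg_offset = 1, so the PF's
 * enable bit lands in bit 8 of VFRE(1)/VFTE(1).
 */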
2850
2851 /* Enable only the PF's pool for Tx/Rx */
2852 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
2853 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
2854 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
2855 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
2856 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2857
2858 /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
2859 hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
2860
2861 /*
2862 * Set up VF register offsets for selected VT Mode,
2863 * i.e. 32 or 64 VFs for SR-IOV
2864 */
2865 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
2866 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
2867 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
2868 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
2869
2870 /* enable Tx loopback for VF/PF communication */
2871 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2872}
2873
477de6ed 2874static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
9a799d71 2875{
2876 struct ixgbe_hw *hw = &adapter->hw;
2877 struct net_device *netdev = adapter->netdev;
2878 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
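/* e.g. a standard 1500-byte MTU yields max_frame = 1500 + 14 + 4 = 1518 */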
7c6e0a43 2879 int rx_buf_len;
2880 struct ixgbe_ring *rx_ring;
2881 int i;
2882 u32 mhadd, hlreg0;
48654521 2883
9a799d71 2884 /* Decide whether to use packet split mode or not */
2885 /* Do not use packet split if we're in SR-IOV Mode */
2886 if (!adapter->num_vfs)
2887 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
2888
2889 /* Set the RX buffer length according to the mode */
2890 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
7c6e0a43 2891 rx_buf_len = IXGBE_RX_HDR_SIZE;
9a799d71 2892 } else {
0c19d6af 2893 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
f8212f97 2894 (netdev->mtu <= ETH_DATA_LEN))
7c6e0a43 2895 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
9a799d71 2896 else
477de6ed 2897 rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024);
2898 }
2899
63f39bd1 2900#ifdef IXGBE_FCOE
2901 /* adjust max frame to be able to do baby jumbo for FCoE */
2902 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
2903 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
2904 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
9a799d71 2905
2906#endif /* IXGBE_FCOE */
2907 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2908 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
2909 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2910 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
2911
2912 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2913 }
2914
2915 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2916 /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
2917 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
2918 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
9a799d71 2919
2920 /*
2921 * Setup the HW Rx Head and Tail Descriptor Pointers and
2922 * the Base and Length of the Rx Descriptor Ring
2923 */
9a799d71 2924 for (i = 0; i < adapter->num_rx_queues; i++) {
4a0b9ca0 2925 rx_ring = adapter->rx_ring[i];
a6616b42 2926 rx_ring->rx_buf_len = rx_buf_len;
cc41ac7c 2927
2928 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
2929 rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
2930 else
2931 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
cc41ac7c 2932
63f39bd1 2933#ifdef IXGBE_FCOE
2934 if (netdev->features & NETIF_F_FCOE_MTU) {
2936 struct ixgbe_ring_feature *f;
2937 f = &adapter->ring_feature[RING_F_FCOE];
2938 if ((i >= f->mask) && (i < f->mask + f->indices)) {
2939 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
2940 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
2941 rx_ring->rx_buf_len =
2942 IXGBE_FCOE_JUMBO_FRAME_SIZE;
2943 }
63f39bd1 2944 }
63f39bd1 2945#endif /* IXGBE_FCOE */
2946 }
2947
2948}
2949
2950static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
2951{
2952 struct ixgbe_hw *hw = &adapter->hw;
2953 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2954
2955 switch (hw->mac.type) {
2956 case ixgbe_mac_82598EB:
2957 /*
2958 * For VMDq support of different descriptor types or
2959 * buffer sizes through the use of multiple SRRCTL
2960 * registers, RDRXCTL.MVMEN must be set to 1
2961 *
2962 * also, the manual doesn't mention it clearly but DCA hints
2963 * will only use queue 0's tags unless this bit is set. Side
2964 * effects of setting this bit are only that SRRCTL must be
2965 * fully programmed [0..15]
2966 */
2967 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
2968 break;
2969 case ixgbe_mac_82599EB:
2970 /* Disable RSC for ACK packets */
2971 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
2972 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
2973 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2974 /* hardware requires some bits to be set by default */
2975 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
2976 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
2977 break;
2978 default:
2979 /* We should do nothing since we don't know this hardware */
2980 return;
2981 }
2982
2983 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
2984}
2985
2986/**
2987 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
2988 * @adapter: board private structure
2989 *
2990 * Configure the Rx unit of the MAC after a reset.
2991 **/
2992static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2993{
2994 struct ixgbe_hw *hw = &adapter->hw;
2995 int i;
2996 u32 rxctrl;
2997
2998 /* disable receives while setting up the descriptors */
2999 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3000 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3001
3002 ixgbe_setup_psrtype(adapter);
7367096a 3003 ixgbe_setup_rdrxctl(adapter);
477de6ed 3004
9e10e045 3005 /* Program registers for the distribution of queues */
3006 ixgbe_setup_mrqc(adapter);
3007 ixgbe_configure_virtualization(adapter);
3008
3009 ixgbe_set_uta(adapter);
3010
3011 /* set_rx_buffer_len must be called before ring initialization */
3012 ixgbe_set_rx_buffer_len(adapter);
3013
3014 /*
3015 * Setup the HW Rx Head and Tail Descriptor Pointers and
3016 * the Base and Length of the Rx Descriptor Ring
3017 */
3018 for (i = 0; i < adapter->num_rx_queues; i++)
3019 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
177db6ff 3020
3021 /* disable drop enable for 82598 parts */
3022 if (hw->mac.type == ixgbe_mac_82598EB)
3023 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3024
3025 /* enable all receives */
3026 rxctrl |= IXGBE_RXCTRL_RXEN;
3027 hw->mac.ops.enable_rx_dma(hw, rxctrl);
3028}
3029
3030static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
3031{
3032 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3033 struct ixgbe_hw *hw = &adapter->hw;
1ada1b1b 3034 int pool_ndx = adapter->num_vfs;
3035
3036 /* add VID to filter table */
1ada1b1b 3037 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
3038}
3039
3040static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
3041{
3042 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3043 struct ixgbe_hw *hw = &adapter->hw;
1ada1b1b 3044 int pool_ndx = adapter->num_vfs;
3045
3046 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3047 ixgbe_irq_disable(adapter);
3048
3049 vlan_group_set_device(adapter->vlgrp, vid, NULL);
3050
3051 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3052 ixgbe_irq_enable(adapter);
3053
3054 /* remove VID from filter table */
1ada1b1b 3055 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
3056}
3057
3058/**
3059 * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
3060 * @adapter: driver data
3061 */
3062static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
3063{
3064 struct ixgbe_hw *hw = &adapter->hw;
3065 u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3066 int i, j;
3067
3068 switch (hw->mac.type) {
3069 case ixgbe_mac_82598EB:
3070 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
3071#ifdef CONFIG_IXGBE_DCB
3072 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
3073 vlnctrl &= ~IXGBE_VLNCTRL_VME;
3074#endif
3075 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3076 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3077 break;
3078 case ixgbe_mac_82599EB:
3079 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
3080 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3081 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3082#ifdef CONFIG_IXGBE_DCB
3083 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
3084 break;
3085#endif
3086 for (i = 0; i < adapter->num_rx_queues; i++) {
3087 j = adapter->rx_ring[i]->reg_idx;
3088 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3089 vlnctrl &= ~IXGBE_RXDCTL_VME;
3090 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3091 }
3092 break;
3093 default:
3094 break;
3095 }
3096}
3097
3098/**
3099 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
3100 * @adapter: driver data
3101 */
3102static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
3103{
3104 struct ixgbe_hw *hw = &adapter->hw;
3105 u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3106 int i, j;
3107
3108 switch (hw->mac.type) {
3109 case ixgbe_mac_82598EB:
3110 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
3111 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3112 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3113 break;
3114 case ixgbe_mac_82599EB:
3115 vlnctrl |= IXGBE_VLNCTRL_VFE;
3116 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3117 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3118 for (i = 0; i < adapter->num_rx_queues; i++) {
3119 j = adapter->rx_ring[i]->reg_idx;
3120 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3121 vlnctrl |= IXGBE_RXDCTL_VME;
3122 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3123 }
3124 break;
3125 default:
3126 break;
3127 }
3128}
3129
9a799d71 3130static void ixgbe_vlan_rx_register(struct net_device *netdev,
b4617240 3131 struct vlan_group *grp)
3132{
3133 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9a799d71 3134
3135 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3136 ixgbe_irq_disable(adapter);
3137 adapter->vlgrp = grp;
3138
3139 /*
3140 * For a DCB driver, always enable VLAN tag stripping so we can
3141 * still receive traffic from a DCB-enabled host even if we're
3142 * not in DCB mode.
3143 */
5f6c0181 3144 ixgbe_vlan_filter_enable(adapter);
dc63d377 3145
e8e26350 3146 ixgbe_vlan_rx_add_vid(netdev, 0);
9a799d71 3147
3148 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3149 ixgbe_irq_enable(adapter);
3150}
3151
3152static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
3153{
3154 ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);
3155
3156 if (adapter->vlgrp) {
3157 u16 vid;
3158 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
3159 if (!vlan_group_get_device(adapter->vlgrp, vid))
3160 continue;
3161 ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
3162 }
3163 }
3164}
3165
3166/**
3167 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
3168 * @netdev: network interface device structure
3169 *
3170 * Writes unicast address list to the RAR table.
3171 * Returns: -ENOMEM on failure/insufficient address space
3172 * 0 on no addresses written
3173 * X on writing X addresses to the RAR table
3174 **/
3175static int ixgbe_write_uc_addr_list(struct net_device *netdev)
3176{
3177 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3178 struct ixgbe_hw *hw = &adapter->hw;
3179 unsigned int vfn = adapter->num_vfs;
3180 unsigned int rar_entries = hw->mac.num_rar_entries - (vfn + 1);
3181 int count = 0;
3182
3183 /* return ENOMEM indicating insufficient memory for addresses */
3184 if (netdev_uc_count(netdev) > rar_entries)
3185 return -ENOMEM;
3186
3187 if (!netdev_uc_empty(netdev) && rar_entries) {
3188 struct netdev_hw_addr *ha;
3189 /* return error if we do not support writing to RAR table */
3190 if (!hw->mac.ops.set_rar)
3191 return -ENOMEM;
3192
3193 netdev_for_each_uc_addr(ha, netdev) {
3194 if (!rar_entries)
3195 break;
3196 hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
3197 vfn, IXGBE_RAH_AV);
3198 count++;
3199 }
3200 }
3201 /* write the addresses in reverse order to avoid write combining */
3202 for (; rar_entries > 0 ; rar_entries--)
3203 hw->mac.ops.clear_rar(hw, rar_entries);
3204
3205 return count;
3206}
3207
9a799d71 3208/**
2c5645cf 3209 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
3210 * @netdev: network interface device structure
3211 *
3212 * The set_rx_mode entry point is called whenever the unicast/multicast
3213 * address list or the network interface flags are updated. This routine is
3214 * responsible for configuring the hardware for proper unicast, multicast and
3215 * promiscuous mode.
9a799d71 3216 **/
7f870475 3217void ixgbe_set_rx_mode(struct net_device *netdev)
3218{
3219 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3220 struct ixgbe_hw *hw = &adapter->hw;
3221 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
3222 int count;
3223
3224 /* Check for Promiscuous and All Multicast modes */
3225
3226 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3227
3228 /* set all bits that we expect to always be set */
3229 fctrl |= IXGBE_FCTRL_BAM;
3230 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
3231 fctrl |= IXGBE_FCTRL_PMCF;
3232
3233 /* clear the bits we are changing the status of */
3234 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3235
9a799d71 3236 if (netdev->flags & IFF_PROMISC) {
e433ea1f 3237 hw->addr_ctrl.user_set_promisc = true;
9a799d71 3238 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2850062a 3239 vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
3240 /* don't hardware filter vlans in promisc mode */
3241 ixgbe_vlan_filter_disable(adapter);
9a799d71 3242 } else {
3243 if (netdev->flags & IFF_ALLMULTI) {
3244 fctrl |= IXGBE_FCTRL_MPE;
3245 vmolr |= IXGBE_VMOLR_MPE;
3246 } else {
3247 /*
3248 * Write addresses to the MTA; if the attempt fails
3249 * then we should just turn on promiscuous mode so
3250 * that we can at least receive multicast traffic
3251 */
3252 hw->mac.ops.update_mc_addr_list(hw, netdev);
3253 vmolr |= IXGBE_VMOLR_ROMPE;
746b9f02 3254 }
5f6c0181 3255 ixgbe_vlan_filter_enable(adapter);
e433ea1f 3256 hw->addr_ctrl.user_set_promisc = false;
3257 /*
3258 * Write addresses to available RAR registers; if there is not
3259 * sufficient space to store all the addresses then enable
3260 * unicast promiscuous mode
3261 */
3262 count = ixgbe_write_uc_addr_list(netdev);
3263 if (count < 0) {
3264 fctrl |= IXGBE_FCTRL_UPE;
3265 vmolr |= IXGBE_VMOLR_ROPE;
3266 }
3267 }
3268
2850062a 3269 if (adapter->num_vfs) {
1cdd1ec8 3270 ixgbe_restore_vf_multicasts(adapter);
3271 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
3272 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
3273 IXGBE_VMOLR_ROPE);
3274 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
3275 }
3276
3277 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
9a799d71
AK
3278}
3279
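
The FCTRL handling in ixgbe_set_rx_mode() is a textbook read-modify-write:
invariant bits are OR'd in, the two bits this function owns are cleared and
then re-added per mode, and the register is written back exactly once. A
minimal model of that pattern, with invented bit values (not the real FCTRL
layout):

#include <stdio.h>
#include <stdint.h>

/* invented bit positions, for illustration only */
#define CTL_ALWAYS_ON	0x0003u		/* stands in for BAM | DPF | PMCF */
#define CTL_UPE		0x0100u		/* unicast promiscuous   */
#define CTL_MPE		0x0200u		/* multicast promiscuous */

int main(void)
{
	uint32_t reg = 0x0200u;			/* pretend hardware state */
	int promisc = 1;

	uint32_t ctl = reg;			/* read                     */
	ctl |= CTL_ALWAYS_ON;			/* set the invariant bits   */
	ctl &= ~(CTL_UPE | CTL_MPE);		/* clear the bits we manage */
	if (promisc)
		ctl |= CTL_UPE | CTL_MPE;	/* re-add per current mode  */
	reg = ctl;				/* single write-back        */

	printf("register: 0x%04x\n", (unsigned int)reg);	/* 0x0303 */
	return 0;
}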
021230d4
AV
3280static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
3281{
3282 int q_idx;
3283 struct ixgbe_q_vector *q_vector;
3284 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3285
3286 /* legacy and MSI only use one vector */
3287 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
3288 q_vectors = 1;
3289
3290 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
f0848276 3291 struct napi_struct *napi;
7a921c93 3292 q_vector = adapter->q_vector[q_idx];
f0848276 3293 napi = &q_vector->napi;
91281fd3
AD
3294 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3295 if (!q_vector->rxr_count || !q_vector->txr_count) {
3296 if (q_vector->txr_count == 1)
3297 napi->poll = &ixgbe_clean_txonly;
3298 else if (q_vector->rxr_count == 1)
3299 napi->poll = &ixgbe_clean_rxonly;
3300 }
3301 }
f0848276
JB
3302
3303 napi_enable(napi);
021230d4
AV
3304 }
3305}
3306
3307static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
3308{
3309 int q_idx;
3310 struct ixgbe_q_vector *q_vector;
3311 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3312
3313 /* legacy and MSI only use one vector */
3314 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
3315 q_vectors = 1;
3316
3317 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
7a921c93 3318 q_vector = adapter->q_vector[q_idx];
021230d4
AV
3319 napi_disable(&q_vector->napi);
3320 }
3321}
3322
7a6b6f51 3323#ifdef CONFIG_IXGBE_DCB
2f90b865
AD
3324/*
3325 * ixgbe_configure_dcb - Configure DCB hardware
3326 * @adapter: ixgbe adapter struct
3327 *
3328 * This is called by the driver on open to configure the DCB hardware.
3329 * This is also called by the gennetlink interface when reconfiguring
3330 * the DCB state.
3331 */
3332static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3333{
3334 struct ixgbe_hw *hw = &adapter->hw;
5f6c0181 3335 u32 txdctl;
2f90b865
AD
3336 int i, j;
3337
67ebd791
AD
3338 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
3339 if (hw->mac.type == ixgbe_mac_82598EB)
3340 netif_set_gso_max_size(adapter->netdev, 65536);
3341 return;
3342 }
3343
3344 if (hw->mac.type == ixgbe_mac_82598EB)
3345 netif_set_gso_max_size(adapter->netdev, 32768);
3346
2f90b865
AD
3347 ixgbe_dcb_check_config(&adapter->dcb_cfg);
3348 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
3349 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
3350
3351 /* reconfigure the hardware */
3352 ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
3353
3354 for (i = 0; i < adapter->num_tx_queues; i++) {
4a0b9ca0 3355 j = adapter->tx_ring[i]->reg_idx;
2f90b865
AD
3356 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3357 /* PThresh workaround for Tx hang with DFP enabled. */
3358 txdctl |= 32;
3359 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
3360 }
3361 /* Enable VLAN tag insert/strip */
5f6c0181
JB
3362 ixgbe_vlan_filter_enable(adapter);
3363
2f90b865
AD
3364 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
3365}
3366
3367#endif
9a799d71
AK
3368static void ixgbe_configure(struct ixgbe_adapter *adapter)
3369{
3370 struct net_device *netdev = adapter->netdev;
c4cf55e5 3371 struct ixgbe_hw *hw = &adapter->hw;
9a799d71
AK
3372 int i;
3373
2c5645cf 3374 ixgbe_set_rx_mode(netdev);
9a799d71
AK
3375
3376 ixgbe_restore_vlan(adapter);
7a6b6f51 3377#ifdef CONFIG_IXGBE_DCB
67ebd791 3378 ixgbe_configure_dcb(adapter);
2f90b865 3379#endif
9a799d71 3380
eacd73f7
YZ
3381#ifdef IXGBE_FCOE
3382 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
3383 ixgbe_configure_fcoe(adapter);
3384
3385#endif /* IXGBE_FCOE */
c4cf55e5
PWJ
3386 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3387 for (i = 0; i < adapter->num_tx_queues; i++)
4a0b9ca0 3388 adapter->tx_ring[i]->atr_sample_rate =
c4cf55e5
PWJ
3389 adapter->atr_sample_rate;
3390 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
3391 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
3392 ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
3393 }
3394
9a799d71
AK
3395 ixgbe_configure_tx(adapter);
3396 ixgbe_configure_rx(adapter);
9a799d71
AK
3397}
3398
e8e26350
PW
3399static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
3400{
3401 switch (hw->phy.type) {
3402 case ixgbe_phy_sfp_avago:
3403 case ixgbe_phy_sfp_ftl:
3404 case ixgbe_phy_sfp_intel:
3405 case ixgbe_phy_sfp_unknown:
ea0a04df
DS
3406 case ixgbe_phy_sfp_passive_tyco:
3407 case ixgbe_phy_sfp_passive_unknown:
3408 case ixgbe_phy_sfp_active_unknown:
3409 case ixgbe_phy_sfp_ftl_active:
e8e26350
PW
3410 return true;
3411 default:
3412 return false;
3413 }
3414}
3415
0ecc061d 3416/**
e8e26350
PW
3417 * ixgbe_sfp_link_config - set up SFP+ link
3418 * @adapter: pointer to private adapter struct
3419 **/
3420static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
3421{
3422 struct ixgbe_hw *hw = &adapter->hw;
3423
3424 if (hw->phy.multispeed_fiber) {
3425 /*
3426 * In multispeed fiber setups, the device may not have
3427 * had a physical connection when the driver loaded.
3428 * If that's the case, the initial link configuration
3429 * couldn't get the MAC into 10G or 1G mode, so we'll
3430 * never have a link status change interrupt fire.
3431 * We need to try and force an autonegotiation
3432 * session, then bring up link.
3433 */
3434 hw->mac.ops.setup_sfp(hw);
3435 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
3436 schedule_work(&adapter->multispeed_fiber_task);
3437 } else {
3438 /*
3439 * Direct Attach Cu and non-multispeed fiber modules
3440 * still need to be configured properly prior to
3441 * attempting link.
3442 */
3443 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
3444 schedule_work(&adapter->sfp_config_module_task);
3445 }
3446}
3447
3448/**
3449 * ixgbe_non_sfp_link_config - set up non-SFP+ link
0ecc061d
PWJ
3450 * @hw: pointer to private hardware struct
3451 *
3452 * Returns 0 on success, negative on failure
3453 **/
e8e26350 3454static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
0ecc061d
PWJ
3455{
3456 u32 autoneg;
8620a103 3457 bool negotiation, link_up = false;
0ecc061d
PWJ
3458 u32 ret = IXGBE_ERR_LINK_SETUP;
3459
3460 if (hw->mac.ops.check_link)
3461 ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
3462
3463 if (ret)
3464 goto link_cfg_out;
3465
3466 if (hw->mac.ops.get_link_capabilities)
8620a103 3467 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
0ecc061d
PWJ
3468 if (ret)
3469 goto link_cfg_out;
3470
8620a103
MC
3471 if (hw->mac.ops.setup_link)
3472 ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
0ecc061d
PWJ
3473link_cfg_out:
3474 return ret;
3475}
3476
a34bcfff 3477static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
9a799d71 3478{
9a799d71 3479 struct ixgbe_hw *hw = &adapter->hw;
a34bcfff 3480 u32 gpie = 0;
9a799d71 3481
9b471446 3482 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
a34bcfff
AD
3483 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
3484 IXGBE_GPIE_OCD;
3485 gpie |= IXGBE_GPIE_EIAME;
9b471446
JB
3486 /*
3487 * use EIAM to auto-mask when MSI-X interrupt is asserted
3488 * this saves a register write for every interrupt
3489 */
3490 switch (hw->mac.type) {
3491 case ixgbe_mac_82598EB:
3492 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3493 break;
3494 default:
3495 case ixgbe_mac_82599EB:
3496 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3497 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3498 break;
3499 }
3500 } else {
021230d4
AV
3501 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
3502 * specifically only auto mask tx and rx interrupts */
3503 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3504 }
9a799d71 3505
a34bcfff
AD
3506 /* XXX: to interrupt immediately for EICS writes, enable this */
3507 /* gpie |= IXGBE_GPIE_EIMEN; */
3508
3509 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3510 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
3511 gpie |= IXGBE_GPIE_VTMODE_64;
119fc60a
MC
3512 }
3513
a34bcfff
AD
3514 /* Enable fan failure interrupt */
3515 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
0befdb3e 3516 gpie |= IXGBE_SDP1_GPIEN;
0befdb3e 3517
a34bcfff 3518 if (hw->mac.type == ixgbe_mac_82599EB) {
e8e26350
PW
3519 gpie |= IXGBE_SDP1_GPIEN;
3520 gpie |= IXGBE_SDP2_GPIEN;
 }
a34bcfff
AD
3521
3522 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3523}
3524
3525static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3526{
3527 struct ixgbe_hw *hw = &adapter->hw;
a34bcfff 3528 int err;
a34bcfff
AD
3529 u32 ctrl_ext;
3530
3531 ixgbe_get_hw_control(adapter);
3532 ixgbe_setup_gpie(adapter);
e8e26350 3533
9a799d71
AK
3534 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3535 ixgbe_configure_msix(adapter);
3536 else
3537 ixgbe_configure_msi_and_legacy(adapter);
3538
61fac744
PW
3539 /* enable the optics */
3540 if (hw->phy.multispeed_fiber)
3541 hw->mac.ops.enable_tx_laser(hw);
3542
9a799d71 3543 clear_bit(__IXGBE_DOWN, &adapter->state);
021230d4
AV
3544 ixgbe_napi_enable_all(adapter);
3545
3546 /* clear any pending interrupts, may auto mask */
3547 IXGBE_READ_REG(hw, IXGBE_EICR);
9a799d71
AK
3548 ixgbe_irq_enable(adapter);
3549
bf069c97
DS
3550 /*
3551 * If this adapter has a fan, check to see if we had a failure
3552 * before we enabled the interrupt.
3553 */
3554 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
3555 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
3556 if (esdp & IXGBE_ESDP_SDP1)
396e799c 3557 e_crit(drv, "Fan has stopped, replace the adapter\n");
bf069c97
DS
3558 }
3559
e8e26350
PW
3560 /*
3561 * For hot-pluggable SFP+ devices, a new SFP+ module may have
19343de2
DS
3562 * arrived before interrupts were enabled but after probe. Such
3563 * devices wouldn't have their type identified yet. We need to
3564 * kick off the SFP+ module setup first, then try to bring up link.
e8e26350
PW
3565 * If we're not hot-pluggable SFP+, we just need to configure link
3566 * and bring it up.
3567 */
19343de2
DS
3568 if (hw->phy.type == ixgbe_phy_unknown) {
3569 err = hw->phy.ops.identify(hw);
3570 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
5da43c1a
DS
3571 /*
3572 * Take the device down and schedule the sfp tasklet
3573 * which will unregister_netdev and log it.
3574 */
19343de2 3575 ixgbe_down(adapter);
5da43c1a 3576 schedule_work(&adapter->sfp_config_module_task);
19343de2
DS
3577 return err;
3578 }
e8e26350
PW
3579 }
3580
3581 if (ixgbe_is_sfp(hw)) {
3582 ixgbe_sfp_link_config(adapter);
3583 } else {
3584 err = ixgbe_non_sfp_link_config(hw);
3585 if (err)
396e799c 3586 e_err(probe, "link_config FAILED %d\n", err);
e8e26350 3587 }
0ecc061d 3588
1da100bb 3589 /* enable transmits */
477de6ed 3590 netif_tx_start_all_queues(adapter->netdev);
1da100bb 3591
9a799d71
AK
3592 /* bring the link up in the watchdog; this could race with our first
3593 * link up interrupt but shouldn't be a problem */
cf8280ee
JB
3594 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
3595 adapter->link_check_timeout = jiffies;
9a799d71 3596 mod_timer(&adapter->watchdog_timer, jiffies);
c9205697
GR
3597
3598 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
3599 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3600 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3601 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3602
9a799d71
AK
3603 return 0;
3604}
3605
d4f80882
AV
3606void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
3607{
3608 WARN_ON(in_interrupt());
3609 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
3610 msleep(1);
3611 ixgbe_down(adapter);
5809a1ae
GR
3612 /*
3613 * If SR-IOV enabled then wait a bit before bringing the adapter
3614 * back up to give the VFs time to respond to the reset. The
3615 * two second wait is based upon the watchdog timer cycle in
3616 * the VF driver.
3617 */
3618 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3619 msleep(2000);
d4f80882
AV
3620 ixgbe_up(adapter);
3621 clear_bit(__IXGBE_RESETTING, &adapter->state);
3622}
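
ixgbe_reinit_locked() serializes restarts with a single state bit:
test_and_set_bit() either claims __IXGBE_RESETTING or reports that it was
already held, and the loser sleeps and retries. A rough userspace equivalent
using C11 atomics (msleep() approximated with nanosleep(); all names here are
mine, not the driver's):

#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static atomic_flag resetting = ATOMIC_FLAG_INIT;

static void msleep_ms(long ms)		/* crude stand-in for msleep() */
{
	struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };
	nanosleep(&ts, NULL);
}

static void reinit_locked(void)
{
	/* sleep-and-retry until we own the RESETTING bit */
	while (atomic_flag_test_and_set(&resetting))
		msleep_ms(1);

	puts("down ... up");		/* ixgbe_down()/ixgbe_up() go here */

	atomic_flag_clear(&resetting);	/* clear_bit(__IXGBE_RESETTING, ...) */
}

int main(void)
{
	reinit_locked();
	return 0;
}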
3623
9a799d71
AK
3624int ixgbe_up(struct ixgbe_adapter *adapter)
3625{
3626 /* hardware has been reset, we need to reload some things */
3627 ixgbe_configure(adapter);
3628
3629 return ixgbe_up_complete(adapter);
3630}
3631
3632void ixgbe_reset(struct ixgbe_adapter *adapter)
3633{
c44ade9e 3634 struct ixgbe_hw *hw = &adapter->hw;
8ca783ab
DS
3635 int err;
3636
3637 err = hw->mac.ops.init_hw(hw);
da4dd0f7
PWJ
3638 switch (err) {
3639 case 0:
3640 case IXGBE_ERR_SFP_NOT_PRESENT:
3641 break;
3642 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
849c4542 3643 e_dev_err("master disable timed out\n");
da4dd0f7 3644 break;
794caeb2
PWJ
3645 case IXGBE_ERR_EEPROM_VERSION:
3646 /* We are running on a pre-production device, log a warning */
849c4542
ET
3647 e_dev_warn("This device is a pre-production adapter/LOM. "
3648 "Please be aware there may be issuesassociated with "
3649 "your hardware. If you are experiencing problems "
3650 "please contact your Intel or hardware "
3651 "representative who provided you with this "
3652 "hardware.\n");
794caeb2 3653 break;
da4dd0f7 3654 default:
849c4542 3655 e_dev_err("Hardware Error: %d\n", err);
da4dd0f7 3656 }
9a799d71
AK
3657
3658 /* reprogram the RAR[0] in case user changed it. */
1cdd1ec8
GR
3659 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
3660 IXGBE_RAH_AV);
9a799d71
AK
3661}
3662
9a799d71
AK
3663/**
3664 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
3665 * @adapter: board private structure
3666 * @rx_ring: ring to free buffers from
3667 **/
3668static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
b4617240 3669 struct ixgbe_ring *rx_ring)
9a799d71
AK
3670{
3671 struct pci_dev *pdev = adapter->pdev;
3672 unsigned long size;
3673 unsigned int i;
3674
84418e3b
AD
3675 /* ring already cleared, nothing to do */
3676 if (!rx_ring->rx_buffer_info)
3677 return;
9a799d71 3678
84418e3b 3679 /* Free all the Rx ring sk_buffs */
9a799d71
AK
3680 for (i = 0; i < rx_ring->count; i++) {
3681 struct ixgbe_rx_buffer *rx_buffer_info;
3682
3683 rx_buffer_info = &rx_ring->rx_buffer_info[i];
3684 if (rx_buffer_info->dma) {
1b507730 3685 dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
b4617240 3686 rx_ring->rx_buf_len,
1b507730 3687 DMA_FROM_DEVICE);
9a799d71
AK
3688 rx_buffer_info->dma = 0;
3689 }
3690 if (rx_buffer_info->skb) {
f8212f97 3691 struct sk_buff *skb = rx_buffer_info->skb;
9a799d71 3692 rx_buffer_info->skb = NULL;
f8212f97
AD
3693 do {
3694 struct sk_buff *this = skb;
e8171aaa 3695 if (IXGBE_RSC_CB(this)->delay_unmap) {
1b507730
NN
3696 dma_unmap_single(&pdev->dev,
3697 IXGBE_RSC_CB(this)->dma,
43634e82 3698 rx_ring->rx_buf_len,
1b507730 3699 DMA_FROM_DEVICE);
fd3686a8 3700 IXGBE_RSC_CB(this)->dma = 0;
e8171aaa 3701 IXGBE_RSC_CB(skb)->delay_unmap = false;
fd3686a8 3702 }
f8212f97
AD
3703 skb = skb->prev;
3704 dev_kfree_skb(this);
3705 } while (skb);
9a799d71
AK
3706 }
3707 if (!rx_buffer_info->page)
3708 continue;
4f57ca6e 3709 if (rx_buffer_info->page_dma) {
1b507730
NN
3710 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
3711 PAGE_SIZE / 2, DMA_FROM_DEVICE);
4f57ca6e
JB
3712 rx_buffer_info->page_dma = 0;
3713 }
9a799d71
AK
3714 put_page(rx_buffer_info->page);
3715 rx_buffer_info->page = NULL;
762f4c57 3716 rx_buffer_info->page_offset = 0;
9a799d71
AK
3717 }
3718
3719 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
3720 memset(rx_ring->rx_buffer_info, 0, size);
3721
3722 /* Zero out the descriptor ring */
3723 memset(rx_ring->desc, 0, rx_ring->size);
3724
3725 rx_ring->next_to_clean = 0;
3726 rx_ring->next_to_use = 0;
3727
9891ca7c
JB
3728 if (rx_ring->head)
3729 writel(0, adapter->hw.hw_addr + rx_ring->head);
3730 if (rx_ring->tail)
3731 writel(0, adapter->hw.hw_addr + rx_ring->tail);
9a799d71
AK
3732}
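
The RSC teardown in ixgbe_clean_rx_ring() walks a chain linked through
skb->prev, and the ordering is the whole trick: the link must be read before
the current element is freed. The same idiom on a hypothetical node type:

#include <stdlib.h>

struct node {
	struct node *prev;	/* chain link, like skb->prev in the RSC case */
};

static void free_chain(struct node *n)
{
	do {
		struct node *this = n;
		n = n->prev;	/* read the link first ...      */
		free(this);	/* ... then release the element */
	} while (n);
}

int main(void)
{
	struct node *a = calloc(1, sizeof(*a));
	struct node *b = calloc(1, sizeof(*b));

	if (!a || !b)
		return 1;
	b->prev = a;		/* b -> a -> NULL */
	free_chain(b);
	return 0;
}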
3733
3734/**
3735 * ixgbe_clean_tx_ring - Free Tx Buffers
3736 * @adapter: board private structure
3737 * @tx_ring: ring to be cleaned
3738 **/
3739static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
b4617240 3740 struct ixgbe_ring *tx_ring)
9a799d71
AK
3741{
3742 struct ixgbe_tx_buffer *tx_buffer_info;
3743 unsigned long size;
3744 unsigned int i;
3745
84418e3b
AD
3746 /* ring already cleared, nothing to do */
3747 if (!tx_ring->tx_buffer_info)
3748 return;
9a799d71 3749
84418e3b 3750 /* Free all the Tx ring sk_buffs */
9a799d71
AK
3751 for (i = 0; i < tx_ring->count; i++) {
3752 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3753 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
3754 }
3755
3756 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
3757 memset(tx_ring->tx_buffer_info, 0, size);
3758
3759 /* Zero out the descriptor ring */
3760 memset(tx_ring->desc, 0, tx_ring->size);
3761
3762 tx_ring->next_to_use = 0;
3763 tx_ring->next_to_clean = 0;
3764
9891ca7c
JB
3765 if (tx_ring->head)
3766 writel(0, adapter->hw.hw_addr + tx_ring->head);
3767 if (tx_ring->tail)
3768 writel(0, adapter->hw.hw_addr + tx_ring->tail);
9a799d71
AK
3769}
3770
3771/**
021230d4 3772 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
9a799d71
AK
3773 * @adapter: board private structure
3774 **/
021230d4 3775static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
9a799d71
AK
3776{
3777 int i;
3778
021230d4 3779 for (i = 0; i < adapter->num_rx_queues; i++)
4a0b9ca0 3780 ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
9a799d71
AK
3781}
3782
3783/**
021230d4 3784 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
9a799d71
AK
3785 * @adapter: board private structure
3786 **/
021230d4 3787static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
9a799d71
AK
3788{
3789 int i;
3790
021230d4 3791 for (i = 0; i < adapter->num_tx_queues; i++)
4a0b9ca0 3792 ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
9a799d71
AK
3793}
3794
3795void ixgbe_down(struct ixgbe_adapter *adapter)
3796{
3797 struct net_device *netdev = adapter->netdev;
7f821875 3798 struct ixgbe_hw *hw = &adapter->hw;
9a799d71 3799 u32 rxctrl;
7f821875
JB
3800 u32 txdctl;
3801 int i, j;
9a799d71
AK
3802
3803 /* signal that we are down to the interrupt handler */
3804 set_bit(__IXGBE_DOWN, &adapter->state);
3805
767081ad
GR
3806 /* disable receive for all VFs and wait one second */
3807 if (adapter->num_vfs) {
767081ad
GR
3808 /* ping all the active vfs to let them know we are going down */
3809 ixgbe_ping_all_vfs(adapter);
581d1aa7 3810
767081ad
GR
3811 /* Disable all VFTE/VFRE TX/RX */
3812 ixgbe_disable_tx_rx(adapter);
581d1aa7
GR
3813
3814 /* Mark all the VFs as inactive */
3815 for (i = 0 ; i < adapter->num_vfs; i++)
3816 adapter->vfinfo[i].clear_to_send = 0;
767081ad
GR
3817 }
3818
9a799d71 3819 /* disable receives */
7f821875
JB
3820 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3821 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
9a799d71 3822
7f821875 3823 IXGBE_WRITE_FLUSH(hw);
9a799d71
AK
3824 msleep(10);
3825
7f821875
JB
3826 netif_tx_stop_all_queues(netdev);
3827
0a1f87cb
DS
3828 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
3829 del_timer_sync(&adapter->sfp_timer);
9a799d71 3830 del_timer_sync(&adapter->watchdog_timer);
cf8280ee 3831 cancel_work_sync(&adapter->watchdog_task);
9a799d71 3832
c0dfb90e
JF
3833 netif_carrier_off(netdev);
3834 netif_tx_disable(netdev);
3835
3836 ixgbe_irq_disable(adapter);
3837
3838 ixgbe_napi_disable_all(adapter);
3839
c4cf55e5
PWJ
3840 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
3841 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
3842 cancel_work_sync(&adapter->fdir_reinit_task);
3843
119fc60a
MC
3844 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
3845 cancel_work_sync(&adapter->check_overtemp_task);
3846
7f821875
JB
3847 /* disable transmits in the hardware now that interrupts are off */
3848 for (i = 0; i < adapter->num_tx_queues; i++) {
4a0b9ca0 3849 j = adapter->tx_ring[i]->reg_idx;
7f821875
JB
3850 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3851 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
3852 (txdctl & ~IXGBE_TXDCTL_ENABLE));
3853 }
88512539
PW
3854 /* Disable the Tx DMA engine on 82599 */
3855 if (hw->mac.type == ixgbe_mac_82599EB)
3856 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
3857 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
3858 ~IXGBE_DMATXCTL_TE));
7f821875 3859
9f756f01
JF
3860 /* power down the optics */
3861 if (hw->phy.multispeed_fiber)
3862 hw->mac.ops.disable_tx_laser(hw);
3863
9a713e7c
PW
3864 /* clear n-tuple filters that are cached */
3865 ethtool_ntuple_flush(netdev);
3866
6f4a0e45
PL
3867 if (!pci_channel_offline(adapter->pdev))
3868 ixgbe_reset(adapter);
9a799d71
AK
3869 ixgbe_clean_all_tx_rings(adapter);
3870 ixgbe_clean_all_rx_rings(adapter);
3871
5dd2d332 3872#ifdef CONFIG_IXGBE_DCA
96b0e0f6 3873 /* since we reset the hardware DCA settings were cleared */
e35ec126 3874 ixgbe_setup_dca(adapter);
96b0e0f6 3875#endif
9a799d71
AK
3876}
3877
9a799d71 3878/**
021230d4
AV
3879 * ixgbe_poll - NAPI Rx polling callback
3880 * @napi: structure for representing this polling device
3881 * @budget: how many packets driver is allowed to clean
3882 *
3883 * This function is used for legacy and MSI interrupt modes with NAPI
9a799d71 3884 **/
021230d4 3885static int ixgbe_poll(struct napi_struct *napi, int budget)
9a799d71 3886{
9a1a69ad
JB
3887 struct ixgbe_q_vector *q_vector =
3888 container_of(napi, struct ixgbe_q_vector, napi);
021230d4 3889 struct ixgbe_adapter *adapter = q_vector->adapter;
9a1a69ad 3890 int tx_clean_complete, work_done = 0;
9a799d71 3891
5dd2d332 3892#ifdef CONFIG_IXGBE_DCA
bd0362dd 3893 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
4a0b9ca0
PW
3894 ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
3895 ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
bd0362dd
JC
3896 }
3897#endif
3898
4a0b9ca0
PW
3899 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
3900 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
9a799d71 3901
9a1a69ad 3902 if (!tx_clean_complete)
d2c7ddd6
DM
3903 work_done = budget;
3904
53e52c72
DM
3905 /* If budget not fully consumed, exit the polling mode */
3906 if (work_done < budget) {
288379f0 3907 napi_complete(napi);
f7554a2b 3908 if (adapter->rx_itr_setting & 1)
f494e8fa 3909 ixgbe_set_itr(adapter);
d4f80882 3910 if (!test_bit(__IXGBE_DOWN, &adapter->state))
835462fc 3911 ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
9a799d71 3912 }
9a799d71
AK
3913 return work_done;
3914}
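
The return value of ixgbe_poll() is the heart of the NAPI contract: report how
much work was done, and only when under budget leave polling mode and re-arm
interrupts. A toy poller showing just that contract (rx_pending stands in for
real ring state; the names are illustrative):

#include <stdio.h>

static int rx_pending = 70;	/* pretend packets waiting on the ring */
static int irq_enabled;

/* returns packets cleaned; the caller keeps polling while ret == budget */
static int poll(int budget)
{
	int done = (rx_pending < budget) ? rx_pending : budget;

	rx_pending -= done;
	if (done < budget)
		irq_enabled = 1;	/* napi_complete() + irq re-enable */
	return done;
}

int main(void)
{
	int budget = 64, work;

	do {
		work = poll(budget);
		printf("cleaned %d, irq %s\n", work, irq_enabled ? "on" : "off");
	} while (work == budget);
	return 0;
}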
3915
3916/**
3917 * ixgbe_tx_timeout - Respond to a Tx Hang
3918 * @netdev: network interface device structure
3919 **/
3920static void ixgbe_tx_timeout(struct net_device *netdev)
3921{
3922 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3923
3924 /* Do the reset outside of interrupt context */
3925 schedule_work(&adapter->reset_task);
3926}
3927
3928static void ixgbe_reset_task(struct work_struct *work)
3929{
3930 struct ixgbe_adapter *adapter;
3931 adapter = container_of(work, struct ixgbe_adapter, reset_task);
3932
2f90b865
AD
3933 /* If we're already down or resetting, just bail */
3934 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
3935 test_bit(__IXGBE_RESETTING, &adapter->state))
3936 return;
3937
9a799d71
AK
3938 adapter->tx_timeout_count++;
3939
dcd79aeb
TI
3940 ixgbe_dump(adapter);
3941 netdev_err(adapter->netdev, "Reset adapter\n");
d4f80882 3942 ixgbe_reinit_locked(adapter);
9a799d71
AK
3943}
3944
bc97114d
PWJ
3945#ifdef CONFIG_IXGBE_DCB
3946static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
b9804972 3947{
bc97114d 3948 bool ret = false;
0cefafad 3949 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
b9804972 3950
0cefafad
JB
3951 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
3952 return ret;
3953
3954 f->mask = 0x7 << 3;
3955 adapter->num_rx_queues = f->indices;
3956 adapter->num_tx_queues = f->indices;
3957 ret = true;
2f90b865 3958
bc97114d
PWJ
3959 return ret;
3960}
3961#endif
3962
4df10466
JB
3963/**
3964 * ixgbe_set_rss_queues: Allocate queues for RSS
3965 * @adapter: board private structure to initialize
3966 *
3967 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
3968 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
3969 *
3970 **/
bc97114d
PWJ
3971static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
3972{
3973 bool ret = false;
0cefafad 3974 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
bc97114d
PWJ
3975
3976 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
0cefafad
JB
3977 f->mask = 0xF;
3978 adapter->num_rx_queues = f->indices;
3979 adapter->num_tx_queues = f->indices;
bc97114d
PWJ
3980 ret = true;
3981 } else {
bc97114d 3982 ret = false;
b9804972
JB
3983 }
3984
bc97114d
PWJ
3985 return ret;
3986}
3987
c4cf55e5
PWJ
3988/**
3989 * ixgbe_set_fdir_queues: Allocate queues for Flow Director
3990 * @adapter: board private structure to initialize
3991 *
3992 * Flow Director is an advanced Rx filter, attempting to get Rx flows back
3993 * to the original CPU that initiated the Tx session. This runs in addition
3994 * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
3995 * Rx load across CPUs using RSS.
3996 *
3997 **/
3998static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
3999{
4000 bool ret = false;
4001 struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
4002
4003 f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
4004 f_fdir->mask = 0;
4005
4006 /* Flow Director must have RSS enabled */
4007 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
4008 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
4009 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) {
4010 adapter->num_tx_queues = f_fdir->indices;
4011 adapter->num_rx_queues = f_fdir->indices;
4012 ret = true;
4013 } else {
4014 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
4015 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4016 }
4017 return ret;
4018}
4019
0331a832
YZ
4020#ifdef IXGBE_FCOE
4021/**
4022 * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
4023 * @adapter: board private structure to initialize
4024 *
4025 * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
4026 * The ring feature mask is not used as a mask for FCoE, as FCoE can take any
4027 * 8 rx queues out of the max number of rx queues; instead, it is used as the
4028 * index of the first rx queue used by FCoE.
4029 *
4030 **/
4031static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
4032{
4033 bool ret = false;
4034 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
4035
4036 f->indices = min((int)num_online_cpus(), f->indices);
4037 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
8de8b2e6
YZ
4038 adapter->num_rx_queues = 1;
4039 adapter->num_tx_queues = 1;
0331a832
YZ
4040#ifdef CONFIG_IXGBE_DCB
4041 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
396e799c 4042 e_info(probe, "FCoE enabled with DCB\n");
0331a832
YZ
4043 ixgbe_set_dcb_queues(adapter);
4044 }
4045#endif
4046 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
396e799c 4047 e_info(probe, "FCoE enabled with RSS\n");
8faa2a78
YZ
4048 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4049 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
4050 ixgbe_set_fdir_queues(adapter);
4051 else
4052 ixgbe_set_rss_queues(adapter);
0331a832
YZ
4053 }
4054 /* adding FCoE rx rings to the end */
4055 f->mask = adapter->num_rx_queues;
4056 adapter->num_rx_queues += f->indices;
8de8b2e6 4057 adapter->num_tx_queues += f->indices;
0331a832
YZ
4058
4059 ret = true;
4060 }
4061
4062 return ret;
4063}
4064
4065#endif /* IXGBE_FCOE */
1cdd1ec8
GR
4066/**
4067 * ixgbe_set_sriov_queues: Allocate queues for IOV use
4068 * @adapter: board private structure to initialize
4069 *
4070 * IOV doesn't actually use anything, so just NAK the
4071 * request for now and let the other queue routines
4072 * figure out what to do.
4073 */
4074static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
4075{
4076 return false;
4077}
4078
4df10466
JB
4079/*
4080 * ixgbe_set_num_queues: Allocate queues for device, feature dependent
4081 * @adapter: board private structure to initialize
4082 *
4083 * This is the top level queue allocation routine. The order here is very
4084 * important, starting with the "most" number of features turned on at once,
4085 * and ending with the smallest set of features. This way large combinations
4086 * can be allocated if they're turned on, and smaller combinations are the
4087 * fallthrough conditions.
4088 *
4089 **/
bc97114d
PWJ
4090static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
4091{
1cdd1ec8
GR
4092 /* Start with base case */
4093 adapter->num_rx_queues = 1;
4094 adapter->num_tx_queues = 1;
4095 adapter->num_rx_pools = adapter->num_rx_queues;
4096 adapter->num_rx_queues_per_pool = 1;
4097
4098 if (ixgbe_set_sriov_queues(adapter))
4099 return;
4100
0331a832
YZ
4101#ifdef IXGBE_FCOE
4102 if (ixgbe_set_fcoe_queues(adapter))
4103 goto done;
4104
4105#endif /* IXGBE_FCOE */
bc97114d
PWJ
4106#ifdef CONFIG_IXGBE_DCB
4107 if (ixgbe_set_dcb_queues(adapter))
af22ab1b 4108 goto done;
bc97114d
PWJ
4109
4110#endif
c4cf55e5
PWJ
4111 if (ixgbe_set_fdir_queues(adapter))
4112 goto done;
4113
bc97114d 4114 if (ixgbe_set_rss_queues(adapter))
af22ab1b
WF
4115 goto done;
4116
4117 /* fallback to base case */
4118 adapter->num_rx_queues = 1;
4119 adapter->num_tx_queues = 1;
4120
4121done:
4122 /* Notify the stack of the (possibly) reduced Tx Queue count. */
f0796d5c 4123 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
b9804972
JB
4124}
4125
021230d4 4126static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
b4617240 4127 int vectors)
021230d4
AV
4128{
4129 int err, vector_threshold;
4130
4131 /* We'll want at least 3 (vector_threshold):
4132 * 1) TxQ[0] Cleanup
4133 * 2) RxQ[0] Cleanup
4134 * 3) Other (Link Status Change, etc.)
4135 * 4) TCP Timer (optional)
4136 */
4137 vector_threshold = MIN_MSIX_COUNT;
4138
4139 /* The more we get, the more we will assign to Tx/Rx Cleanup
4140 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
4141 * Right now, we simply care about how many we'll get; we'll
4142 * set them up later while requesting irq's.
4143 */
4144 while (vectors >= vector_threshold) {
4145 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
b4617240 4146 vectors);
021230d4
AV
4147 if (!err) /* Success in acquiring all requested vectors. */
4148 break;
4149 else if (err < 0)
4150 vectors = 0; /* Nasty failure, quit now */
4151 else /* err == number of vectors we should try again with */
4152 vectors = err;
4153 }
4154
4155 if (vectors < vector_threshold) {
4156 /* Can't allocate enough MSI-X interrupts? Oh well.
4157 * This just means we'll go with either a single MSI
4158 * vector or fall back to legacy interrupts.
4159 */
849c4542
ET
4160 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
4161 "Unable to allocate MSI-X interrupts\n");
021230d4
AV
4162 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
4163 kfree(adapter->msix_entries);
4164 adapter->msix_entries = NULL;
021230d4
AV
4165 } else {
4166 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
eb7f139c
PWJ
4167 /*
4168 * Adjust for only the vectors we'll use, which is minimum
4169 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
4170 * vectors we were allocated.
4171 */
4172 adapter->num_msix_vectors = min(vectors,
4173 adapter->max_msix_q_vectors + NON_Q_VECTORS);
021230d4
AV
4174 }
4175}
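
The loop above relies on the old pci_enable_msix() convention: 0 is success, a
negative value is fatal, and a positive value is how many vectors the platform
can actually grant, which becomes the next request. A self-contained model of
that negotiation, with try_enable() faking the PCI core and all counts
invented:

#include <stdio.h>

#define AVAILABLE 5	/* pretend the platform has 5 vectors free */
#define MIN_NEEDED 3	/* vector_threshold in the driver          */

/* mimics old pci_enable_msix(): 0 ok, <0 fatal, >0 "retry with this many" */
static int try_enable(int requested)
{
	return (requested <= AVAILABLE) ? 0 : AVAILABLE;
}

int main(void)
{
	int vectors = 16;	/* initial (greedy) request */

	while (vectors >= MIN_NEEDED) {
		int err = try_enable(vectors);

		if (!err)
			break;		/* got everything we asked for   */
		else if (err < 0)
			vectors = 0;	/* hard failure, give up         */
		else
			vectors = err;	/* retry with what's available   */
	}

	if (vectors < MIN_NEEDED)
		puts("fall back to MSI/legacy");
	else
		printf("MSI-X enabled with %d vectors\n", vectors);
	return 0;
}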
4176
021230d4 4177/**
bc97114d 4178 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
021230d4
AV
4179 * @adapter: board private structure to initialize
4180 *
bc97114d
PWJ
4181 * Cache the descriptor ring offsets for RSS to the assigned rings.
4182 *
021230d4 4183 **/
bc97114d 4184static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
021230d4 4185{
bc97114d
PWJ
4186 int i;
4187 bool ret = false;
4188
4189 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4190 for (i = 0; i < adapter->num_rx_queues; i++)
4a0b9ca0 4191 adapter->rx_ring[i]->reg_idx = i;
bc97114d 4192 for (i = 0; i < adapter->num_tx_queues; i++)
4a0b9ca0 4193 adapter->tx_ring[i]->reg_idx = i;
bc97114d
PWJ
4194 ret = true;
4195 } else {
4196 ret = false;
4197 }
4198
4199 return ret;
4200}
4201
4202#ifdef CONFIG_IXGBE_DCB
4203/**
4204 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
4205 * @adapter: board private structure to initialize
4206 *
4207 * Cache the descriptor ring offsets for DCB to the assigned rings.
4208 *
4209 **/
4210static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
4211{
4212 int i;
4213 bool ret = false;
4214 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
4215
4216 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4217 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2f90b865
AD
4218 /* the number of queues is assumed to be symmetric */
4219 for (i = 0; i < dcb_i; i++) {
4a0b9ca0
PW
4220 adapter->rx_ring[i]->reg_idx = i << 3;
4221 adapter->tx_ring[i]->reg_idx = i << 2;
2f90b865 4222 }
bc97114d 4223 ret = true;
e8e26350 4224 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
f92ef202
PW
4225 if (dcb_i == 8) {
4226 /*
4227 * Tx TC0 starts at: descriptor queue 0
4228 * Tx TC1 starts at: descriptor queue 32
4229 * Tx TC2 starts at: descriptor queue 64
4230 * Tx TC3 starts at: descriptor queue 80
4231 * Tx TC4 starts at: descriptor queue 96
4232 * Tx TC5 starts at: descriptor queue 104
4233 * Tx TC6 starts at: descriptor queue 112
4234 * Tx TC7 starts at: descriptor queue 120
4235 *
4236 * Rx TC0-TC7 are offset by 16 queues each
4237 */
4238 for (i = 0; i < 3; i++) {
4a0b9ca0
PW
4239 adapter->tx_ring[i]->reg_idx = i << 5;
4240 adapter->rx_ring[i]->reg_idx = i << 4;
f92ef202
PW
4241 }
4242 for ( ; i < 5; i++) {
4a0b9ca0 4243 adapter->tx_ring[i]->reg_idx =
f92ef202 4244 ((i + 2) << 4);
4a0b9ca0 4245 adapter->rx_ring[i]->reg_idx = i << 4;
f92ef202
PW
4246 }
4247 for ( ; i < dcb_i; i++) {
4a0b9ca0 4248 adapter->tx_ring[i]->reg_idx =
f92ef202 4249 ((i + 8) << 3);
4a0b9ca0 4250 adapter->rx_ring[i]->reg_idx = i << 4;
f92ef202
PW
4251 }
4252
4253 ret = true;
4254 } else if (dcb_i == 4) {
4255 /*
4256 * Tx TC0 starts at: descriptor queue 0
4257 * Tx TC1 starts at: descriptor queue 64
4258 * Tx TC2 starts at: descriptor queue 96
4259 * Tx TC3 starts at: descriptor queue 112
4260 *
4261 * Rx TC0-TC3 are offset by 32 queues each
4262 */
4a0b9ca0
PW
4263 adapter->tx_ring[0]->reg_idx = 0;
4264 adapter->tx_ring[1]->reg_idx = 64;
4265 adapter->tx_ring[2]->reg_idx = 96;
4266 adapter->tx_ring[3]->reg_idx = 112;
f92ef202 4267 for (i = 0 ; i < dcb_i; i++)
4a0b9ca0 4268 adapter->rx_ring[i]->reg_idx = i << 5;
f92ef202
PW
4269
4270 ret = true;
4271 } else {
4272 ret = false;
e8e26350 4273 }
bc97114d
PWJ
4274 } else {
4275 ret = false;
021230d4 4276 }
bc97114d
PWJ
4277 } else {
4278 ret = false;
021230d4 4279 }
bc97114d
PWJ
4280
4281 return ret;
4282}
4283#endif
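
The three loops in the 82599 8-TC branch are a compact encoding of the
queue-offset table spelled out in the comment above; running the same shifts in
isolation reproduces it. A sketch with dcb_i fixed at 8:

#include <stdio.h>

int main(void)
{
	int i, tx[8], rx[8], dcb_i = 8;

	for (i = 0; i < 3; i++)
		tx[i] = i << 5;			/* 0, 32, 64      */
	for (; i < 5; i++)
		tx[i] = (i + 2) << 4;		/* 80, 96         */
	for (; i < dcb_i; i++)
		tx[i] = (i + 8) << 3;		/* 104, 112, 120  */
	for (i = 0; i < dcb_i; i++)
		rx[i] = i << 4;			/* 0, 16, ..., 112 */

	for (i = 0; i < dcb_i; i++)
		printf("TC%d: tx reg_idx %3d, rx reg_idx %3d\n",
		       i, tx[i], rx[i]);
	return 0;
}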
4284
c4cf55e5
PWJ
4285/**
4286 * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
4287 * @adapter: board private structure to initialize
4288 *
4289 * Cache the descriptor ring offsets for Flow Director to the assigned rings.
4290 *
4291 **/
4292static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
4293{
4294 int i;
4295 bool ret = false;
4296
4297 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
4298 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4299 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
4300 for (i = 0; i < adapter->num_rx_queues; i++)
4a0b9ca0 4301 adapter->rx_ring[i]->reg_idx = i;
c4cf55e5 4302 for (i = 0; i < adapter->num_tx_queues; i++)
4a0b9ca0 4303 adapter->tx_ring[i]->reg_idx = i;
c4cf55e5
PWJ
4304 ret = true;
4305 }
4306
4307 return ret;
4308}
4309
0331a832
YZ
4310#ifdef IXGBE_FCOE
4311/**
4312 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
4313 * @adapter: board private structure to initialize
4314 *
4315 * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
4316 *
4317 */
4318static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
4319{
8de8b2e6 4320 int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
0331a832
YZ
4321 bool ret = false;
4322 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
4323
4324 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
4325#ifdef CONFIG_IXGBE_DCB
4326 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
8de8b2e6
YZ
4327 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
4328
0331a832 4329 ixgbe_cache_ring_dcb(adapter);
8de8b2e6 4330 /* find out queues in TC for FCoE */
4a0b9ca0
PW
4331 fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
4332 fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
8de8b2e6
YZ
4333 /*
4334 * In 82599, the number of Tx queues for each traffic
4335 * class for both 8-TC and 4-TC modes are:
4336 * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
4337 * 8 TCs: 32 32 16 16 8 8 8 8
4338 * 4 TCs: 64 64 32 32
4339 * We have max 8 queues for FCoE, where 8 is the
4340 * FCoE redirection table size. If TC for FCoE is
4341 * less than or equal to TC3, we have enough queues
4342 * to add max of 8 queues for FCoE, so we start FCoE
4343 * tx descriptor from the next one, i.e., reg_idx + 1.
4344 * If TC for FCoE is above TC3, implying 8 TC mode,
4345 * and we need 8 for FCoE, we have to take all queues
4346 * in that traffic class for FCoE.
4347 */
4348 if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
4349 fcoe_tx_i--;
0331a832
YZ
4350 }
4351#endif /* CONFIG_IXGBE_DCB */
4352 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
8faa2a78
YZ
4353 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4354 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
4355 ixgbe_cache_ring_fdir(adapter);
4356 else
4357 ixgbe_cache_ring_rss(adapter);
4358
8de8b2e6
YZ
4359 fcoe_rx_i = f->mask;
4360 fcoe_tx_i = f->mask;
4361 }
4362 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
4a0b9ca0
PW
4363 adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
4364 adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
0331a832 4365 }
0331a832
YZ
4366 ret = true;
4367 }
4368 return ret;
4369}
4370
4371#endif /* IXGBE_FCOE */
1cdd1ec8
GR
4372/**
4373 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
4374 * @adapter: board private structure to initialize
4375 *
4376 * SR-IOV doesn't use any descriptor rings but changes the default if
4377 * no other mapping is used.
4378 *
4379 */
4380static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
4381{
4a0b9ca0
PW
4382 adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
4383 adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
1cdd1ec8
GR
4384 if (adapter->num_vfs)
4385 return true;
4386 else
4387 return false;
4388}
4389
bc97114d
PWJ
4390/**
4391 * ixgbe_cache_ring_register - Descriptor ring to register mapping
4392 * @adapter: board private structure to initialize
4393 *
4394 * Once we know the feature-set enabled for the device, we'll cache
4395 * the register offset the descriptor ring is assigned to.
4396 *
4397 * Note, the order the various feature calls is important. It must start with
4398 * the "most" features enabled at the same time, then trickle down to the
4399 * least amount of features turned on at once.
4400 **/
4401static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
4402{
4403 /* start with default case */
4a0b9ca0
PW
4404 adapter->rx_ring[0]->reg_idx = 0;
4405 adapter->tx_ring[0]->reg_idx = 0;
bc97114d 4406
1cdd1ec8
GR
4407 if (ixgbe_cache_ring_sriov(adapter))
4408 return;
4409
0331a832
YZ
4410#ifdef IXGBE_FCOE
4411 if (ixgbe_cache_ring_fcoe(adapter))
4412 return;
4413
4414#endif /* IXGBE_FCOE */
bc97114d
PWJ
4415#ifdef CONFIG_IXGBE_DCB
4416 if (ixgbe_cache_ring_dcb(adapter))
4417 return;
4418
4419#endif
c4cf55e5
PWJ
4420 if (ixgbe_cache_ring_fdir(adapter))
4421 return;
4422
bc97114d
PWJ
4423 if (ixgbe_cache_ring_rss(adapter))
4424 return;
021230d4
AV
4425}
4426
9a799d71
AK
4427/**
4428 * ixgbe_alloc_queues - Allocate memory for all rings
4429 * @adapter: board private structure to initialize
4430 *
4431 * We allocate one ring per queue at run-time since we don't know the
4df10466
JB
4432 * number of queues at compile-time. Ring memory is allocated on the
4433 * adapter's NUMA node when one is assigned, round-robin otherwise.
9a799d71 4434 **/
2f90b865 4435static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
9a799d71
AK
4436{
4437 int i;
4a0b9ca0 4438 int orig_node = adapter->node;
9a799d71 4439
021230d4 4440 for (i = 0; i < adapter->num_tx_queues; i++) {
4a0b9ca0
PW
4441 struct ixgbe_ring *ring = adapter->tx_ring[i];
4442 if (orig_node == -1) {
4443 int cur_node = next_online_node(adapter->node);
4444 if (cur_node == MAX_NUMNODES)
4445 cur_node = first_online_node;
4446 adapter->node = cur_node;
4447 }
4448 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
4449 adapter->node);
4450 if (!ring)
4451 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
4452 if (!ring)
4453 goto err_tx_ring_allocation;
4454 ring->count = adapter->tx_ring_count;
4455 ring->queue_index = i;
4456 ring->numa_node = adapter->node;
4457
4458 adapter->tx_ring[i] = ring;
021230d4 4459 }
b9804972 4460
4a0b9ca0
PW
4461 /* Restore the adapter's original node */
4462 adapter->node = orig_node;
4463
9a799d71 4464 for (i = 0; i < adapter->num_rx_queues; i++) {
4a0b9ca0
PW
4465 struct ixgbe_ring *ring = adapter->rx_ring[i];
4466 if (orig_node == -1) {
4467 int cur_node = next_online_node(adapter->node);
4468 if (cur_node == MAX_NUMNODES)
4469 cur_node = first_online_node;
4470 adapter->node = cur_node;
4471 }
4472 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
4473 adapter->node);
4474 if (!ring)
4475 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
4476 if (!ring)
4477 goto err_rx_ring_allocation;
4478 ring->count = adapter->rx_ring_count;
4479 ring->queue_index = i;
4480 ring->numa_node = adapter->node;
4481
4482 adapter->rx_ring[i] = ring;
021230d4
AV
4483 }
4484
4a0b9ca0
PW
4485 /* Restore the adapter's original node */
4486 adapter->node = orig_node;
4487
021230d4
AV
4488 ixgbe_cache_ring_register(adapter);
4489
4490 return 0;
4491
4492err_rx_ring_allocation:
4a0b9ca0
PW
4493 for (i = 0; i < adapter->num_tx_queues; i++)
4494 kfree(adapter->tx_ring[i]);
021230d4
AV
4495err_tx_ring_allocation:
4496 return -ENOMEM;
4497}
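
When adapter->node is -1, ixgbe_alloc_queues() round-robins ring memory across
the online NUMA nodes, wrapping back to the first when next_online_node() runs
past the end. A toy version of that rotation over a fixed node list (node IDs
invented):

#include <stdio.h>

static const int online[] = { 0, 1 };	/* pretend two online NUMA nodes */
#define NODES ((int)(sizeof(online) / sizeof(online[0])))

/* pick a node for the next ring, continuing where the last pick stopped */
static int next_node(int *cursor)
{
	int node = online[*cursor];

	*cursor = (*cursor + 1) % NODES;	/* wrap to the first node */
	return node;
}

int main(void)
{
	int cursor = 0, i;

	for (i = 0; i < 5; i++)
		printf("ring %d allocated on node %d\n", i, next_node(&cursor));
	return 0;
}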
4498
4499/**
4500 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
4501 * @adapter: board private structure to initialize
4502 *
4503 * Attempt to configure the interrupts using the best available
4504 * capabilities of the hardware and the kernel.
4505 **/
feea6a57 4506static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
021230d4 4507{
8be0e467 4508 struct ixgbe_hw *hw = &adapter->hw;
021230d4
AV
4509 int err = 0;
4510 int vector, v_budget;
4511
4512 /*
4513 * It's easy to be greedy for MSI-X vectors, but it really
4514 * doesn't do us much good if we have a lot more vectors
4515 * than CPU's. So let's be conservative and only ask for
342bde1b 4516 * (roughly) the same number of vectors as there are CPU's.
021230d4
AV
4517 */
4518 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
342bde1b 4519 (int)num_online_cpus()) + NON_Q_VECTORS;
021230d4
AV
4520
4521 /*
4522 * At the same time, hardware can only support a maximum of
8be0e467
PW
4523 * hw.mac->max_msix_vectors vectors. With features
4524 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
4525 * descriptor queues supported by our device. Thus, we cap it off in
4526 * those rare cases where the cpu count also exceeds our vector limit.
021230d4 4527 */
8be0e467 4528 v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
021230d4
AV
4529
4530 /* A failure in MSI-X entry allocation isn't fatal, but it does
4531 * mean we disable MSI-X capabilities of the adapter. */
4532 adapter->msix_entries = kcalloc(v_budget,
b4617240 4533 sizeof(struct msix_entry), GFP_KERNEL);
7a921c93
AD
4534 if (adapter->msix_entries) {
4535 for (vector = 0; vector < v_budget; vector++)
4536 adapter->msix_entries[vector].entry = vector;
021230d4 4537
7a921c93 4538 ixgbe_acquire_msix_vectors(adapter, v_budget);
021230d4 4539
7a921c93
AD
4540 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
4541 goto out;
4542 }
26d27844 4543
7a921c93
AD
4544 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
4545 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
c4cf55e5
PWJ
4546 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
4547 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4548 adapter->atr_sample_rate = 0;
1cdd1ec8
GR
4549 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4550 ixgbe_disable_sriov(adapter);
4551
7a921c93 4552 ixgbe_set_num_queues(adapter);
021230d4 4553
021230d4
AV
4554 err = pci_enable_msi(adapter->pdev);
4555 if (!err) {
4556 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
4557 } else {
849c4542
ET
4558 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
4559 "Unable to allocate MSI interrupt, "
4560 "falling back to legacy. Error: %d\n", err);
021230d4
AV
4561 /* reset err */
4562 err = 0;
4563 }
4564
4565out:
021230d4
AV
4566 return err;
4567}
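
The v_budget computation near the top of this function is two clamps in a row:
roughly one vector per queue, but no more than the CPU count, plus the
non-queue vectors, all capped by what the MAC can address. The arithmetic in
isolation (every count below is hypothetical, including NON_Q_VECTORS):

#include <stdio.h>

#define NON_Q_VECTORS 1		/* assumed: one vector for link/other */

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int rxq = 16, txq = 16;	/* hypothetical queue counts  */
	int ncpus = 8;		/* hypothetical online CPUs   */
	int hw_max = 64;	/* hypothetical MSI-X ceiling */

	/* no point asking for many more vectors than CPUs ... */
	int v_budget = min_int(rxq + txq, ncpus) + NON_Q_VECTORS;

	/* ... and the device caps us regardless */
	v_budget = min_int(v_budget, hw_max);

	printf("request %d MSI-X vectors\n", v_budget);	/* 9 */
	return 0;
}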
4568
7a921c93
AD
4569/**
4570 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
4571 * @adapter: board private structure to initialize
4572 *
4573 * We allocate one q_vector per queue interrupt. If allocation fails we
4574 * return -ENOMEM.
4575 **/
4576static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
4577{
4578 int q_idx, num_q_vectors;
4579 struct ixgbe_q_vector *q_vector;
4580 int napi_vectors;
4581 int (*poll)(struct napi_struct *, int);
4582
4583 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4584 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
4585 napi_vectors = adapter->num_rx_queues;
91281fd3 4586 poll = &ixgbe_clean_rxtx_many;
7a921c93
AD
4587 } else {
4588 num_q_vectors = 1;
4589 napi_vectors = 1;
4590 poll = &ixgbe_poll;
4591 }
4592
4593 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1a6c14a2
JB
4594 q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
4595 GFP_KERNEL, adapter->node);
4596 if (!q_vector)
4597 q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
4598 GFP_KERNEL);
7a921c93
AD
4599 if (!q_vector)
4600 goto err_out;
4601 q_vector->adapter = adapter;
f7554a2b
NS
4602 if (q_vector->txr_count && !q_vector->rxr_count)
4603 q_vector->eitr = adapter->tx_eitr_param;
4604 else
4605 q_vector->eitr = adapter->rx_eitr_param;
fe49f04a 4606 q_vector->v_idx = q_idx;
91281fd3 4607 netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
7a921c93
AD
4608 adapter->q_vector[q_idx] = q_vector;
4609 }
4610
4611 return 0;
4612
4613err_out:
4614 while (q_idx) {
4615 q_idx--;
4616 q_vector = adapter->q_vector[q_idx];
4617 netif_napi_del(&q_vector->napi);
4618 kfree(q_vector);
4619 adapter->q_vector[q_idx] = NULL;
4620 }
4621 return -ENOMEM;
4622}
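
The err_out path above is the standard unwind idiom: on the first failed
allocation, walk the index back down and release everything built so far,
leaving the array as it was found. The generic shape, with malloc()/free()
standing in for the q_vector setup:

#include <stdlib.h>

#define N 4

static void *items[N];

static int alloc_all(void)
{
	int i;

	for (i = 0; i < N; i++) {
		items[i] = malloc(64);
		if (!items[i])
			goto err_out;
	}
	return 0;

err_out:
	while (i) {			/* free only what was allocated */
		i--;
		free(items[i]);
		items[i] = NULL;
	}
	return -1;			/* -ENOMEM in the driver */
}

int main(void)
{
	return alloc_all() ? 1 : 0;
}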
4623
4624/**
4625 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
4626 * @adapter: board private structure to initialize
4627 *
4628 * This function frees the memory allocated to the q_vectors. In addition if
4629 * NAPI is enabled it will delete any references to the NAPI struct prior
4630 * to freeing the q_vector.
4631 **/
4632static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
4633{
4634 int q_idx, num_q_vectors;
7a921c93 4635
91281fd3 4636 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
7a921c93 4637 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
91281fd3 4638 else
7a921c93 4639 num_q_vectors = 1;
7a921c93
AD
4640
4641 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
4642 struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
7a921c93 4643 adapter->q_vector[q_idx] = NULL;
91281fd3 4644 netif_napi_del(&q_vector->napi);
7a921c93
AD
4645 kfree(q_vector);
4646 }
4647}
4648
7b25cdba 4649static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
021230d4
AV
4650{
4651 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4652 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
4653 pci_disable_msix(adapter->pdev);
4654 kfree(adapter->msix_entries);
4655 adapter->msix_entries = NULL;
4656 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
4657 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
4658 pci_disable_msi(adapter->pdev);
4659 }
021230d4
AV
4660}
4661
4662/**
4663 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
4664 * @adapter: board private structure to initialize
4665 *
4666 * We determine which interrupt scheme to use based on...
4667 * - Kernel support (MSI, MSI-X)
4668 * - which can be user-defined (via MODULE_PARAM)
4669 * - Hardware queue count (num_*_queues)
4670 * - defined by miscellaneous hardware support/features (RSS, etc.)
4671 **/
2f90b865 4672int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
021230d4
AV
4673{
4674 int err;
4675
4676 /* Number of supported queues */
4677 ixgbe_set_num_queues(adapter);
4678
021230d4
AV
4679 err = ixgbe_set_interrupt_capability(adapter);
4680 if (err) {
849c4542 4681 e_dev_err("Unable to setup interrupt capabilities\n");
021230d4 4682 goto err_set_interrupt;
9a799d71
AK
4683 }
4684
7a921c93
AD
4685 err = ixgbe_alloc_q_vectors(adapter);
4686 if (err) {
849c4542 4687 e_dev_err("Unable to allocate memory for queue vectors\n");
7a921c93
AD
4688 goto err_alloc_q_vectors;
4689 }
4690
4691 err = ixgbe_alloc_queues(adapter);
4692 if (err) {
849c4542 4693 e_dev_err("Unable to allocate memory for queues\n");
7a921c93
AD
4694 goto err_alloc_queues;
4695 }
4696
849c4542 4697 e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
396e799c
ET
4698 (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
4699 adapter->num_rx_queues, adapter->num_tx_queues);
021230d4
AV
4700
4701 set_bit(__IXGBE_DOWN, &adapter->state);
4702
9a799d71 4703 return 0;
021230d4 4704
7a921c93
AD
4705err_alloc_queues:
4706 ixgbe_free_q_vectors(adapter);
4707err_alloc_q_vectors:
4708 ixgbe_reset_interrupt_capability(adapter);
021230d4 4709err_set_interrupt:
7a921c93
AD
4710 return err;
4711}
4712
4713/**
4714 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
4715 * @adapter: board private structure to clear interrupt scheme on
4716 *
4717 * We go through and clear interrupt specific resources and reset the structure
4718 * to pre-load conditions
4719 **/
4720void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
4721{
4a0b9ca0
PW
4722 int i;
4723
4724 for (i = 0; i < adapter->num_tx_queues; i++) {
4725 kfree(adapter->tx_ring[i]);
4726 adapter->tx_ring[i] = NULL;
4727 }
4728 for (i = 0; i < adapter->num_rx_queues; i++) {
4729 kfree(adapter->rx_ring[i]);
4730 adapter->rx_ring[i] = NULL;
4731 }
7a921c93
AD
4732
4733 ixgbe_free_q_vectors(adapter);
4734 ixgbe_reset_interrupt_capability(adapter);
9a799d71
AK
4735}
4736
c4900be0
DS
4737/**
4738 * ixgbe_sfp_timer - timer that kicks off the search for a missing module
4739 * @data: pointer to our adapter struct
4740 **/
4741static void ixgbe_sfp_timer(unsigned long data)
4742{
4743 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
4744
4df10466
JB
4745 /*
4746 * Do the sfp_timer's work in process context due to the
c4900be0
DS
4747 * delays that sfp+ detection requires
4748 */
4749 schedule_work(&adapter->sfp_task);
4750}
4751
4752/**
4753 * ixgbe_sfp_task - worker thread to find a missing module
4754 * @work: pointer to work_struct containing our data
4755 **/
4756static void ixgbe_sfp_task(struct work_struct *work)
4757{
4758 struct ixgbe_adapter *adapter = container_of(work,
4759 struct ixgbe_adapter,
4760 sfp_task);
4761 struct ixgbe_hw *hw = &adapter->hw;
4762
4763 if ((hw->phy.type == ixgbe_phy_nl) &&
4764 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4765 s32 ret = hw->phy.ops.identify_sfp(hw);
63d6e1d8 4766 if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
c4900be0
DS
4767 goto reschedule;
4768 ret = hw->phy.ops.reset(hw);
4769 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
849c4542
ET
4770 e_dev_err("failed to initialize because an unsupported "
4771 "SFP+ module type was detected.\n");
4772 e_dev_err("Reload the driver after installing a "
4773 "supported module.\n");
c4900be0
DS
4774 unregister_netdev(adapter->netdev);
4775 } else {
396e799c 4776 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
c4900be0
DS
4777 }
4778 /* don't need this routine any more */
4779 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
4780 }
4781 return;
4782reschedule:
4783 if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
4784 mod_timer(&adapter->sfp_timer,
4785 round_jiffies(jiffies + (2 * HZ)));
4786}
4787
9a799d71
AK
4788/**
4789 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
4790 * @adapter: board private structure to initialize
4791 *
4792 * ixgbe_sw_init initializes the Adapter private data structure.
4793 * Fields are initialized based on PCI device information and
4794 * OS network device settings (MTU size).
4795 **/
4796static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4797{
4798 struct ixgbe_hw *hw = &adapter->hw;
4799 struct pci_dev *pdev = adapter->pdev;
9a713e7c 4800 struct net_device *dev = adapter->netdev;
021230d4 4801 unsigned int rss;
7a6b6f51 4802#ifdef CONFIG_IXGBE_DCB
2f90b865
AD
4803 int j;
4804 struct tc_configuration *tc;
4805#endif
021230d4 4806
c44ade9e
JB
4807 /* PCI config space info */
4808
4809 hw->vendor_id = pdev->vendor;
4810 hw->device_id = pdev->device;
4811 hw->revision_id = pdev->revision;
4812 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4813 hw->subsystem_device_id = pdev->subsystem_device;
4814
021230d4
AV
4815 /* Set capability flags */
4816 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
4817 adapter->ring_feature[RING_F_RSS].indices = rss;
4818 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
2f90b865 4819 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
bf069c97
DS
4820 if (hw->mac.type == ixgbe_mac_82598EB) {
4821 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4822 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
e8e26350 4823 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
bf069c97 4824 } else if (hw->mac.type == ixgbe_mac_82599EB) {
e8e26350 4825 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
0c19d6af
PWJ
4826 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
4827 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
119fc60a
MC
4828 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
4829 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
9a713e7c
PW
4830 if (dev->features & NETIF_F_NTUPLE) {
4831 /* Flow Director perfect filter enabled */
4832 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4833 adapter->atr_sample_rate = 0;
4834 spin_lock_init(&adapter->fdir_perfect_lock);
4835 } else {
4836 /* Flow Director hash filters enabled */
4837 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
4838 adapter->atr_sample_rate = 20;
4839 }
c4cf55e5
PWJ
4840 adapter->ring_feature[RING_F_FDIR].indices =
4841 IXGBE_MAX_FDIR_INDICES;
c4cf55e5 4842 adapter->fdir_pballoc = 0;
eacd73f7 4843#ifdef IXGBE_FCOE
0d551589
YZ
4844 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
4845 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
4846 adapter->ring_feature[RING_F_FCOE].indices = 0;
61a0f421 4847#ifdef CONFIG_IXGBE_DCB
6ee16520
YZ
4848 /* Default traffic class to use for FCoE */
4849 adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
56075a98 4850 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
61a0f421 4851#endif
eacd73f7 4852#endif /* IXGBE_FCOE */
f8212f97 4853 }
2f90b865 4854
7a6b6f51 4855#ifdef CONFIG_IXGBE_DCB
2f90b865
AD
4856 /* Configure DCB traffic classes */
4857 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
4858 tc = &adapter->dcb_cfg.tc_config[j];
4859 tc->path[DCB_TX_CONFIG].bwg_id = 0;
4860 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
4861 tc->path[DCB_RX_CONFIG].bwg_id = 0;
4862 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
4863 tc->dcb_pfc = pfc_disabled;
4864 }
4865 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
4866 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
4867 adapter->dcb_cfg.rx_pba_cfg = pba_equal;
264857b8 4868 adapter->dcb_cfg.pfc_mode_enable = false;
2f90b865
AD
4869 adapter->dcb_cfg.round_robin_enable = false;
4870 adapter->dcb_set_bitmap = 0x00;
4871 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
4872 adapter->ring_feature[RING_F_DCB].indices);
4873
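 /*
 * Worked check on the defaults above (assuming MAX_TRAFFIC_CLASS == 8):
 * 12 + (j & 1) alternates 12%/13% across the eight classes, so the
 * per-TC shares of bandwidth group 0 sum to 4*12 + 4*13 = 100, matching
 * the 100% assigned to bw_percentage[..][0] above.
 */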
4874#endif
9a799d71
AK
4875
4876 /* default flow control settings */
cd7664f6 4877 hw->fc.requested_mode = ixgbe_fc_full;
71fd570b 4878 hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */
264857b8
PWJ
4879#ifdef CONFIG_DCB
4880 adapter->last_lfc_mode = hw->fc.current_mode;
4881#endif
2b9ade93
JB
4882 hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
4883 hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
4884 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
4885 hw->fc.send_xon = true;
71fd570b 4886 hw->fc.disable_fc_autoneg = false;
9a799d71 4887
30efa5a3 4888 /* enable itr by default in dynamic mode */
f7554a2b
NS
4889 adapter->rx_itr_setting = 1;
4890 adapter->rx_eitr_param = 20000;
4891 adapter->tx_itr_setting = 1;
4892 adapter->tx_eitr_param = 10000;
30efa5a3
JB
4893
4894 /* set defaults for eitr in MegaBytes */
4895 adapter->eitr_low = 10;
4896 adapter->eitr_high = 20;
4897
4898 /* set default ring sizes */
4899 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
4900 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
4901
9a799d71 4902 /* initialize eeprom parameters */
c44ade9e 4903 if (ixgbe_init_eeprom_params_generic(hw)) {
849c4542 4904 e_dev_err("EEPROM initialization failed\n");
9a799d71
AK
4905 return -EIO;
4906 }
4907
021230d4 4908 /* enable rx csum by default */
9a799d71
AK
4909 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
4910
1a6c14a2
JB
4911 /* get assigned NUMA node */
4912 adapter->node = dev_to_node(&pdev->dev);
4913
9a799d71
AK
4914 set_bit(__IXGBE_DOWN, &adapter->state);
4915
4916 return 0;
4917}
4918
4919/**
4920 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
4921 * @adapter: board private structure
3a581073 4922 * @tx_ring: tx descriptor ring (for a specific queue) to setup
9a799d71
AK
4923 *
4924 * Return 0 on success, negative on failure
4925 **/
4926int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
e01c31a5 4927 struct ixgbe_ring *tx_ring)
9a799d71
AK
4928{
4929 struct pci_dev *pdev = adapter->pdev;
4930 int size;
4931
3a581073 4932 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4a0b9ca0 4933 tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
1a6c14a2
JB
4934 if (!tx_ring->tx_buffer_info)
4935 tx_ring->tx_buffer_info = vmalloc(size);
e01c31a5
JB
4936 if (!tx_ring->tx_buffer_info)
4937 goto err;
3a581073 4938 memset(tx_ring->tx_buffer_info, 0, size);
9a799d71
AK
4939
4940 /* round up to nearest 4K */
12207e49 4941 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
3a581073 4942 tx_ring->size = ALIGN(tx_ring->size, 4096);
9a799d71 4943
1b507730
NN
4944 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
4945 &tx_ring->dma, GFP_KERNEL);
e01c31a5
JB
4946 if (!tx_ring->desc)
4947 goto err;
9a799d71 4948
3a581073
JB
4949 tx_ring->next_to_use = 0;
4950 tx_ring->next_to_clean = 0;
4951 tx_ring->work_limit = tx_ring->count;
9a799d71 4952 return 0;
e01c31a5
JB
4953
4954err:
4955 vfree(tx_ring->tx_buffer_info);
4956 tx_ring->tx_buffer_info = NULL;
396e799c 4957 e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n");
e01c31a5 4958 return -ENOMEM;
9a799d71
AK
4959}
4960
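The allocation path above tries a NUMA-local vmalloc_node() first, falls back to plain vmalloc(), and rounds the descriptor area up to a 4 KiB boundary before the coherent DMA allocation. A minimal standalone sketch of that rounding step (the 16-byte descriptor size is an assumption for illustration, mirroring sizeof(union ixgbe_adv_tx_desc)):

#include <stdio.h>

#define DESC_SIZE  16UL   /* assumed sizeof(union ixgbe_adv_tx_desc) */
#define RING_ALIGN 4096UL

/* mirrors ALIGN(size, 4096): round up to the next 4 KiB multiple */
static unsigned long ring_bytes(unsigned long count)
{
        unsigned long size = count * DESC_SIZE;

        return (size + RING_ALIGN - 1) & ~(RING_ALIGN - 1);
}

int main(void)
{
        printf("%lu\n", ring_bytes(512)); /* 8192: already aligned  */
        printf("%lu\n", ring_bytes(100)); /* 1600 rounds up to 4096 */
        return 0;
}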
69888674
AD
4961/**
4962 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
4963 * @adapter: board private structure
4964 *
4965 * If this function returns with an error, then it's possible one or
4966 * more of the rings is populated (while the rest are not). It is the
4967 * caller's duty to clean those orphaned rings.
4968 *
4969 * Return 0 on success, negative on failure
4970 **/
4971static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
4972{
4973 int i, err = 0;
4974
4975 for (i = 0; i < adapter->num_tx_queues; i++) {
4a0b9ca0 4976 err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
69888674
AD
4977 if (!err)
4978 continue;
396e799c 4979 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
69888674
AD
4980 break;
4981 }
4982
4983 return err;
4984}
4985
9a799d71
AK
4986/**
4987 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
4988 * @adapter: board private structure
3a581073 4989 * @rx_ring: rx descriptor ring (for a specific queue) to setup
9a799d71
AK
4990 *
4991 * Returns 0 on success, negative on failure
4992 **/
4993int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
b4617240 4994 struct ixgbe_ring *rx_ring)
9a799d71
AK
4995{
4996 struct pci_dev *pdev = adapter->pdev;
021230d4 4997 int size;
9a799d71 4998
3a581073 4999 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
1a6c14a2
JB
5000 rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
5001 if (!rx_ring->rx_buffer_info)
5002 rx_ring->rx_buffer_info = vmalloc(size);
3a581073 5003 if (!rx_ring->rx_buffer_info) {
396e799c
ET
5004 e_err(probe, "vmalloc allocation failed for the Rx "
5005 "descriptor ring\n");
177db6ff 5006 goto alloc_failed;
9a799d71 5007 }
3a581073 5008 memset(rx_ring->rx_buffer_info, 0, size);
9a799d71 5009
9a799d71 5010 /* Round up to nearest 4K */
3a581073
JB
5011 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
5012 rx_ring->size = ALIGN(rx_ring->size, 4096);
9a799d71 5013
1b507730
NN
5014 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
5015 &rx_ring->dma, GFP_KERNEL);
9a799d71 5016
3a581073 5017 if (!rx_ring->desc) {
396e799c
ET
5018 e_err(probe, "Memory allocation failed for the Rx "
5019 "descriptor ring\n");
3a581073 5020 vfree(rx_ring->rx_buffer_info);
177db6ff 5021 goto alloc_failed;
9a799d71
AK
5022 }
5023
3a581073
JB
5024 rx_ring->next_to_clean = 0;
5025 rx_ring->next_to_use = 0;
9a799d71
AK
5026
5027 return 0;
177db6ff
MC
5028
5029alloc_failed:
177db6ff 5030 return -ENOMEM;
9a799d71
AK
5031}
5032
69888674
AD
5033/**
5034 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
5035 * @adapter: board private structure
5036 *
5037 * If this function returns with an error, then it's possible one or
5038 * more of the rings is populated (while the rest are not). It is the
5039 * caller's duty to clean those orphaned rings.
5040 *
5041 * Return 0 on success, negative on failure
5042 **/
5043
5044static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
5045{
5046 int i, err = 0;
5047
5048 for (i = 0; i < adapter->num_rx_queues; i++) {
4a0b9ca0 5049 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
69888674
AD
5050 if (!err)
5051 continue;
396e799c 5052 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
69888674
AD
5053 break;
5054 }
5055
5056 return err;
5057}
5058
9a799d71
AK
5059/**
5060 * ixgbe_free_tx_resources - Free Tx Resources per Queue
5061 * @adapter: board private structure
5062 * @tx_ring: Tx descriptor ring for a specific queue
5063 *
5064 * Free all transmit software resources
5065 **/
c431f97e
JB
5066void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
5067 struct ixgbe_ring *tx_ring)
9a799d71
AK
5068{
5069 struct pci_dev *pdev = adapter->pdev;
5070
5071 ixgbe_clean_tx_ring(adapter, tx_ring);
5072
5073 vfree(tx_ring->tx_buffer_info);
5074 tx_ring->tx_buffer_info = NULL;
5075
1b507730
NN
5076 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
5077 tx_ring->dma);
9a799d71
AK
5078
5079 tx_ring->desc = NULL;
5080}
5081
5082/**
5083 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
5084 * @adapter: board private structure
5085 *
5086 * Free all transmit software resources
5087 **/
5088static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
5089{
5090 int i;
5091
5092 for (i = 0; i < adapter->num_tx_queues; i++)
4a0b9ca0
PW
5093 if (adapter->tx_ring[i]->desc)
5094 ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
9a799d71
AK
5095}
5096
5097/**
b4617240 5098 * ixgbe_free_rx_resources - Free Rx Resources
9a799d71
AK
5099 * @adapter: board private structure
5100 * @rx_ring: ring to clean the resources from
5101 *
5102 * Free all receive software resources
5103 **/
c431f97e
JB
5104void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
5105 struct ixgbe_ring *rx_ring)
9a799d71
AK
5106{
5107 struct pci_dev *pdev = adapter->pdev;
5108
5109 ixgbe_clean_rx_ring(adapter, rx_ring);
5110
5111 vfree(rx_ring->rx_buffer_info);
5112 rx_ring->rx_buffer_info = NULL;
5113
1b507730
NN
5114 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
5115 rx_ring->dma);
9a799d71
AK
5116
5117 rx_ring->desc = NULL;
5118}
5119
5120/**
5121 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
5122 * @adapter: board private structure
5123 *
5124 * Free all receive software resources
5125 **/
5126static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
5127{
5128 int i;
5129
5130 for (i = 0; i < adapter->num_rx_queues; i++)
4a0b9ca0
PW
5131 if (adapter->rx_ring[i]->desc)
5132 ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
9a799d71
AK
5133}
5134
9a799d71
AK
5135/**
5136 * ixgbe_change_mtu - Change the Maximum Transfer Unit
5137 * @netdev: network interface device structure
5138 * @new_mtu: new value for maximum frame size
5139 *
5140 * Returns 0 on success, negative on failure
5141 **/
5142static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5143{
5144 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5145 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5146
42c783c5
JB
5147 /* MTU < 68 is an error and causes problems on some kernels */
5148 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
9a799d71
AK
5149 return -EINVAL;
5150
396e799c 5151 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
021230d4 5152 /* must set new MTU before calling down or up */
9a799d71
AK
5153 netdev->mtu = new_mtu;
5154
d4f80882
AV
5155 if (netif_running(netdev))
5156 ixgbe_reinit_locked(adapter);
9a799d71
AK
5157
5158 return 0;
5159}
5160
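The check above adds the Ethernet header and FCS to the requested MTU before comparing against the jumbo-frame ceiling. A minimal sketch of that arithmetic, with 9728 assumed for IXGBE_MAX_JUMBO_FRAME_SIZE:

#include <stdbool.h>
#include <stdio.h>

#define HDR_LEN   14    /* ETH_HLEN */
#define FCS_LEN    4    /* ETH_FCS_LEN */
#define MAX_JUMBO 9728  /* assumed IXGBE_MAX_JUMBO_FRAME_SIZE */

static bool mtu_ok(int new_mtu)
{
        int max_frame = new_mtu + HDR_LEN + FCS_LEN;

        /* an MTU below 68 is rejected outright, as in the driver */
        return new_mtu >= 68 && max_frame <= MAX_JUMBO;
}

int main(void)
{
        /* prints 0 1 0: 67 is too small, 9711 + 18 exceeds 9728 */
        printf("%d %d %d\n", mtu_ok(67), mtu_ok(1500), mtu_ok(9711));
        return 0;
}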
5161/**
5162 * ixgbe_open - Called when a network interface is made active
5163 * @netdev: network interface device structure
5164 *
5165 * Returns 0 on success, negative value on failure
5166 *
5167 * The open entry point is called when a network interface is made
5168 * active by the system (IFF_UP). At this point all resources needed
5169 * for transmit and receive operations are allocated, the interrupt
5170 * handler is registered with the OS, the watchdog timer is started,
5171 * and the stack is notified that the interface is ready.
5172 **/
5173static int ixgbe_open(struct net_device *netdev)
5174{
5175 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5176 int err;
4bebfaa5
AK
5177
5178 /* disallow open during test */
5179 if (test_bit(__IXGBE_TESTING, &adapter->state))
5180 return -EBUSY;
9a799d71 5181
54386467
JB
5182 netif_carrier_off(netdev);
5183
9a799d71
AK
5184 /* allocate transmit descriptors */
5185 err = ixgbe_setup_all_tx_resources(adapter);
5186 if (err)
5187 goto err_setup_tx;
5188
9a799d71
AK
5189 /* allocate receive descriptors */
5190 err = ixgbe_setup_all_rx_resources(adapter);
5191 if (err)
5192 goto err_setup_rx;
5193
5194 ixgbe_configure(adapter);
5195
021230d4 5196 err = ixgbe_request_irq(adapter);
9a799d71
AK
5197 if (err)
5198 goto err_req_irq;
5199
9a799d71
AK
5200 err = ixgbe_up_complete(adapter);
5201 if (err)
5202 goto err_up;
5203
d55b53ff
JK
5204 netif_tx_start_all_queues(netdev);
5205
9a799d71
AK
5206 return 0;
5207
5208err_up:
5eba3699 5209 ixgbe_release_hw_control(adapter);
9a799d71
AK
5210 ixgbe_free_irq(adapter);
5211err_req_irq:
9a799d71 5212err_setup_rx:
a20a1199 5213 ixgbe_free_all_rx_resources(adapter);
9a799d71 5214err_setup_tx:
a20a1199 5215 ixgbe_free_all_tx_resources(adapter);
9a799d71
AK
5216 ixgbe_reset(adapter);
5217
5218 return err;
5219}
5220
5221/**
5222 * ixgbe_close - Disables a network interface
5223 * @netdev: network interface device structure
5224 *
5225 * Returns 0, this is not allowed to fail
5226 *
5227 * The close entry point is called when an interface is de-activated
5228 * by the OS. The hardware is still under the drivers control, but
5229 * needs to be disabled. A global MAC reset is issued to stop the
5230 * hardware, and all transmit and receive resources are freed.
5231 **/
5232static int ixgbe_close(struct net_device *netdev)
5233{
5234 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9a799d71
AK
5235
5236 ixgbe_down(adapter);
5237 ixgbe_free_irq(adapter);
5238
5239 ixgbe_free_all_tx_resources(adapter);
5240 ixgbe_free_all_rx_resources(adapter);
5241
5eba3699 5242 ixgbe_release_hw_control(adapter);
9a799d71
AK
5243
5244 return 0;
5245}
5246
b3c8b4ba
AD
5247#ifdef CONFIG_PM
5248static int ixgbe_resume(struct pci_dev *pdev)
5249{
5250 struct net_device *netdev = pci_get_drvdata(pdev);
5251 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5252 u32 err;
5253
5254 pci_set_power_state(pdev, PCI_D0);
5255 pci_restore_state(pdev);
656ab817
DS
5256 /*
5257 * pci_restore_state clears dev->state_saved so call
5258 * pci_save_state to restore it.
5259 */
5260 pci_save_state(pdev);
9ce77666 5261
5262 err = pci_enable_device_mem(pdev);
b3c8b4ba 5263 if (err) {
849c4542 5264 e_dev_err("Cannot enable PCI device from suspend\n");
b3c8b4ba
AD
5265 return err;
5266 }
5267 pci_set_master(pdev);
5268
dd4d8ca6 5269 pci_wake_from_d3(pdev, false);
b3c8b4ba
AD
5270
5271 err = ixgbe_init_interrupt_scheme(adapter);
5272 if (err) {
849c4542 5273 e_dev_err("Cannot initialize interrupts for device\n");
b3c8b4ba
AD
5274 return err;
5275 }
5276
b3c8b4ba
AD
5277 ixgbe_reset(adapter);
5278
495dce12
WJP
5279 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
5280
b3c8b4ba
AD
5281 if (netif_running(netdev)) {
5282 err = ixgbe_open(adapter->netdev);
5283 if (err)
5284 return err;
5285 }
5286
5287 netif_device_attach(netdev);
5288
5289 return 0;
5290}
b3c8b4ba 5291#endif /* CONFIG_PM */
9d8d05ae
RW
5292
5293static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
b3c8b4ba
AD
5294{
5295 struct net_device *netdev = pci_get_drvdata(pdev);
5296 struct ixgbe_adapter *adapter = netdev_priv(netdev);
e8e26350
PW
5297 struct ixgbe_hw *hw = &adapter->hw;
5298 u32 ctrl, fctrl;
5299 u32 wufc = adapter->wol;
b3c8b4ba
AD
5300#ifdef CONFIG_PM
5301 int retval = 0;
5302#endif
5303
5304 netif_device_detach(netdev);
5305
5306 if (netif_running(netdev)) {
5307 ixgbe_down(adapter);
5308 ixgbe_free_irq(adapter);
5309 ixgbe_free_all_tx_resources(adapter);
5310 ixgbe_free_all_rx_resources(adapter);
5311 }
b3c8b4ba
AD
5312
5313#ifdef CONFIG_PM
5314 retval = pci_save_state(pdev);
5315 if (retval)
5316 return retval;
4df10466 5317
b3c8b4ba 5318#endif
e8e26350
PW
5319 if (wufc) {
5320 ixgbe_set_rx_mode(netdev);
b3c8b4ba 5321
e8e26350
PW
5322 /* turn on all-multi mode if wake on multicast is enabled */
5323 if (wufc & IXGBE_WUFC_MC) {
5324 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5325 fctrl |= IXGBE_FCTRL_MPE;
5326 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
5327 }
5328
5329 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
5330 ctrl |= IXGBE_CTRL_GIO_DIS;
5331 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
5332
5333 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
5334 } else {
5335 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
5336 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
5337 }
5338
dd4d8ca6
DS
5339 if (wufc && hw->mac.type == ixgbe_mac_82599EB)
5340 pci_wake_from_d3(pdev, true);
5341 else
5342 pci_wake_from_d3(pdev, false);
b3c8b4ba 5343
9d8d05ae
RW
5344 *enable_wake = !!wufc;
5345
fa378134
AG
5346 ixgbe_clear_interrupt_scheme(adapter);
5347
b3c8b4ba
AD
5348 ixgbe_release_hw_control(adapter);
5349
5350 pci_disable_device(pdev);
5351
9d8d05ae
RW
5352 return 0;
5353}
5354
5355#ifdef CONFIG_PM
5356static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
5357{
5358 int retval;
5359 bool wake;
5360
5361 retval = __ixgbe_shutdown(pdev, &wake);
5362 if (retval)
5363 return retval;
5364
5365 if (wake) {
5366 pci_prepare_to_sleep(pdev);
5367 } else {
5368 pci_wake_from_d3(pdev, false);
5369 pci_set_power_state(pdev, PCI_D3hot);
5370 }
b3c8b4ba
AD
5371
5372 return 0;
5373}
9d8d05ae 5374#endif /* CONFIG_PM */
b3c8b4ba
AD
5375
5376static void ixgbe_shutdown(struct pci_dev *pdev)
5377{
9d8d05ae
RW
5378 bool wake;
5379
5380 __ixgbe_shutdown(pdev, &wake);
5381
5382 if (system_state == SYSTEM_POWER_OFF) {
5383 pci_wake_from_d3(pdev, wake);
5384 pci_set_power_state(pdev, PCI_D3hot);
5385 }
b3c8b4ba
AD
5386}
5387
9a799d71
AK
5388/**
5389 * ixgbe_update_stats - Update the board statistics counters.
5390 * @adapter: board private structure
5391 **/
5392void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5393{
2d86f139 5394 struct net_device *netdev = adapter->netdev;
9a799d71 5395 struct ixgbe_hw *hw = &adapter->hw;
6f11eef7
AV
5396 u64 total_mpc = 0;
5397 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
eb985f09 5398 u64 non_eop_descs = 0, restart_queue = 0;
9a799d71 5399
d08935c2
DS
5400 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5401 test_bit(__IXGBE_RESETTING, &adapter->state))
5402 return;
5403
94b982b2 5404 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
f8212f97 5405 u64 rsc_count = 0;
94b982b2 5406 u64 rsc_flush = 0;
d51019a4
PW
5407 for (i = 0; i < 16; i++)
5408 adapter->hw_rx_no_dma_resources +=
5409 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
94b982b2 5410 for (i = 0; i < adapter->num_rx_queues; i++) {
4a0b9ca0
PW
5411 rsc_count += adapter->rx_ring[i]->rsc_count;
5412 rsc_flush += adapter->rx_ring[i]->rsc_flush;
94b982b2
MC
5413 }
5414 adapter->rsc_total_count = rsc_count;
5415 adapter->rsc_total_flush = rsc_flush;
d51019a4
PW
5416 }
5417
7ca3bc58
JB
5418 /* gather some stats to the adapter struct that are per queue */
5419 for (i = 0; i < adapter->num_tx_queues; i++)
4a0b9ca0 5420 restart_queue += adapter->tx_ring[i]->restart_queue;
eb985f09 5421 adapter->restart_queue = restart_queue;
7ca3bc58
JB
5422
5423 for (i = 0; i < adapter->num_rx_queues; i++)
4a0b9ca0 5424 non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
eb985f09 5425 adapter->non_eop_descs = non_eop_descs;
7ca3bc58 5426
9a799d71 5427 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
6f11eef7
AV
5428 for (i = 0; i < 8; i++) {
5429 /* for packet buffers not used, the register should read 0 */
5430 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
5431 missed_rx += mpc;
5432 adapter->stats.mpc[i] += mpc;
5433 total_mpc += adapter->stats.mpc[i];
e8e26350
PW
5434 if (hw->mac.type == ixgbe_mac_82598EB)
5435 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2f90b865
AD
5436 adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
5437 adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
5438 adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
5439 adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
e8e26350
PW
5440 if (hw->mac.type == ixgbe_mac_82599EB) {
5441 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
5442 IXGBE_PXONRXCNT(i));
5443 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
5444 IXGBE_PXOFFRXCNT(i));
5445 adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
e8e26350
PW
5446 } else {
5447 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
5448 IXGBE_PXONRXC(i));
5449 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
5450 IXGBE_PXOFFRXC(i));
5451 }
2f90b865
AD
5452 adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
5453 IXGBE_PXONTXC(i));
2f90b865 5454 adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
e8e26350 5455 IXGBE_PXOFFTXC(i));
6f11eef7
AV
5456 }
5457 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
5458 /* work around hardware counting issue */
5459 adapter->stats.gprc -= missed_rx;
5460
5461 /* 82598 hardware only has a 32 bit counter in the high register */
e8e26350 5462 if (hw->mac.type == ixgbe_mac_82599EB) {
aad71918 5463 u64 tmp;
e8e26350 5464 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
aad71918
BG
5465 tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */
5466 adapter->stats.gorc += (tmp << 32);
e8e26350 5467 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
aad71918
BG
5468 tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */
5469 adapter->stats.gotc += (tmp << 32);
e8e26350
PW
5470 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
5471 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
5472 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
5473 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
c4cf55e5
PWJ
5474 adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
5475 adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
6d45522c
YZ
5476#ifdef IXGBE_FCOE
5477 adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
5478 adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
5479 adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
5480 adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
5481 adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
5482 adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
5483#endif /* IXGBE_FCOE */
e8e26350
PW
5484 } else {
5485 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
5486 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
5487 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
5488 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
5489 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
5490 }
9a799d71
AK
5491 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
5492 adapter->stats.bprc += bprc;
5493 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
e8e26350
PW
5494 if (hw->mac.type == ixgbe_mac_82598EB)
5495 adapter->stats.mprc -= bprc;
9a799d71
AK
5496 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
5497 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
5498 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
5499 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
5500 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
5501 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
5502 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
9a799d71 5503 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
6f11eef7
AV
5504 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
5505 adapter->stats.lxontxc += lxon;
5506 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
5507 adapter->stats.lxofftxc += lxoff;
9a799d71
AK
5508 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
5509 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
6f11eef7
AV
5510 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
5511 /*
5512 * 82598 errata - tx of flow control packets is included in tx counters
5513 */
5514 xon_off_tot = lxon + lxoff;
5515 adapter->stats.gptc -= xon_off_tot;
5516 adapter->stats.mptc -= xon_off_tot;
5517 adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
9a799d71
AK
5518 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
5519 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
5520 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
9a799d71
AK
5521 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
5522 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
6f11eef7 5523 adapter->stats.ptc64 -= xon_off_tot;
9a799d71
AK
5524 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
5525 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
5526 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
5527 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
5528 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
9a799d71
AK
5529 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
5530
5531 /* Fill out the OS statistics structure */
2d86f139 5532 netdev->stats.multicast = adapter->stats.mprc;
9a799d71
AK
5533
5534 /* Rx Errors */
2d86f139 5535 netdev->stats.rx_errors = adapter->stats.crcerrs +
b4617240 5536 adapter->stats.rlec;
2d86f139
AK
5537 netdev->stats.rx_dropped = 0;
5538 netdev->stats.rx_length_errors = adapter->stats.rlec;
5539 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
5540 netdev->stats.rx_missed_errors = total_mpc;
9a799d71
AK
5541}
5542
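On 82599 the good-octet counters read above are 36 bits wide: GORCL/GOTCL carry the low 32 bits and only the bottom nibble of GORCH/GOTCH is implemented. A standalone sketch of how the two register reads combine into one 64-bit total:

#include <stdint.h>
#include <stdio.h>

/* combine the 32-bit low register with the 4 valid high bits */
static uint64_t combine_36bit(uint32_t lo, uint32_t hi)
{
        uint64_t tmp = hi & 0xF;  /* only bits 35:32 are implemented */

        return ((uint64_t)tmp << 32) + lo;
}

int main(void)
{
        /* 0x2FFFFFFFF == 12884901887 */
        printf("%llu\n", (unsigned long long)combine_36bit(0xFFFFFFFFu, 0x2));
        return 0;
}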
5543/**
5544 * ixgbe_watchdog - Timer Call-back
5545 * @data: pointer to adapter cast into an unsigned long
5546 **/
5547static void ixgbe_watchdog(unsigned long data)
5548{
5549 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
cf8280ee 5550 struct ixgbe_hw *hw = &adapter->hw;
fe49f04a
AD
5551 u64 eics = 0;
5552 int i;
cf8280ee 5553
fe49f04a
AD
5554 /*
5555 * Do the watchdog outside of interrupt context due to the lovely
5556 * delays that some of the newer hardware requires
5557 */
22d5a71b 5558
fe49f04a
AD
5559 if (test_bit(__IXGBE_DOWN, &adapter->state))
5560 goto watchdog_short_circuit;
22d5a71b 5561
fe49f04a
AD
5562 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
5563 /*
5564 * for legacy and MSI interrupts don't set any bits
5565 * that are enabled for EIAM, because this operation
5566 * would set *both* EIMS and EICS for any bit in EIAM
5567 */
5568 IXGBE_WRITE_REG(hw, IXGBE_EICS,
5569 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
5570 goto watchdog_reschedule;
5571 }
5572
5573 /* get one bit for every active tx/rx interrupt vector */
5574 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
5575 struct ixgbe_q_vector *qv = adapter->q_vector[i];
5576 if (qv->rxr_count || qv->txr_count)
5577 eics |= ((u64)1 << i);
cf8280ee 5578 }
9a799d71 5579
fe49f04a
AD
5580 /* Cause software interrupt to ensure rx rings are cleaned */
5581 ixgbe_irq_rearm_queues(adapter, eics);
5582
5583watchdog_reschedule:
5584 /* Reset the timer */
5585 mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
5586
5587watchdog_short_circuit:
cf8280ee
JB
5588 schedule_work(&adapter->watchdog_task);
5589}
5590
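In MSI-X mode the watchdog above assembles an EICS mask with one bit for every vector that owns at least one Tx or Rx ring, then rearms those vectors so any stale Rx work gets cleaned. The mask construction in isolation:

#include <stdint.h>
#include <stdio.h>

struct vec { int rxr_count, txr_count; };  /* mirrors the q_vector counts */

static uint64_t build_eics(const struct vec *v, int nvec)
{
        uint64_t eics = 0;
        int i;

        for (i = 0; i < nvec; i++)
                if (v[i].rxr_count || v[i].txr_count)
                        eics |= (uint64_t)1 << i;
        return eics;
}

int main(void)
{
        struct vec v[4] = { {1, 1}, {0, 0}, {2, 0}, {0, 1} };

        printf("0x%llx\n", (unsigned long long)build_eics(v, 4)); /* 0xd */
        return 0;
}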
e8e26350
PW
5591/**
5592 * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
5593 * @work: pointer to work_struct containing our data
5594 **/
5595static void ixgbe_multispeed_fiber_task(struct work_struct *work)
5596{
5597 struct ixgbe_adapter *adapter = container_of(work,
5598 struct ixgbe_adapter,
5599 multispeed_fiber_task);
5600 struct ixgbe_hw *hw = &adapter->hw;
5601 u32 autoneg;
8620a103 5602 bool negotiation;
e8e26350
PW
5603
5604 adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
a1f25324
MC
5605 autoneg = hw->phy.autoneg_advertised;
5606 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
8620a103 5607 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
1097cd17 5608 hw->mac.autotry_restart = false;
8620a103
MC
5609 if (hw->mac.ops.setup_link)
5610 hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
e8e26350
PW
5611 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5612 adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
5613}
5614
5615/**
5616 * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module
5617 * @work: pointer to work_struct containing our data
5618 **/
5619static void ixgbe_sfp_config_module_task(struct work_struct *work)
5620{
5621 struct ixgbe_adapter *adapter = container_of(work,
5622 struct ixgbe_adapter,
5623 sfp_config_module_task);
5624 struct ixgbe_hw *hw = &adapter->hw;
5625 u32 err;
5626
5627 adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
63d6e1d8
DS
5628
5629 /* Time for electrical oscillations to settle down */
5630 msleep(100);
e8e26350 5631 err = hw->phy.ops.identify_sfp(hw);
63d6e1d8 5632
e8e26350 5633 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
849c4542
ET
5634 e_dev_err("failed to initialize because an unsupported SFP+ "
5635 "module type was detected.\n");
5636 e_dev_err("Reload the driver after installing a supported "
5637 "module.\n");
63d6e1d8 5638 unregister_netdev(adapter->netdev);
e8e26350
PW
5639 return;
5640 }
5641 hw->mac.ops.setup_sfp(hw);
5642
8d1c3c07 5643 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
e8e26350
PW
5644 /* This will also work for DA Twinax connections */
5645 schedule_work(&adapter->multispeed_fiber_task);
5646 adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
5647}
5648
c4cf55e5
PWJ
5649/**
5650 * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
5651 * @work: pointer to work_struct containing our data
5652 **/
5653static void ixgbe_fdir_reinit_task(struct work_struct *work)
5654{
5655 struct ixgbe_adapter *adapter = container_of(work,
5656 struct ixgbe_adapter,
5657 fdir_reinit_task);
5658 struct ixgbe_hw *hw = &adapter->hw;
5659 int i;
5660
5661 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
5662 for (i = 0; i < adapter->num_tx_queues; i++)
5663 set_bit(__IXGBE_FDIR_INIT_DONE,
4a0b9ca0 5664 &(adapter->tx_ring[i]->reinit_state));
c4cf55e5 5665 } else {
396e799c 5666 e_err(probe, "failed to finish FDIR re-initialization, "
849c4542 5667 "ignored adding FDIR ATR filters\n");
c4cf55e5
PWJ
5668 }
5669 /* Done FDIR Re-initialization, enable transmits */
5670 netif_tx_start_all_queues(adapter->netdev);
5671}
5672
10eec955
JF
5673static DEFINE_MUTEX(ixgbe_watchdog_lock);
5674
cf8280ee 5675/**
69888674
AD
5676 * ixgbe_watchdog_task - worker thread to bring link up
5677 * @work: pointer to work_struct containing our data
cf8280ee
JB
5678 **/
5679static void ixgbe_watchdog_task(struct work_struct *work)
5680{
5681 struct ixgbe_adapter *adapter = container_of(work,
5682 struct ixgbe_adapter,
5683 watchdog_task);
5684 struct net_device *netdev = adapter->netdev;
5685 struct ixgbe_hw *hw = &adapter->hw;
10eec955
JF
5686 u32 link_speed;
5687 bool link_up;
bc59fcda
NS
5688 int i;
5689 struct ixgbe_ring *tx_ring;
5690 int some_tx_pending = 0;
cf8280ee 5691
10eec955
JF
5692 mutex_lock(&ixgbe_watchdog_lock);
5693
5694 link_up = adapter->link_up;
5695 link_speed = adapter->link_speed;
cf8280ee
JB
5696
5697 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
5698 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
264857b8
PWJ
5699 if (link_up) {
5700#ifdef CONFIG_DCB
5701 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
5702 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
620fa036 5703 hw->mac.ops.fc_enable(hw, i);
264857b8 5704 } else {
620fa036 5705 hw->mac.ops.fc_enable(hw, 0);
264857b8
PWJ
5706 }
5707#else
620fa036 5708 hw->mac.ops.fc_enable(hw, 0);
264857b8
PWJ
5709#endif
5710 }
5711
cf8280ee
JB
5712 if (link_up ||
5713 time_after(jiffies, (adapter->link_check_timeout +
5714 IXGBE_TRY_LINK_TIMEOUT))) {
cf8280ee 5715 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
264857b8 5716 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
cf8280ee
JB
5717 }
5718 adapter->link_up = link_up;
5719 adapter->link_speed = link_speed;
5720 }
9a799d71
AK
5721
5722 if (link_up) {
5723 if (!netif_carrier_ok(netdev)) {
e8e26350
PW
5724 bool flow_rx, flow_tx;
5725
5726 if (hw->mac.type == ixgbe_mac_82599EB) {
5727 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
5728 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
078788b6
PWJ
5729 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
5730 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
e8e26350
PW
5731 } else {
5732 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5733 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
078788b6
PWJ
5734 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
5735 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
e8e26350
PW
5736 }
5737
396e799c 5738 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
a46e534b 5739 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
849c4542
ET
5740 "10 Gbps" :
5741 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
5742 "1 Gbps" : "unknown speed")),
e8e26350 5743 ((flow_rx && flow_tx) ? "RX/TX" :
849c4542
ET
5744 (flow_rx ? "RX" :
5745 (flow_tx ? "TX" : "None"))));
9a799d71
AK
5746
5747 netif_carrier_on(netdev);
9a799d71
AK
5748 } else {
5749 /* Force detection of hung controller */
5750 adapter->detect_tx_hung = true;
5751 }
5752 } else {
cf8280ee
JB
5753 adapter->link_up = false;
5754 adapter->link_speed = 0;
9a799d71 5755 if (netif_carrier_ok(netdev)) {
396e799c 5756 e_info(drv, "NIC Link is Down\n");
9a799d71 5757 netif_carrier_off(netdev);
9a799d71
AK
5758 }
5759 }
5760
bc59fcda
NS
5761 if (!netif_carrier_ok(netdev)) {
5762 for (i = 0; i < adapter->num_tx_queues; i++) {
4a0b9ca0 5763 tx_ring = adapter->tx_ring[i];
bc59fcda
NS
5764 if (tx_ring->next_to_use != tx_ring->next_to_clean) {
5765 some_tx_pending = 1;
5766 break;
5767 }
5768 }
5769
5770 if (some_tx_pending) {
5771 /* We've lost link, so the controller stops DMA,
5772 * but we've got queued Tx work that's never going
5773 * to get done, so reset controller to flush Tx.
5774 * (Do the reset outside of interrupt context).
5775 */
5776 schedule_work(&adapter->reset_task);
5777 }
5778 }
5779
9a799d71 5780 ixgbe_update_stats(adapter);
10eec955 5781 mutex_unlock(&ixgbe_watchdog_lock);
9a799d71
AK
5782}
5783
9a799d71 5784static int ixgbe_tso(struct ixgbe_adapter *adapter,
b4617240
PW
5785 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
5786 u32 tx_flags, u8 *hdr_len)
9a799d71
AK
5787{
5788 struct ixgbe_adv_tx_context_desc *context_desc;
5789 unsigned int i;
5790 int err;
5791 struct ixgbe_tx_buffer *tx_buffer_info;
9f8cdf4f
JB
5792 u32 vlan_macip_lens = 0, type_tucmd_mlhl;
5793 u32 mss_l4len_idx, l4len;
9a799d71
AK
5794
5795 if (skb_is_gso(skb)) {
5796 if (skb_header_cloned(skb)) {
5797 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5798 if (err)
5799 return err;
5800 }
5801 l4len = tcp_hdrlen(skb);
5802 *hdr_len += l4len;
5803
8327d000 5804 if (skb->protocol == htons(ETH_P_IP)) {
9a799d71
AK
5805 struct iphdr *iph = ip_hdr(skb);
5806 iph->tot_len = 0;
5807 iph->check = 0;
5808 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
b4617240
PW
5809 iph->daddr, 0,
5810 IPPROTO_TCP,
5811 0);
8e1e8a47 5812 } else if (skb_is_gso_v6(skb)) {
9a799d71
AK
5813 ipv6_hdr(skb)->payload_len = 0;
5814 tcp_hdr(skb)->check =
5815 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
b4617240
PW
5816 &ipv6_hdr(skb)->daddr,
5817 0, IPPROTO_TCP, 0);
9a799d71
AK
5818 }
5819
5820 i = tx_ring->next_to_use;
5821
5822 tx_buffer_info = &tx_ring->tx_buffer_info[i];
31f05a2d 5823 context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
9a799d71
AK
5824
5825 /* VLAN MACLEN IPLEN */
5826 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
5827 vlan_macip_lens |=
5828 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
5829 vlan_macip_lens |= ((skb_network_offset(skb)) <<
b4617240 5830 IXGBE_ADVTXD_MACLEN_SHIFT);
9a799d71
AK
5831 *hdr_len += skb_network_offset(skb);
5832 vlan_macip_lens |=
5833 (skb_transport_header(skb) - skb_network_header(skb));
5834 *hdr_len +=
5835 (skb_transport_header(skb) - skb_network_header(skb));
5836 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
5837 context_desc->seqnum_seed = 0;
5838
5839 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
9f8cdf4f 5840 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
b4617240 5841 IXGBE_ADVTXD_DTYP_CTXT);
9a799d71 5842
8327d000 5843 if (skb->protocol == htons(ETH_P_IP))
9a799d71
AK
5844 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
5845 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
5846 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
5847
5848 /* MSS L4LEN IDX */
9f8cdf4f 5849 mss_l4len_idx =
9a799d71
AK
5850 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
5851 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
4eeae6fd
PW
5852 /* use index 1 for TSO */
5853 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
9a799d71
AK
5854 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
5855
5856 tx_buffer_info->time_stamp = jiffies;
5857 tx_buffer_info->next_to_watch = i;
5858
5859 i++;
5860 if (i == tx_ring->count)
5861 i = 0;
5862 tx_ring->next_to_use = i;
5863
5864 return true;
5865 }
5866 return false;
5867}
5868
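The TSO path above zeroes the IP total length and seeds tcp_hdr->check with a zero-length pseudo-header sum that the hardware completes per segment. The kernel helpers work in network byte order; the sketch below only illustrates the underlying 16-bit one's-complement folding, not the exact csum_tcpudp_magic() calling convention:

#include <stdint.h>
#include <stdio.h>

/* one's-complement sum of the IPv4 pseudo-header fields with the
 * length omitted (the zero-length seed above), folded to 16 bits */
static uint16_t pseudo_sum(uint32_t saddr, uint32_t daddr, uint8_t proto)
{
        uint32_t sum = (saddr >> 16) + (saddr & 0xFFFF)
                     + (daddr >> 16) + (daddr & 0xFFFF)
                     + proto;

        while (sum >> 16)  /* fold carries back into the low 16 bits */
                sum = (sum & 0xFFFF) + (sum >> 16);
        return (uint16_t)sum;
}

int main(void)
{
        /* 192.0.2.1 -> 198.51.100.7, TCP (6); host order for brevity */
        printf("0x%04x\n", pseudo_sum(0xC0000201u, 0xC6336407u, 6));
        return 0;
}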
5869static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
b4617240
PW
5870 struct ixgbe_ring *tx_ring,
5871 struct sk_buff *skb, u32 tx_flags)
9a799d71
AK
5872{
5873 struct ixgbe_adv_tx_context_desc *context_desc;
5874 unsigned int i;
5875 struct ixgbe_tx_buffer *tx_buffer_info;
5876 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
5877
5878 if (skb->ip_summed == CHECKSUM_PARTIAL ||
5879 (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
5880 i = tx_ring->next_to_use;
5881 tx_buffer_info = &tx_ring->tx_buffer_info[i];
31f05a2d 5882 context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
9a799d71
AK
5883
5884 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
5885 vlan_macip_lens |=
5886 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
5887 vlan_macip_lens |= (skb_network_offset(skb) <<
b4617240 5888 IXGBE_ADVTXD_MACLEN_SHIFT);
9a799d71
AK
5889 if (skb->ip_summed == CHECKSUM_PARTIAL)
5890 vlan_macip_lens |= (skb_transport_header(skb) -
b4617240 5891 skb_network_header(skb));
9a799d71
AK
5892
5893 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
5894 context_desc->seqnum_seed = 0;
5895
5896 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
b4617240 5897 IXGBE_ADVTXD_DTYP_CTXT);
9a799d71
AK
5898
5899 if (skb->ip_summed == CHECKSUM_PARTIAL) {
ca553980
GS
5900 __be16 protocol;
5901
5902 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
5903 const struct vlan_ethhdr *vhdr =
5904 (const struct vlan_ethhdr *)skb->data;
5905
5906 protocol = vhdr->h_vlan_encapsulated_proto;
5907 } else {
5908 protocol = skb->protocol;
5909 }
5910
5911 switch (protocol) {
09640e63 5912 case cpu_to_be16(ETH_P_IP):
9a799d71 5913 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
41825d71
AK
5914 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
5915 type_tucmd_mlhl |=
b4617240 5916 IXGBE_ADVTXD_TUCMD_L4T_TCP;
45a5ead0
JB
5917 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
5918 type_tucmd_mlhl |=
5919 IXGBE_ADVTXD_TUCMD_L4T_SCTP;
41825d71 5920 break;
09640e63 5921 case cpu_to_be16(ETH_P_IPV6):
41825d71
AK
5922 /* XXX what about other V6 headers?? */
5923 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
5924 type_tucmd_mlhl |=
b4617240 5925 IXGBE_ADVTXD_TUCMD_L4T_TCP;
45a5ead0
JB
5926 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
5927 type_tucmd_mlhl |=
5928 IXGBE_ADVTXD_TUCMD_L4T_SCTP;
41825d71 5929 break;
41825d71
AK
5930 default:
5931 if (unlikely(net_ratelimit())) {
396e799c
ET
5932 e_warn(probe, "partial checksum "
5933 "but proto=%x!\n",
5934 skb->protocol);
41825d71
AK
5935 }
5936 break;
5937 }
9a799d71
AK
5938 }
5939
5940 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
4eeae6fd 5941 /* use index zero for tx checksum offload */
9a799d71
AK
5942 context_desc->mss_l4len_idx = 0;
5943
5944 tx_buffer_info->time_stamp = jiffies;
5945 tx_buffer_info->next_to_watch = i;
9f8cdf4f 5946
9a799d71
AK
5947 i++;
5948 if (i == tx_ring->count)
5949 i = 0;
5950 tx_ring->next_to_use = i;
5951
5952 return true;
5953 }
9f8cdf4f 5954
9a799d71
AK
5955 return false;
5956}
5957
5958static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
b4617240 5959 struct ixgbe_ring *tx_ring,
eacd73f7
YZ
5960 struct sk_buff *skb, u32 tx_flags,
5961 unsigned int first)
9a799d71 5962{
e5a43549 5963 struct pci_dev *pdev = adapter->pdev;
9a799d71 5964 struct ixgbe_tx_buffer *tx_buffer_info;
eacd73f7
YZ
5965 unsigned int len;
5966 unsigned int total = skb->len;
9a799d71
AK
5967 unsigned int offset = 0, size, count = 0, i;
5968 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
5969 unsigned int f;
9a799d71
AK
5970
5971 i = tx_ring->next_to_use;
5972
eacd73f7
YZ
5973 if (tx_flags & IXGBE_TX_FLAGS_FCOE)
5974 /* excluding fcoe_crc_eof for FCoE */
5975 total -= sizeof(struct fcoe_crc_eof);
5976
5977 len = min(skb_headlen(skb), total);
9a799d71
AK
5978 while (len) {
5979 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5980 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
5981
5982 tx_buffer_info->length = size;
e5a43549 5983 tx_buffer_info->mapped_as_page = false;
1b507730 5984 tx_buffer_info->dma = dma_map_single(&pdev->dev,
e5a43549 5985 skb->data + offset,
1b507730
NN
5986 size, DMA_TO_DEVICE);
5987 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
e5a43549 5988 goto dma_error;
9a799d71
AK
5989 tx_buffer_info->time_stamp = jiffies;
5990 tx_buffer_info->next_to_watch = i;
5991
5992 len -= size;
eacd73f7 5993 total -= size;
9a799d71
AK
5994 offset += size;
5995 count++;
44df32c5
AD
5996
5997 if (len) {
5998 i++;
5999 if (i == tx_ring->count)
6000 i = 0;
6001 }
9a799d71
AK
6002 }
6003
6004 for (f = 0; f < nr_frags; f++) {
6005 struct skb_frag_struct *frag;
6006
6007 frag = &skb_shinfo(skb)->frags[f];
eacd73f7 6008 len = min((unsigned int)frag->size, total);
e5a43549 6009 offset = frag->page_offset;
9a799d71
AK
6010
6011 while (len) {
44df32c5
AD
6012 i++;
6013 if (i == tx_ring->count)
6014 i = 0;
6015
9a799d71
AK
6016 tx_buffer_info = &tx_ring->tx_buffer_info[i];
6017 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
6018
6019 tx_buffer_info->length = size;
1b507730 6020 tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
e5a43549
AD
6021 frag->page,
6022 offset, size,
1b507730 6023 DMA_TO_DEVICE);
e5a43549 6024 tx_buffer_info->mapped_as_page = true;
1b507730 6025 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
e5a43549 6026 goto dma_error;
9a799d71
AK
6027 tx_buffer_info->time_stamp = jiffies;
6028 tx_buffer_info->next_to_watch = i;
6029
6030 len -= size;
eacd73f7 6031 total -= size;
9a799d71
AK
6032 offset += size;
6033 count++;
9a799d71 6034 }
eacd73f7
YZ
6035 if (total == 0)
6036 break;
9a799d71 6037 }
44df32c5 6038
9a799d71
AK
6039 tx_ring->tx_buffer_info[i].skb = skb;
6040 tx_ring->tx_buffer_info[first].next_to_watch = i;
6041
e5a43549
AD
6042 return count;
6043
6044dma_error:
849c4542 6045 e_dev_err("TX DMA map failed\n");
e5a43549
AD
6046
6047 /* clear timestamp and dma mappings for failed tx_buffer_info map */
6048 tx_buffer_info->dma = 0;
6049 tx_buffer_info->time_stamp = 0;
6050 tx_buffer_info->next_to_watch = 0;
c1fa347f
RK
6051 if (count)
6052 count--;
e5a43549
AD
6053
6054 /* clear timestamp and dma mappings for remaining portion of packet */
c1fa347f
RK
6055 while (count--) {
6056 if (i == 0)
e5a43549 6057 i += tx_ring->count;
c1fa347f 6058 i--;
e5a43549
AD
6059 tx_buffer_info = &tx_ring->tx_buffer_info[i];
6060 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
6061 }
6062
e44d38e1 6063 return 0;
9a799d71
AK
6064}
6065
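The dma_error unwind above walks the ring backwards from the slot that failed, wrapping from index 0 to the last slot. The wrap arithmetic on its own:

#include <stdio.h>

/* step a ring index back one slot, wrapping 0 -> count - 1 */
static int ring_prev(int i, int count)
{
        if (i == 0)
                i += count;
        return i - 1;
}

int main(void)
{
        int i = 1, steps;

        for (steps = 0; steps < 3; steps++) {
                i = ring_prev(i, 8);
                printf("%d ", i);  /* prints: 0 7 6 */
        }
        printf("\n");
        return 0;
}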
6066static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
b4617240
PW
6067 struct ixgbe_ring *tx_ring,
6068 int tx_flags, int count, u32 paylen, u8 hdr_len)
9a799d71
AK
6069{
6070 union ixgbe_adv_tx_desc *tx_desc = NULL;
6071 struct ixgbe_tx_buffer *tx_buffer_info;
6072 u32 olinfo_status = 0, cmd_type_len = 0;
6073 unsigned int i;
6074 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
6075
6076 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
6077
6078 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
6079
6080 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
6081 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
6082
6083 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
6084 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
6085
6086 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
b4617240 6087 IXGBE_ADVTXD_POPTS_SHIFT;
9a799d71 6088
4eeae6fd
PW
6089 /* use index 1 context for tso */
6090 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
9a799d71
AK
6091 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
6092 olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
b4617240 6093 IXGBE_ADVTXD_POPTS_SHIFT;
9a799d71
AK
6094
6095 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
6096 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
b4617240 6097 IXGBE_ADVTXD_POPTS_SHIFT;
9a799d71 6098
eacd73f7
YZ
6099 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
6100 olinfo_status |= IXGBE_ADVTXD_CC;
6101 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
6102 if (tx_flags & IXGBE_TX_FLAGS_FSO)
6103 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
6104 }
6105
9a799d71
AK
6106 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
6107
6108 i = tx_ring->next_to_use;
6109 while (count--) {
6110 tx_buffer_info = &tx_ring->tx_buffer_info[i];
31f05a2d 6111 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
9a799d71
AK
6112 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
6113 tx_desc->read.cmd_type_len =
b4617240 6114 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
9a799d71 6115 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
9a799d71
AK
6116 i++;
6117 if (i == tx_ring->count)
6118 i = 0;
6119 }
6120
6121 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
6122
6123 /*
6124 * Force memory writes to complete before letting h/w
6125 * know there are new descriptors to fetch. (Only
6126 * applicable for weak-ordered memory model archs,
6127 * such as IA-64).
6128 */
6129 wmb();
6130
6131 tx_ring->next_to_use = i;
6132 writel(i, adapter->hw.hw_addr + tx_ring->tail);
6133}
6134
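The wmb() before the tail writel() above makes the descriptor writes visible before the hardware can observe the new producer index. A userspace analogue of the same publish step, sketched with a C11 release store standing in for the barrier-plus-MMIO pair:

#include <stdatomic.h>
#include <stdio.h>

static int ring[16];           /* stand-in for the Tx descriptor ring */
static _Atomic unsigned tail;  /* stand-in for the tail register */

static void publish(unsigned next_to_use)
{
        /* every ring[] write above this store becomes visible first */
        atomic_store_explicit(&tail, next_to_use, memory_order_release);
}

int main(void)
{
        ring[0] = 42;  /* "descriptor" filled before publishing */
        publish(1);
        printf("tail=%u\n", atomic_load(&tail));
        return 0;
}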
c4cf55e5
PWJ
6135static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
6136 int queue, u32 tx_flags)
6137{
c4cf55e5
PWJ
6138 struct ixgbe_atr_input atr_input;
6139 struct tcphdr *th;
c4cf55e5
PWJ
6140 struct iphdr *iph = ip_hdr(skb);
6141 struct ethhdr *eth = (struct ethhdr *)skb->data;
6142 u16 vlan_id, src_port, dst_port, flex_bytes;
6143 u32 src_ipv4_addr, dst_ipv4_addr;
6144 u8 l4type = 0;
6145
d3ead241
GG
6146 /* Right now, we support IPv4 only */
6147 if (skb->protocol != htons(ETH_P_IP))
6148 return;
c4cf55e5
PWJ
6149 /* check if we're UDP or TCP */
6150 if (iph->protocol == IPPROTO_TCP) {
6151 th = tcp_hdr(skb);
6152 src_port = th->source;
6153 dst_port = th->dest;
6154 l4type |= IXGBE_ATR_L4TYPE_TCP;
6155 /* l4type IPv4 type is 0, no need to assign */
c4cf55e5
PWJ
6156 } else {
6157 /* Unsupported L4 header, just bail here */
6158 return;
6159 }
6160
6161 memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
6162
6163 vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
6164 IXGBE_TX_FLAGS_VLAN_SHIFT;
6165 src_ipv4_addr = iph->saddr;
6166 dst_ipv4_addr = iph->daddr;
6167 flex_bytes = eth->h_proto;
6168
6169 ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
6170 ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
6171 ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
6172 ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
6173 ixgbe_atr_set_l4type_82599(&atr_input, l4type);
6174 /* src and dst are inverted, think how the receiver sees them */
6175 ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
6176 ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
6177
6178 /* This assumes the Rx queue and Tx queue are bound to the same CPU */
6179 ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
6180}
6181
e092be60 6182static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
b4617240 6183 struct ixgbe_ring *tx_ring, int size)
e092be60 6184{
30eba97a 6185 netif_stop_subqueue(netdev, tx_ring->queue_index);
e092be60
AV
6186 /* Herbert's original patch had:
6187 * smp_mb__after_netif_stop_queue();
6188 * but since that doesn't exist yet, just open code it. */
6189 smp_mb();
6190
6191 /* We need to check again in case another CPU has just
6192 * made room available. */
6193 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
6194 return -EBUSY;
6195
6196 /* A reprieve! - use start_queue because it doesn't call schedule */
af72166f 6197 netif_start_subqueue(netdev, tx_ring->queue_index);
7ca3bc58 6198 ++tx_ring->restart_queue;
e092be60
AV
6199 return 0;
6200}
6201
6202static int ixgbe_maybe_stop_tx(struct net_device *netdev,
b4617240 6203 struct ixgbe_ring *tx_ring, int size)
e092be60
AV
6204{
6205 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
6206 return 0;
6207 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
6208}
6209
09a3b1f8
SH
6210static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6211{
6212 struct ixgbe_adapter *adapter = netdev_priv(dev);
5f715823 6213 int txq = smp_processor_id();
09a3b1f8 6214
56075a98
JF
6215#ifdef IXGBE_FCOE
6216 if ((skb->protocol == htons(ETH_P_FCOE)) ||
6217 (skb->protocol == htons(ETH_P_FIP))) {
6218 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
6219 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
6220 txq += adapter->ring_feature[RING_F_FCOE].mask;
6221 return txq;
4bc091d8 6222#ifdef CONFIG_IXGBE_DCB
56075a98
JF
6223 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6224 txq = adapter->fcoe.up;
6225 return txq;
4bc091d8 6226#endif
56075a98
JF
6227 }
6228 }
6229#endif
6230
fdd3d631
KK
6231 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
6232 while (unlikely(txq >= dev->real_num_tx_queues))
6233 txq -= dev->real_num_tx_queues;
5f715823 6234 return txq;
fdd3d631 6235 }
c4cf55e5 6236
2ea186ae
JF
6237 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6238 if (skb->priority == TC_PRIO_CONTROL)
6239 txq = adapter->ring_feature[RING_F_DCB].indices-1;
6240 else
6241 txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK)
6242 >> 13;
6243 return txq;
6244 }
09a3b1f8
SH
6245
6246 return skb_tx_hash(dev, skb);
6247}
6248
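The DCB branch above derives the Tx queue from the 802.1p priority, the top three bits (15..13) of the VLAN TCI. The bit extraction on its own:

#include <stdint.h>
#include <stdio.h>

/* mask 0xE000 keeps TCI bits 15..13, the priority code point */
static unsigned vlan_prio(uint16_t vlan_tci)
{
        return (vlan_tci & 0xE000u) >> 13;
}

int main(void)
{
        printf("%u\n", vlan_prio(0xA064));  /* 0b101 -> priority 5 */
        return 0;
}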
84418e3b
AD
6249netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev,
6250 struct ixgbe_adapter *adapter,
6251 struct ixgbe_ring *tx_ring)
9a799d71 6252{
60d51134 6253 struct netdev_queue *txq;
9a799d71
AK
6254 unsigned int first;
6255 unsigned int tx_flags = 0;
30eba97a 6256 u8 hdr_len = 0;
5f715823 6257 int tso;
9a799d71
AK
6258 int count = 0;
6259 unsigned int f;
9f8cdf4f 6260
9f8cdf4f
JB
6261 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
6262 tx_flags |= vlan_tx_tag_get(skb);
2f90b865
AD
6263 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6264 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
5f715823 6265 tx_flags |= ((skb->queue_mapping & 0x7) << 13);
2f90b865
AD
6266 }
6267 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
6268 tx_flags |= IXGBE_TX_FLAGS_VLAN;
33c66bd1
JF
6269 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
6270 skb->priority != TC_PRIO_CONTROL) {
2ea186ae
JF
6271 tx_flags |= ((skb->queue_mapping & 0x7) << 13);
6272 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
6273 tx_flags |= IXGBE_TX_FLAGS_VLAN;
9a799d71 6274 }
eacd73f7 6275
09ad1cc0 6276#ifdef IXGBE_FCOE
56075a98
JF
6277 /* for FCoE with DCB, we force the priority to what
6278 * was specified by the switch */
6279 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
6280 (skb->protocol == htons(ETH_P_FCOE) ||
6281 skb->protocol == htons(ETH_P_FIP))) {
4bc091d8
JF
6282#ifdef CONFIG_IXGBE_DCB
6283 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6284 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
6285 << IXGBE_TX_FLAGS_VLAN_SHIFT);
6286 tx_flags |= ((adapter->fcoe.up << 13)
6287 << IXGBE_TX_FLAGS_VLAN_SHIFT);
6288 }
6289#endif
ca77cd59
RL
6290 /* flag for FCoE offloads */
6291 if (skb->protocol == htons(ETH_P_FCOE))
6292 tx_flags |= IXGBE_TX_FLAGS_FCOE;
09ad1cc0 6293 }
ca77cd59
RL
6294#endif
6295
eacd73f7 6296 /* four things can cause us to need a context descriptor */
9f8cdf4f
JB
6297 if (skb_is_gso(skb) ||
6298 (skb->ip_summed == CHECKSUM_PARTIAL) ||
eacd73f7
YZ
6299 (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
6300 (tx_flags & IXGBE_TX_FLAGS_FCOE))
9a799d71
AK
6301 count++;
6302
9f8cdf4f
JB
6303 count += TXD_USE_COUNT(skb_headlen(skb));
6304 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
9a799d71
AK
6305 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
6306
e092be60 6307 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
9a799d71 6308 adapter->tx_busy++;
9a799d71
AK
6309 return NETDEV_TX_BUSY;
6310 }
9a799d71 6311
9a799d71 6312 first = tx_ring->next_to_use;
eacd73f7
YZ
6313 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
6314#ifdef IXGBE_FCOE
6315 /* setup tx offload for FCoE */
6316 tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
6317 if (tso < 0) {
6318 dev_kfree_skb_any(skb);
6319 return NETDEV_TX_OK;
6320 }
6321 if (tso)
6322 tx_flags |= IXGBE_TX_FLAGS_FSO;
6323#endif /* IXGBE_FCOE */
6324 } else {
6325 if (skb->protocol == htons(ETH_P_IP))
6326 tx_flags |= IXGBE_TX_FLAGS_IPV4;
6327 tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
6328 if (tso < 0) {
6329 dev_kfree_skb_any(skb);
6330 return NETDEV_TX_OK;
6331 }
9a799d71 6332
eacd73f7
YZ
6333 if (tso)
6334 tx_flags |= IXGBE_TX_FLAGS_TSO;
6335 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
6336 (skb->ip_summed == CHECKSUM_PARTIAL))
6337 tx_flags |= IXGBE_TX_FLAGS_CSUM;
6338 }
9a799d71 6339
eacd73f7 6340 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
44df32c5 6341 if (count) {
c4cf55e5
PWJ
6342 /* add the ATR filter if ATR is on */
6343 if (tx_ring->atr_sample_rate) {
6344 ++tx_ring->atr_count;
6345 if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
6346 test_bit(__IXGBE_FDIR_INIT_DONE,
6347 &tx_ring->reinit_state)) {
6348 ixgbe_atr(adapter, skb, tx_ring->queue_index,
6349 tx_flags);
6350 tx_ring->atr_count = 0;
6351 }
6352 }
60d51134
ED
6353 txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
6354 txq->tx_bytes += skb->len;
6355 txq->tx_packets++;
44df32c5
AD
6356 ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
6357 hdr_len);
44df32c5 6358 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
9a799d71 6359
44df32c5
AD
6360 } else {
6361 dev_kfree_skb_any(skb);
6362 tx_ring->tx_buffer_info[first].time_stamp = 0;
6363 tx_ring->next_to_use = first;
6364 }
9a799d71
AK
6365
6366 return NETDEV_TX_OK;
6367}
6368
84418e3b
AD
6369static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
6370{
6371 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6372 struct ixgbe_ring *tx_ring;
6373
6374 tx_ring = adapter->tx_ring[skb->queue_mapping];
6375 return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring);
6376}
6377
9a799d71
AK
6378/**
6379 * ixgbe_set_mac - Change the Ethernet Address of the NIC
6380 * @netdev: network interface device structure
6381 * @p: pointer to an address structure
6382 *
6383 * Returns 0 on success, negative on failure
6384 **/
6385static int ixgbe_set_mac(struct net_device *netdev, void *p)
6386{
6387 struct ixgbe_adapter *adapter = netdev_priv(netdev);
b4617240 6388 struct ixgbe_hw *hw = &adapter->hw;
9a799d71
AK
6389 struct sockaddr *addr = p;
6390
6391 if (!is_valid_ether_addr(addr->sa_data))
6392 return -EADDRNOTAVAIL;
6393
6394 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
b4617240 6395 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
9a799d71 6396
1cdd1ec8
GR
6397 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
6398 IXGBE_RAH_AV);
9a799d71
AK
6399
6400 return 0;
6401}
6402
6b73e10d
BH
6403static int
6404ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
6405{
6406 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6407 struct ixgbe_hw *hw = &adapter->hw;
6408 u16 value;
6409 int rc;
6410
6411 if (prtad != hw->phy.mdio.prtad)
6412 return -EINVAL;
6413 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
6414 if (!rc)
6415 rc = value;
6416 return rc;
6417}
6418
6419static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
6420 u16 addr, u16 value)
6421{
6422 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6423 struct ixgbe_hw *hw = &adapter->hw;
6424
6425 if (prtad != hw->phy.mdio.prtad)
6426 return -EINVAL;
6427 return hw->phy.ops.write_reg(hw, addr, devad, value);
6428}
6429
6430static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
6431{
6432 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6433
6434 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
6435}
6436
0365e6e4
PW
6437/**
6438 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
31278e71 6439 * netdev->dev_addrs
0365e6e4
PW
6440 * @netdev: network interface device structure
6441 *
6442 * Returns non-zero on failure
6443 **/
6444static int ixgbe_add_sanmac_netdev(struct net_device *dev)
6445{
6446 int err = 0;
6447 struct ixgbe_adapter *adapter = netdev_priv(dev);
6448 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6449
6450 if (is_valid_ether_addr(mac->san_addr)) {
6451 rtnl_lock();
6452 err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
6453 rtnl_unlock();
6454 }
6455 return err;
6456}
6457
6458/**
6459 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding
31278e71 6460 * netdev->dev_addrs
0365e6e4
PW
6461 * @netdev: network interface device structure
6462 *
6463 * Returns non-zero on failure
6464 **/
6465static int ixgbe_del_sanmac_netdev(struct net_device *dev)
6466{
6467 int err = 0;
6468 struct ixgbe_adapter *adapter = netdev_priv(dev);
6469 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6470
6471 if (is_valid_ether_addr(mac->san_addr)) {
6472 rtnl_lock();
6473 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
6474 rtnl_unlock();
6475 }
6476 return err;
6477}
6478
9a799d71
AK
6479#ifdef CONFIG_NET_POLL_CONTROLLER
6480/*
6481 * Polling 'interrupt' - used by things like netconsole to send skbs
6482 * without having to re-enable interrupts. It's not called while
6483 * the interrupt routine is executing.
6484 */
6485static void ixgbe_netpoll(struct net_device *netdev)
6486{
6487 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8f9a7167 6488 int i;
9a799d71 6489
1a647bd2
AD
6490 /* if interface is down do nothing */
6491 if (test_bit(__IXGBE_DOWN, &adapter->state))
6492 return;
6493
9a799d71 6494 adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
8f9a7167
PWJ
6495 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
6496 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
6497 for (i = 0; i < num_q_vectors; i++) {
6498 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
6499 ixgbe_msix_clean_many(0, q_vector);
6500 }
6501 } else {
6502 ixgbe_intr(adapter->pdev->irq, netdev);
6503 }
9a799d71 6504 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
9a799d71
AK
6505}
6506#endif
6507
0edc3527
SH
6508static const struct net_device_ops ixgbe_netdev_ops = {
6509 .ndo_open = ixgbe_open,
6510 .ndo_stop = ixgbe_close,
00829823 6511 .ndo_start_xmit = ixgbe_xmit_frame,
09a3b1f8 6512 .ndo_select_queue = ixgbe_select_queue,
e90d400c 6513 .ndo_set_rx_mode = ixgbe_set_rx_mode,
0edc3527
SH
6514 .ndo_set_multicast_list = ixgbe_set_rx_mode,
6515 .ndo_validate_addr = eth_validate_addr,
6516 .ndo_set_mac_address = ixgbe_set_mac,
6517 .ndo_change_mtu = ixgbe_change_mtu,
6518 .ndo_tx_timeout = ixgbe_tx_timeout,
6519 .ndo_vlan_rx_register = ixgbe_vlan_rx_register,
6520 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
6521 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
6b73e10d 6522 .ndo_do_ioctl = ixgbe_ioctl,
7f01648a
GR
6523 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
6524 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
6525 .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
6526 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
0edc3527
SH
6527#ifdef CONFIG_NET_POLL_CONTROLLER
6528 .ndo_poll_controller = ixgbe_netpoll,
6529#endif
332d4a7d
YZ
6530#ifdef IXGBE_FCOE
6531 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
6532 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
8450ff8c
YZ
6533 .ndo_fcoe_enable = ixgbe_fcoe_enable,
6534 .ndo_fcoe_disable = ixgbe_fcoe_disable,
61a1fa10 6535 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
332d4a7d 6536#endif /* IXGBE_FCOE */
0edc3527
SH
6537};
6538
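/*
 * Dispatch sketch (hedged to core-stack call sites of this era, not
 * part of this driver): once netdev->netdev_ops points at the table
 * above, the stack calls through it, e.g. on transmit:
 *
 *	const struct net_device_ops *ops = dev->netdev_ops;
 *	u16 txq = ops->ndo_select_queue(dev, skb);	(dev_pick_tx)
 *	netdev_tx_t rc = ops->ndo_start_xmit(skb, dev);	(dev_hard_start_xmit)
 */
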
1cdd1ec8
GR
6539static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
6540 const struct ixgbe_info *ii)
6541{
6542#ifdef CONFIG_PCI_IOV
6543 struct ixgbe_hw *hw = &adapter->hw;
6544 int err;
6545
6546 if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
6547 return;
6548
6549 /* The 82599 supports up to 64 VFs per physical function,
6550 * but this implementation limits allocation to 63 so that
6551 * basic networking resources are still available to the
6552 * physical function.
6553 */
6554 adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
6555 adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
6556 err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
6557 if (err) {
396e799c 6558 e_err(probe, "Failed to enable PCI sriov: %d\n", err);
1cdd1ec8
GR
6559 goto err_novfs;
6560 }
6561 /* If the call to enable VFs succeeded, allocate memory
6562 * for per-VF control structures.
6563 */
6564 adapter->vfinfo =
6565 kcalloc(adapter->num_vfs,
6566 sizeof(struct vf_data_storage), GFP_KERNEL);
6567 if (adapter->vfinfo) {
6568 /* Now that we're sure SR-IOV is enabled
6569 * and memory is allocated, set up the mailbox parameters
6570 */
6571 ixgbe_init_mbx_params_pf(hw);
6572 memcpy(&hw->mbx.ops, ii->mbx_ops,
6573 sizeof(hw->mbx.ops));
6574
6575 /* Disable RSC when in SR-IOV mode */
6576 adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
6577 IXGBE_FLAG2_RSC_ENABLED);
6578 return;
6579 }
6580
6581 /* Allocation failed; fall back to disabling SR-IOV */
396e799c
ET
6582 e_err(probe, "Unable to allocate memory for VF Data Storage - "
6583 "SRIOV disabled\n");
1cdd1ec8
GR
6584 pci_disable_sriov(adapter->pdev);
6585
6586err_novfs:
6587 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
6588 adapter->num_vfs = 0;
6589#endif /* CONFIG_PCI_IOV */
6590}
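
/*
 * Usage sketch: max_vfs is this driver's module parameter, so SR-IOV is
 * requested at load time (assuming CONFIG_PCI_IOV and an 82599 device):
 *
 *	modprobe ixgbe max_vfs=8
 *
 * Values above 63 are clamped to 63 by the function above.
 */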
6591
9a799d71
AK
6592/**
6593 * ixgbe_probe - Device Initialization Routine
6594 * @pdev: PCI device information struct
6595 * @ent: entry in ixgbe_pci_tbl
6596 *
6597 * Returns 0 on success, negative on failure
6598 *
6599 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
6600 * The OS initialization, configuring of the adapter private structure,
6601 * and a hardware reset occur.
6602 **/
6603static int __devinit ixgbe_probe(struct pci_dev *pdev,
b4617240 6604 const struct pci_device_id *ent)
9a799d71
AK
6605{
6606 struct net_device *netdev;
6607 struct ixgbe_adapter *adapter = NULL;
6608 struct ixgbe_hw *hw;
6609 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
9a799d71
AK
6610 static int cards_found;
6611 int i, err, pci_using_dac;
c85a2618 6612 unsigned int indices = num_possible_cpus();
eacd73f7
YZ
6613#ifdef IXGBE_FCOE
6614 u16 device_caps;
6615#endif
c44ade9e 6616 u32 part_num, eec;
9a799d71 6617
bded64a7
AG
6618 /* Catch broken hardware that put the wrong VF device ID in
6619 * the PCIe SR-IOV capability.
6620 */
6621 if (pdev->is_virtfn) {
6622 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
6623 pci_name(pdev), pdev->vendor, pdev->device);
6624 return -EINVAL;
6625 }
6626
9ce77666 6627 err = pci_enable_device_mem(pdev);
9a799d71
AK
6628 if (err)
6629 return err;
6630
1b507730
NN
6631 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
6632 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
9a799d71
AK
6633 pci_using_dac = 1;
6634 } else {
1b507730 6635 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
9a799d71 6636 if (err) {
1b507730
NN
6637 err = dma_set_coherent_mask(&pdev->dev,
6638 DMA_BIT_MASK(32));
9a799d71 6639 if (err) {
b8bc0421
DC
6640 dev_err(&pdev->dev,
6641 "No usable DMA configuration, aborting\n");
9a799d71
AK
6642 goto err_dma;
6643 }
6644 }
6645 pci_using_dac = 0;
6646 }
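
/*
 * Intent sketch (standard DMA API fallback pattern): prefer a 64-bit
 * mask, fall back to 32-bit; pci_using_dac only records which mask
 * succeeded so that later in this probe path:
 *
 *	if (pci_using_dac)
 *		netdev->features |= NETIF_F_HIGHDMA;
 */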
6647
9ce77666 6648 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
6649 IORESOURCE_MEM), ixgbe_driver_name);
9a799d71 6650 if (err) {
b8bc0421
DC
6651 dev_err(&pdev->dev,
6652 "pci_request_selected_regions failed 0x%x\n", err);
9a799d71
AK
6653 goto err_pci_reg;
6654 }
6655
19d5afd4 6656 pci_enable_pcie_error_reporting(pdev);
6fabd715 6657
9a799d71 6658 pci_set_master(pdev);
fb3b27bc 6659 pci_save_state(pdev);
9a799d71 6660
c85a2618
JF
6661 if (ii->mac == ixgbe_mac_82598EB)
6662 indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
6663 else
6664 indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
6665
6666 indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
6667#ifdef IXGBE_FCOE
6668 indices += min_t(unsigned int, num_possible_cpus(),
6669 IXGBE_MAX_FCOE_INDICES);
6670#endif
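/*
 * Queue-count sketch: indices starts at num_possible_cpus(), is capped
 * per MAC type (RSS limit on 82598, FDIR limit on 82599), raised to at
 * least the DCB minimum, and with FCoE grows by up to
 * IXGBE_MAX_FCOE_INDICES before sizing the multiqueue netdev below.
 */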
c85a2618 6671 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
9a799d71
AK
6672 if (!netdev) {
6673 err = -ENOMEM;
6674 goto err_alloc_etherdev;
6675 }
6676
9a799d71
AK
6677 SET_NETDEV_DEV(netdev, &pdev->dev);
6678
6679 pci_set_drvdata(pdev, netdev);
6680 adapter = netdev_priv(netdev);
6681
6682 adapter->netdev = netdev;
6683 adapter->pdev = pdev;
6684 hw = &adapter->hw;
6685 hw->back = adapter;
6686 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
6687
05857980
JK
6688 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
6689 pci_resource_len(pdev, 0));
9a799d71
AK
6690 if (!hw->hw_addr) {
6691 err = -EIO;
6692 goto err_ioremap;
6693 }
6694
 /* scan the remaining BARs; only BAR 0 is mapped above, and the loop
 * body merely skips empty BARs, so this is effectively a no-op */
6695 for (i = 1; i <= 5; i++) {
6696 if (pci_resource_len(pdev, i) == 0)
6697 continue;
6698 }
6699
0edc3527 6700 netdev->netdev_ops = &ixgbe_netdev_ops;
9a799d71 6701 ixgbe_set_ethtool_ops(netdev);
9a799d71 6702 netdev->watchdog_timeo = 5 * HZ;
9a799d71
AK
6703 strcpy(netdev->name, pci_name(pdev));
6704
9a799d71
AK
6705 adapter->bd_number = cards_found;
6706
9a799d71
AK
6707 /* Setup hw api */
6708 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
021230d4 6709 hw->mac.type = ii->mac;
9a799d71 6710
c44ade9e
JB
6711 /* EEPROM */
6712 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
6713 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
6714 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
6715 if (!(eec & (1 << 8)))
6716 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
6717
6718 /* PHY */
6719 memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
c4900be0 6720 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
6b73e10d
BH
6721 /* ixgbe_identify_phy_generic will set prtad and mmds properly */
6722 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
6723 hw->phy.mdio.mmds = 0;
6724 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
6725 hw->phy.mdio.dev = netdev;
6726 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
6727 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
c4900be0
DS
6728
6729 /* set up this timer and work struct before calling get_invariants,
6730 * which might start the timer
6731 */
6732 init_timer(&adapter->sfp_timer);
c061b18d 6733 adapter->sfp_timer.function = ixgbe_sfp_timer;
c4900be0
DS
6734 adapter->sfp_timer.data = (unsigned long) adapter;
6735
6736 INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);
c44ade9e 6737
e8e26350
PW
6738 /* multispeed fiber has its own tasklet, called from GPI SDP1 context */
6739 INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);
6740
6741 /* handle a new SFP+ module arrival, called from GPI SDP2 context */
6742 INIT_WORK(&adapter->sfp_config_module_task,
6743 ixgbe_sfp_config_module_task);
6744
8ca783ab 6745 ii->get_invariants(hw);
9a799d71
AK
6746
6747 /* setup the private structure */
6748 err = ixgbe_sw_init(adapter);
6749 if (err)
6750 goto err_sw_init;
6751
e86bff0e
DS
6752 /* Make it possible for the adapter to be woken up via WOL */
6753 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
6754 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
6755
bf069c97
DS
6756 /*
6757 * If there is a fan on this device and it has failed, log the
6758 * failure.
6759 */
6760 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
6761 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
6762 if (esdp & IXGBE_ESDP_SDP1)
396e799c 6763 e_crit(probe, "Fan has stopped, replace the adapter\n");
bf069c97
DS
6764 }
6765
c44ade9e 6766 /* reset_hw fills in the perm_addr as well */
119fc60a 6767 hw->phy.reset_if_overtemp = true;
c44ade9e 6768 err = hw->mac.ops.reset_hw(hw);
119fc60a 6769 hw->phy.reset_if_overtemp = false;
8ca783ab
DS
6770 if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
6771 hw->mac.type == ixgbe_mac_82598EB) {
6772 /*
6773 * Start a kernel thread to watch for a module to arrive.
6774 * Only do this for 82598, since 82599 will generate
6775 * interrupts on module arrival.
6776 */
6777 set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
6778 mod_timer(&adapter->sfp_timer,
6779 round_jiffies(jiffies + (2 * HZ)));
6780 err = 0;
6781 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
849c4542
ET
6782 e_dev_err("failed to initialize because an unsupported SFP+ "
6783 "module type was detected.\n");
6784 e_dev_err("Reload the driver after installing a supported "
6785 "module.\n");
04f165ef
PW
6786 goto err_sw_init;
6787 } else if (err) {
849c4542 6788 e_dev_err("HW Init failed: %d\n", err);
c44ade9e
JB
6789 goto err_sw_init;
6790 }
6791
1cdd1ec8
GR
6792 ixgbe_probe_vf(adapter, ii);
6793
396e799c 6794 netdev->features = NETIF_F_SG |
b4617240
PW
6795 NETIF_F_IP_CSUM |
6796 NETIF_F_HW_VLAN_TX |
6797 NETIF_F_HW_VLAN_RX |
6798 NETIF_F_HW_VLAN_FILTER;
9a799d71 6799
e9990a9c 6800 netdev->features |= NETIF_F_IPV6_CSUM;
9a799d71 6801 netdev->features |= NETIF_F_TSO;
9a799d71 6802 netdev->features |= NETIF_F_TSO6;
78b6f4ce 6803 netdev->features |= NETIF_F_GRO;
ad31c402 6804
45a5ead0
JB
6805 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
6806 netdev->features |= NETIF_F_SCTP_CSUM;
6807
ad31c402
JK
6808 netdev->vlan_features |= NETIF_F_TSO;
6809 netdev->vlan_features |= NETIF_F_TSO6;
22f32b7a 6810 netdev->vlan_features |= NETIF_F_IP_CSUM;
cd1da503 6811 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
ad31c402
JK
6812 netdev->vlan_features |= NETIF_F_SG;
6813
1cdd1ec8
GR
6814 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6815 adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
6816 IXGBE_FLAG_DCB_ENABLED);
2f90b865
AD
6817 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
6818 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
6819
7a6b6f51 6820#ifdef CONFIG_IXGBE_DCB
2f90b865
AD
6821 netdev->dcbnl_ops = &dcbnl_ops;
6822#endif
6823
eacd73f7 6824#ifdef IXGBE_FCOE
0d551589 6825 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
eacd73f7
YZ
6826 if (hw->mac.ops.get_device_caps) {
6827 hw->mac.ops.get_device_caps(hw, &device_caps);
0d551589
YZ
6828 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
6829 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
eacd73f7
YZ
6830 }
6831 }
5e09d7f6
YZ
6832 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
6833 netdev->vlan_features |= NETIF_F_FCOE_CRC;
6834 netdev->vlan_features |= NETIF_F_FSO;
6835 netdev->vlan_features |= NETIF_F_FCOE_MTU;
6836 }
eacd73f7 6837#endif /* IXGBE_FCOE */
9a799d71
AK
6838 if (pci_using_dac)
6839 netdev->features |= NETIF_F_HIGHDMA;
6840
0c19d6af 6841 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
f8212f97
AD
6842 netdev->features |= NETIF_F_LRO;
6843
9a799d71 6844 /* make sure the EEPROM is good */
c44ade9e 6845 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
849c4542 6846 e_dev_err("The EEPROM Checksum Is Not Valid\n");
9a799d71
AK
6847 err = -EIO;
6848 goto err_eeprom;
6849 }
6850
6851 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
6852 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
6853
c44ade9e 6854 if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
849c4542 6855 e_dev_err("invalid MAC address\n");
9a799d71
AK
6856 err = -EIO;
6857 goto err_eeprom;
6858 }
6859
61fac744
PW
6860 /* power down the optics */
6861 if (hw->phy.multispeed_fiber)
6862 hw->mac.ops.disable_tx_laser(hw);
6863
9a799d71 6864 init_timer(&adapter->watchdog_timer);
c061b18d 6865 adapter->watchdog_timer.function = ixgbe_watchdog;
9a799d71
AK
6866 adapter->watchdog_timer.data = (unsigned long)adapter;
6867
6868 INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
cf8280ee 6869 INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);
9a799d71 6870
021230d4
AV
6871 err = ixgbe_init_interrupt_scheme(adapter);
6872 if (err)
6873 goto err_sw_init;
9a799d71 6874
e8e26350
PW
6875 switch (pdev->device) {
6876 case IXGBE_DEV_ID_82599_KX4:
495dce12
WJP
6877 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
6878 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
e8e26350
PW
6879 break;
6880 default:
6881 adapter->wol = 0;
6882 break;
6883 }
e8e26350
PW
6884 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
6885
04f165ef
PW
6886 /* pick up the PCI bus settings for reporting later */
6887 hw->mac.ops.get_bus_info(hw);
6888
9a799d71 6889 /* print bus type/speed/width info */
849c4542 6890 e_dev_info("(PCI Express:%s:%s) %pM\n",
e8e26350
PW
6891 ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
6892 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
6893 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
6894 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
6895 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
b4617240 6896 "Unknown"),
7c510e4b 6897 netdev->dev_addr);
c44ade9e 6898 ixgbe_read_pba_num_generic(hw, &part_num);
e8e26350 6899 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
849c4542
ET
6900 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, "
6901 "PBA No: %06x-%03x\n",
6902 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
6903 (part_num >> 8), (part_num & 0xff));
e8e26350 6904 else
849c4542
ET
6905 e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
6906 hw->mac.type, hw->phy.type,
6907 (part_num >> 8), (part_num & 0xff));
9a799d71 6908
e8e26350 6909 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
849c4542
ET
6910 e_dev_warn("PCI-Express bandwidth available for this card is "
6911 "not sufficient for optimal performance.\n");
6912 e_dev_warn("For optimal performance a x8 PCI-Express slot "
6913 "is required.\n");
0c254d86
AK
6914 }
6915
34b0368c
PWJ
6916 /* save off EEPROM version number */
6917 hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);
6918
9a799d71 6919 /* reset the hardware with the new settings */
794caeb2 6920 err = hw->mac.ops.start_hw(hw);
c44ade9e 6921
794caeb2
PWJ
6922 if (err == IXGBE_ERR_EEPROM_VERSION) {
6923 /* We are running on a pre-production device; log a warning */
849c4542
ET
6924 e_dev_warn("This device is a pre-production adapter/LOM. "
6925 "Please be aware there may be issues associated "
6926 "with your hardware. If you are experiencing "
6927 "problems please contact your Intel or hardware "
6928 "representative who provided you with this "
6929 "hardware.\n");
794caeb2 6930 }
9a799d71
AK
6931 strcpy(netdev->name, "eth%d");
6932 err = register_netdev(netdev);
6933 if (err)
6934 goto err_register;
6935
54386467
JB
6936 /* carrier off reporting is important to ethtool even BEFORE open */
6937 netif_carrier_off(netdev);
6938
c4cf55e5
PWJ
6939 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
6940 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
6941 INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
6942
119fc60a
MC
6943 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
6944 INIT_WORK(&adapter->check_overtemp_task, ixgbe_check_overtemp_task);
5dd2d332 6945#ifdef CONFIG_IXGBE_DCA
652f093f 6946 if (dca_add_requester(&pdev->dev) == 0) {
bd0362dd 6947 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
bd0362dd
JC
6948 ixgbe_setup_dca(adapter);
6949 }
6950#endif
1cdd1ec8 6951 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
396e799c 6952 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
1cdd1ec8
GR
6953 for (i = 0; i < adapter->num_vfs; i++)
6954 ixgbe_vf_configuration(pdev, (i | 0x10000000));
6955 }
6956
0365e6e4
PW
6957 /* add san mac addr to netdev */
6958 ixgbe_add_sanmac_netdev(netdev);
9a799d71 6959
849c4542 6960 e_dev_info("Intel(R) 10 Gigabit Network Connection\n");
9a799d71
AK
6961 cards_found++;
6962 return 0;
6963
6964err_register:
5eba3699 6965 ixgbe_release_hw_control(adapter);
7a921c93 6966 ixgbe_clear_interrupt_scheme(adapter);
9a799d71
AK
6967err_sw_init:
6968err_eeprom:
1cdd1ec8
GR
6969 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6970 ixgbe_disable_sriov(adapter);
c4900be0
DS
6971 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
6972 del_timer_sync(&adapter->sfp_timer);
6973 cancel_work_sync(&adapter->sfp_task);
e8e26350
PW
6974 cancel_work_sync(&adapter->multispeed_fiber_task);
6975 cancel_work_sync(&adapter->sfp_config_module_task);
9a799d71
AK
6976 iounmap(hw->hw_addr);
6977err_ioremap:
6978 free_netdev(netdev);
6979err_alloc_etherdev:
9ce77666 6980 pci_release_selected_regions(pdev, pci_select_bars(pdev,
6981 IORESOURCE_MEM));
9a799d71
AK
6982err_pci_reg:
6983err_dma:
6984 pci_disable_device(pdev);
6985 return err;
6986}
6987
6988/**
6989 * ixgbe_remove - Device Removal Routine
6990 * @pdev: PCI device information struct
6991 *
6992 * ixgbe_remove is called by the PCI subsystem to alert the driver
6993 * that it should release a PCI device. This could be caused by a
6994 * Hot-Plug event, or because the driver is going to be removed from
6995 * memory.
6996 **/
6997static void __devexit ixgbe_remove(struct pci_dev *pdev)
6998{
6999 struct net_device *netdev = pci_get_drvdata(pdev);
7000 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7001
7002 set_bit(__IXGBE_DOWN, &adapter->state);
c4900be0
DS
7003 /* clear the module not found bit to make sure the worker won't
7004 * reschedule
7005 */
7006 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
9a799d71
AK
7007 del_timer_sync(&adapter->watchdog_timer);
7008
c4900be0
DS
7009 del_timer_sync(&adapter->sfp_timer);
7010 cancel_work_sync(&adapter->watchdog_task);
7011 cancel_work_sync(&adapter->sfp_task);
e8e26350
PW
7012 cancel_work_sync(&adapter->multispeed_fiber_task);
7013 cancel_work_sync(&adapter->sfp_config_module_task);
c4cf55e5
PWJ
7014 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
7015 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
7016 cancel_work_sync(&adapter->fdir_reinit_task);
9a799d71
AK
7017 flush_scheduled_work();
7018
5dd2d332 7019#ifdef CONFIG_IXGBE_DCA
bd0362dd
JC
7020 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
7021 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
7022 dca_remove_requester(&pdev->dev);
7023 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
7024 }
7025
7026#endif
332d4a7d
YZ
7027#ifdef IXGBE_FCOE
7028 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
7029 ixgbe_cleanup_fcoe(adapter);
7030
7031#endif /* IXGBE_FCOE */
0365e6e4
PW
7032
7033 /* remove the added san mac */
7034 ixgbe_del_sanmac_netdev(netdev);
7035
c4900be0
DS
7036 if (netdev->reg_state == NETREG_REGISTERED)
7037 unregister_netdev(netdev);
9a799d71 7038
1cdd1ec8
GR
7039 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7040 ixgbe_disable_sriov(adapter);
7041
7a921c93 7042 ixgbe_clear_interrupt_scheme(adapter);
5eba3699 7043
021230d4 7044 ixgbe_release_hw_control(adapter);
9a799d71
AK
7045
7046 iounmap(adapter->hw.hw_addr);
9ce77666 7047 pci_release_selected_regions(pdev, pci_select_bars(pdev,
7048 IORESOURCE_MEM));
9a799d71 7049
849c4542 7050 e_dev_info("complete\n");
021230d4 7051
9a799d71
AK
7052 free_netdev(netdev);
7053
19d5afd4 7054 pci_disable_pcie_error_reporting(pdev);
6fabd715 7055
9a799d71
AK
7056 pci_disable_device(pdev);
7057}
7058
7059/**
7060 * ixgbe_io_error_detected - called when PCI error is detected
7061 * @pdev: Pointer to PCI device
7062 * @state: The current pci connection state
7063 *
7064 * This function is called after a PCI bus error affecting
7065 * this device has been detected.
7066 */
7067static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
b4617240 7068 pci_channel_state_t state)
9a799d71
AK
7069{
7070 struct net_device *netdev = pci_get_drvdata(pdev);
454d7c9b 7071 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9a799d71
AK
7072
7073 netif_device_detach(netdev);
7074
3044b8d1
BL
7075 if (state == pci_channel_io_perm_failure)
7076 return PCI_ERS_RESULT_DISCONNECT;
7077
9a799d71
AK
7078 if (netif_running(netdev))
7079 ixgbe_down(adapter);
7080 pci_disable_device(pdev);
7081
b4617240 7082 /* Request a slot reset. */
9a799d71
AK
7083 return PCI_ERS_RESULT_NEED_RESET;
7084}
7085
7086/**
7087 * ixgbe_io_slot_reset - called after the pci bus has been reset.
7088 * @pdev: Pointer to PCI device
7089 *
7090 * Restart the card from scratch, as if from a cold boot.
7091 */
7092static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
7093{
7094 struct net_device *netdev = pci_get_drvdata(pdev);
454d7c9b 7095 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6fabd715
PWJ
7096 pci_ers_result_t result;
7097 int err;
9a799d71 7098
9ce77666 7099 if (pci_enable_device_mem(pdev)) {
396e799c 7100 e_err(probe, "Cannot re-enable PCI device after reset.\n");
6fabd715
PWJ
7101 result = PCI_ERS_RESULT_DISCONNECT;
7102 } else {
7103 pci_set_master(pdev);
7104 pci_restore_state(pdev);
c0e1f68b 7105 pci_save_state(pdev);
9a799d71 7106
dd4d8ca6 7107 pci_wake_from_d3(pdev, false);
9a799d71 7108
6fabd715 7109 ixgbe_reset(adapter);
88512539 7110 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
6fabd715
PWJ
7111 result = PCI_ERS_RESULT_RECOVERED;
7112 }
7113
7114 err = pci_cleanup_aer_uncorrect_error_status(pdev);
7115 if (err) {
849c4542
ET
7116 e_dev_err("pci_cleanup_aer_uncorrect_error_status "
7117 "failed 0x%0x\n", err);
6fabd715
PWJ
7118 /* non-fatal, continue */
7119 }
9a799d71 7120
6fabd715 7121 return result;
9a799d71
AK
7122}
7123
7124/**
7125 * ixgbe_io_resume - called when traffic can start flowing again.
7126 * @pdev: Pointer to PCI device
7127 *
7128 * This callback is called when the error recovery driver tells us that
7129 * it's OK to resume normal operation.
7130 */
7131static void ixgbe_io_resume(struct pci_dev *pdev)
7132{
7133 struct net_device *netdev = pci_get_drvdata(pdev);
454d7c9b 7134 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9a799d71
AK
7135
7136 if (netif_running(netdev)) {
7137 if (ixgbe_up(adapter)) {
396e799c 7138 e_info(probe, "ixgbe_up failed after reset\n");
9a799d71
AK
7139 return;
7140 }
7141 }
7142
7143 netif_device_attach(netdev);
9a799d71
AK
7144}
7145
7146static struct pci_error_handlers ixgbe_err_handler = {
7147 .error_detected = ixgbe_io_error_detected,
7148 .slot_reset = ixgbe_io_slot_reset,
7149 .resume = ixgbe_io_resume,
7150};
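
/*
 * Recovery-flow sketch (standard PCIe AER semantics): after a bus error
 * the AER core walks these callbacks in order:
 *
 *	error_detected()  returns PCI_ERS_RESULT_NEED_RESET
 *	slot_reset()      returns PCI_ERS_RESULT_RECOVERED
 *	resume()          reattaches the netdev and traffic restarts
 */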
7151
7152static struct pci_driver ixgbe_driver = {
7153 .name = ixgbe_driver_name,
7154 .id_table = ixgbe_pci_tbl,
7155 .probe = ixgbe_probe,
7156 .remove = __devexit_p(ixgbe_remove),
7157#ifdef CONFIG_PM
7158 .suspend = ixgbe_suspend,
7159 .resume = ixgbe_resume,
7160#endif
7161 .shutdown = ixgbe_shutdown,
7162 .err_handler = &ixgbe_err_handler
7163};
7164
7165/**
7166 * ixgbe_init_module - Driver Registration Routine
7167 *
7168 * ixgbe_init_module is the first routine called when the driver is
7169 * loaded. All it does is register with the PCI subsystem.
7170 **/
7171static int __init ixgbe_init_module(void)
7172{
7173 int ret;
849c4542
ET
7174 pr_info("%s - version %s\n", ixgbe_driver_string,
7175 ixgbe_driver_version);
7176 pr_info("%s\n", ixgbe_copyright);
9a799d71 7177
5dd2d332 7178#ifdef CONFIG_IXGBE_DCA
bd0362dd 7179 dca_register_notify(&dca_notifier);
bd0362dd 7180#endif
5dd2d332 7181
9a799d71
AK
7182 ret = pci_register_driver(&ixgbe_driver);
7183 return ret;
7184}
b4617240 7185
9a799d71
AK
7186module_init(ixgbe_init_module);
7187
7188/**
7189 * ixgbe_exit_module - Driver Exit Cleanup Routine
7190 *
7191 * ixgbe_exit_module is called just before the driver is removed
7192 * from memory.
7193 **/
7194static void __exit ixgbe_exit_module(void)
7195{
5dd2d332 7196#ifdef CONFIG_IXGBE_DCA
bd0362dd
JC
7197 dca_unregister_notify(&dca_notifier);
7198#endif
9a799d71
AK
7199 pci_unregister_driver(&ixgbe_driver);
7200}
bd0362dd 7201
5dd2d332 7202#ifdef CONFIG_IXGBE_DCA
bd0362dd 7203static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
b4617240 7204 void *p)
bd0362dd
JC
7205{
7206 int ret_val;
7207
7208 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
b4617240 7209 __ixgbe_notify_dca);
bd0362dd
JC
7210
7211 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
7212}
b453368d 7213
5dd2d332 7214#endif /* CONFIG_IXGBE_DCA */
849c4542 7215
b453368d 7216/**
849c4542 7217 * ixgbe_get_hw_dev - return the net_device
b453368d
AD
7218 * used by the hardware layer to print debugging information
7219 **/
849c4542 7220struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
b453368d
AD
7221{
7222 struct ixgbe_adapter *adapter = hw->back;
849c4542 7223 return adapter->netdev;
b453368d 7224}
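
/*
 * Usage sketch (hedged; the exact macro names live in the driver
 * headers): the hardware layer holds only an ixgbe_hw pointer, so debug
 * helpers recover the netdev roughly as
 *
 *	struct net_device *dev = ixgbe_get_hw_dev(hw);
 *	netdev_info(dev, "...");
 */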
bd0362dd 7225
9a799d71
AK
7226module_exit(ixgbe_exit_module);
7227
7228/* ixgbe_main.c */