/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION "1.3.16-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
		"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_setup_rctl(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_multi(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static int igb_xmit_frame_ring_adv(struct sk_buff *, struct net_device *,
				   struct igb_ring *);
static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_rx(int irq, void *);
static irqreturn_t igb_msix_tx(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_rx_dca(struct igb_ring *);
static void igb_update_tx_dca(struct igb_ring *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_ring *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
static inline void igb_set_rah_pool(struct e1000_hw *, int, int);
static void igb_set_mc_list_pools(struct igb_adapter *, int, u16);
static void igb_vmm_control(struct igb_adapter *);
static inline void igb_set_vmolr(struct e1000_hw *, int);
static inline int igb_set_vf_rlpml(struct igb_adapter *, int, int);
static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif

#ifdef CONFIG_PCI_IOV
static ssize_t igb_set_num_vfs(struct device *, struct device_attribute *,
                               const char *, size_t);
static ssize_t igb_show_num_vfs(struct device *, struct device_attribute *,
                                char *);
DEVICE_ATTR(num_vfs, S_IRUGO | S_IWUSR, igb_show_num_vfs, igb_set_num_vfs);
#endif
static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};


static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

static int global_quad_port_a; /* global quad port a indication */

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * Scale the NIC clock cycle by a large factor so that
 * relatively small clock corrections can be added or
 * subtracted at each clock tick. The drawbacks of a
 * large factor are a) that the clock register overflows
 * more quickly (not such a big deal) and b) that the
 * increment per tick has to fit into 24 bits.
 *
 * Note that
 *   TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS *
 *             IGB_TSYNC_SCALE
 *   TIMINCA += TIMINCA * adjustment [ppm] / 1e9
 *
 * The base scale factor is intentionally a power of two
 * so that the division in %struct timecounter can be done with
 * a shift.
 */
#define IGB_TSYNC_SHIFT (19)
#define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT)

/**
 * The duration of one clock cycle of the NIC.
 *
 * @todo This hard-coded value is part of the specification and might change
 * in future hardware revisions. Add revision check.
 */
#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16

#if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24)
# error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA
#endif
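
/*
 * Worked example (added for clarity, not part of the original source):
 * IGB_TSYNC_SCALE = 1 << 19 = 524288, so the value programmed into
 * TIMINCA below is 16 ns * 524288 = 8388608 = 0x800000 = 2^23, which
 * fits the 24-bit field checked above; a 1 ppm adjustment would add
 * roughly 8388608 / 1e6 ~= 8 units per tick.
 */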

/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp;

	stamp = rd32(E1000_SYSTIML);
	stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL;

	return stamp;
}
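
/*
 * Illustration (added for clarity, not part of the original source):
 * SYSTIML and SYSTIMH together form one 64-bit counter.  If SYSTIML
 * reads 0x00000010 and SYSTIMH reads 0x00000001, the composed stamp is
 * 0x0000000100000010: the high register shifted left 32 bits, OR-ed
 * with the low register.
 */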

#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

/**
 * igb_get_time_str - format current NIC and system time as string
 */
static char *igb_get_time_str(struct igb_adapter *adapter,
			      char buffer[160])
{
	cycle_t hw = adapter->cycles.read(&adapter->cycles);
	struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
	struct timespec sys;
	struct timespec delta;
	getnstimeofday(&sys);

	delta = timespec_sub(nic, sys);

	sprintf(buffer,
		"HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
		hw,
		(long)nic.tv_sec, nic.tv_nsec,
		(long)sys.tv_sec, sys.tv_nsec,
		(long)delta.tv_sec, delta.tv_nsec);

	return buffer;
}
#endif

/**
 * igb_desc_unused - calculate if we have unused descriptors
 **/
static int igb_desc_unused(struct igb_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
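
/*
 * Worked example (added for clarity, not part of the original source):
 * on a ring with count = 256, next_to_use = 10 and next_to_clean = 5,
 * the clean index has wrapped behind the use index, so the unused count
 * is 256 + 5 - 10 - 1 = 250.  One slot is always kept empty so that
 * next_to_use == next_to_clean unambiguously means "nothing to clean".
 */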

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

	global_quad_port_a = 0;

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i;
	unsigned int rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = rbase_offset +
			                              Q_IDX_82576(i);
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = rbase_offset +
			                              Q_IDX_82576(i);
		break;
	case e1000_82575:
	default:
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = i;
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = i;
		break;
	}
}
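
/*
 * Worked example of Q_IDX_82576 (added for clarity, not part of the
 * original source): host queue i = 0, 1, 2, 3 maps to register index
 * 0, 8, 1, 9; even i land at i/2 and odd i at 8 + i/2, interleaving the
 * PF's rings with the 0/8, 1/9, ... pairs handed out per VF above.
 */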

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	adapter->rx_ring->buddy = adapter->tx_ring;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		ring->count = adapter->tx_ring_count;
		ring->adapter = adapter;
		ring->queue_index = i;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		ring->count = adapter->rx_ring_count;
		ring->adapter = adapter;
		ring->queue_index = i;
		ring->itr_register = E1000_ITR;

		/* set a default napi handler for each rx_ring */
		netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
	}

	igb_cache_ring_register(adapter);
	return 0;
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		netif_napi_del(&adapter->rx_ring[i].napi);

	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
			      int tx_queue, int msix_vector)
{
	u32 msixbm = 0;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE) {
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
			adapter->rx_ring[rx_queue].eims_value = msixbm;
		}
		if (tx_queue > IGB_N0_QUEUE) {
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
			adapter->tx_ring[tx_queue].eims_value =
				  E1000_EICR_TX_QUEUE0 << tx_queue;
		}
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue >> 1) + adapter->vfs_allocated_count;
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue & 0x1) {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			} else {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			}
			adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue >> 1) + adapter->vfs_allocated_count;
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue & 0x1) {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			} else {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			}
			adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
			array_wr32(E1000_IVAR0, index, ivar);
		}
		break;
	default:
		BUG();
		break;
	}
}
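
/*
 * Worked example of the 82576 IVAR layout (added for clarity, not part
 * of the original source): each 32-bit IVAR entry covers a queue pair,
 * RX in bytes 0 and 2, TX in bytes 1 and 3.  Mapping rx_queue 3 to
 * MSI-X vector 5 with no VFs selects index 3 >> 1 = 1 and, because the
 * queue number is odd, writes (5 | E1000_IVAR_VALID) << 16 into byte 2.
 */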

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;
	if (hw->mac.type == e1000_82576)
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
				 E1000_GPIE_PBA | E1000_GPIE_EIAME |
				 E1000_GPIE_NSICR);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = &adapter->tx_ring[i];
		igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
		adapter->eims_enable_mask |= tx_ring->eims_value;
		if (tx_ring->itr_val)
			writel(tx_ring->itr_val,
			       hw->hw_addr + tx_ring->itr_register);
		else
			writel(1, hw->hw_addr + tx_ring->itr_register);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *rx_ring = &adapter->rx_ring[i];
		rx_ring->buddy = NULL;
		igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
		adapter->eims_enable_mask |= rx_ring->eims_value;
		if (rx_ring->itr_val)
			writel(rx_ring->itr_val,
			       hw->hw_addr + rx_ring->itr_register);
		else
			writel(1, hw->hw_addr + rx_ring->itr_register);
	}


	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		array_wr32(E1000_MSIXBM(0), vector++,
			   E1000_EIMS_OTHER);

		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);
		adapter->eims_enable_mask |= E1000_EIMS_OTHER;
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
		tmp = (vector++ | E1000_IVAR_VALID) << 8;
		wr32(E1000_IVAR_MISC, tmp);

		adapter->eims_enable_mask = (1 << (vector)) - 1;
		adapter->eims_other = 1 << (vector - 1);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */
	wrfl();
}
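
/*
 * Worked example (added for clarity, not part of the original source):
 * with 2 TX and 2 RX queues on an 82576, the loops above consume
 * vectors 0-3 and the "other causes" entry takes vector 4; after the
 * post-increment vector == 5, so eims_enable_mask = (1 << 5) - 1 = 0x1f
 * and eims_other = 1 << 4 = 0x10.
 */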

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, err = 0, vector = 0;

	vector = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		sprintf(ring->name, "%s-tx-%d", netdev->name, i);
		err = request_irq(adapter->msix_entries[vector].vector,
				  &igb_msix_tx, 0, ring->name,
				  &(adapter->tx_ring[i]));
		if (err)
			goto out;
		ring->itr_register = E1000_EITR(0) + (vector << 2);
		ring->itr_val = 976; /* ~4000 ints/sec */
		vector++;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		if (strlen(netdev->name) < (IFNAMSIZ - 5))
			sprintf(ring->name, "%s-rx-%d", netdev->name, i);
		else
			memcpy(ring->name, netdev->name, IFNAMSIZ);
		err = request_irq(adapter->msix_entries[vector].vector,
				  &igb_msix_rx, 0, ring->name,
				  &(adapter->rx_ring[i]));
		if (err)
			goto out;
		ring->itr_register = E1000_EITR(0) + (vector << 2);
		ring->itr_val = adapter->itr;
		vector++;
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &igb_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI)
		pci_disable_msi(adapter->pdev);
	return;
}


/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	/* Having more queues than CPUs doesn't make sense. */
	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
	adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());

	numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
	return;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_reset_interrupt_capability(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_rx_queues = 1;
		igb_alloc_queues(adapter);
	} else {
		switch (hw->mac.type) {
		case e1000_82575:
			wr32(E1000_MSIXBM(0),
			     (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER));
			break;
		case e1000_82576:
			wr32(E1000_IVAR0, E1000_IVAR_VALID);
			break;
		default:
			break;
		}
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
				  netdev->name, netdev);
		if (!err)
			goto request_done;
		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
			  netdev->name, netdev);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msix_entries) {
		int vector = 0, i;

		for (i = 0; i < adapter->num_tx_queues; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 &(adapter->tx_ring[i]));
		for (i = 0; i < adapter->num_rx_queues; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 &(adapter->rx_ring[i]));

		free_irq(adapter->msix_entries[vector++].vector, netdev);
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		wr32(E1000_EIAM, 0);
		wr32(E1000_EIMC, ~0);
		wr32(E1000_EIAC, 0);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		wr32(E1000_EIAC, adapter->eims_enable_mask);
		wr32(E1000_EIAM, adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count)
			wr32(E1000_MBVFIMR, 0xFF);
		wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB |
				 E1000_IMS_DOUTSYNC));
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK);
		wr32(E1000_IAM, IMS_ENABLE_MASK);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;
	if (adapter->vlgrp) {
		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
			if (adapter->hw.mng_cookie.status &
				E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
				igb_vlan_rx_add_vid(netdev, vid);
				adapter->mng_vlan_id = vid;
			} else
				adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;

			if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
			    (vid != old_vid) &&
			    !vlan_group_get_device(adapter->vlgrp, old_vid))
				igb_vlan_rx_kill_vid(netdev, old_vid);
		} else
			adapter->mng_vlan_id = vid;
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}


/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_multi(netdev);

	igb_restore_vlan(adapter);

	igb_configure_tx(adapter);
	igb_setup_rctl(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
	}


	adapter->tx_queue_len = netdev->tx_queue_len;
}


/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/

int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_ring[i].napi);
	if (adapter->msix_entries)
		igb_configure_msix(adapter);

	igb_vmm_control(adapter);
	igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
	igb_set_vmolr(hw, adapter->vfs_allocated_count);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* Fire a link change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);
	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_ring[i].napi);

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);

	/* record the stats before reset*/
	igb_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_82576:
		pba = E1000_PBA_64K;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}
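
	/*
	 * Worked example (added for clarity, not part of the original
	 * source): for a 9018-byte jumbo frame, min_tx_space =
	 * (9018 + 16 - 4) * 2 = 18060 bytes (the advanced Tx descriptor
	 * is 16 bytes), aligned up to 18432 and shifted down to 18 KB;
	 * min_rx_space = ALIGN(9018, 1024) >> 10 = 9 KB.  Any Tx deficit
	 * comes out of the Rx share unless that would fall below
	 * min_rx_space.
	 */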

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));

	if (mac->type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
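
	/*
	 * Worked example (added for clarity, not part of the original
	 * source): with pba = 34 KB and a 1522-byte max frame,
	 * hwm = min(34816 * 9 / 10, 34816 - 2 * 1522)
	 *     = min(31334, 31772) = 31334;
	 * masked to 8-byte granularity this gives high_water = 31328
	 * and low_water = 31320.
	 */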
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->type = fc->original_type;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].clear_to_send = false;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);
	wr32(E1000_WUC, 0);

	if (adapter->hw.mac.ops.init_hw(&adapter->hw))
		dev_err(&adapter->pdev->dev, "Hardware Error\n");

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_reset_adaptive(&adapter->hw);
	igb_get_phy_info(&adapter->hw);
}

static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame_adv,
	.ndo_get_stats		= igb_get_stats,
	.ndo_set_multicast_list	= igb_set_multi,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= igb_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
};

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u32 part_num;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}
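
	/*
	 * Note (added for clarity, not part of the original source): the
	 * probe first tries a 64-bit DMA mask and only falls back to
	 * 32-bit if that fails; when the 64-bit mask sticks,
	 * pci_using_dac = 1 later enables NETIF_F_HIGHDMA so the stack
	 * may hand the driver buffers above 4 GB.
	 */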

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
	                                   IORESOURCE_MEM),
	                                   igb_driver_name);
	if (err)
		goto err_pci_reg;

	err = pci_enable_pcie_error_reporting(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
		        "0x%x\n", err);
		/* non-fatal, continue */
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
	                           IGB_ABS_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* setup the private structure */
	hw->back = adapter;
	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	/* set flags */
	switch (hw->mac.type) {
	case e1000_82575:
		adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
		break;
	case e1000_82576:
	default:
		break;
	}

	hw->phy.autoneg_wait_to_complete = false;
	hw->mac.adaptive_ifs = true;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			"PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->features |= NETIF_F_GRO;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (igb_validate_nvm_checksum(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	setup_timer(&adapter->watchdog_timer, &igb_watchdog,
	            (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
	            (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.original_type = e1000_fc_default;
	hw->fc.type = e1000_fc_default;

	adapter->itr_setting = IGB_DEFAULT_ITR;
	adapter->itr = IGB_START_ITR;

	igb_validate_mdi_setting(hw);

	adapter->rx_csum = 1;

	/* Initial Wake on LAN setting.  If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	/* tell the stack to leave us alone until igb_open() is called */
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

#ifdef CONFIG_PCI_IOV
	/* since iov functionality isn't critical to base device function we
	 * can accept failure.  If it fails we don't allow iov to be enabled */
	if (hw->mac.type == e1000_82576) {
		err = pci_enable_sriov(pdev, 0);
		if (!err)
			err = device_create_file(&netdev->dev,
						 &dev_attr_num_vfs);
		if (err)
			dev_err(&pdev->dev, "Failed to initialize IOV\n");
	}

#endif
#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
		igb_setup_dca(adapter);
	}
#endif

	/*
	 * Initialize hardware timer: we keep it running just in case
	 * that some program needs it later on.
	 */
	memset(&adapter->cycles, 0, sizeof(adapter->cycles));
	adapter->cycles.read = igb_read_clock;
	adapter->cycles.mask = CLOCKSOURCE_MASK(64);
	adapter->cycles.mult = 1;
	adapter->cycles.shift = IGB_TSYNC_SHIFT;
	wr32(E1000_TIMINCA,
	     (1<<24) |
	     IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE);
#if 0
	/*
	 * Avoid rollover while we initialize by resetting the time counter.
	 */
	wr32(E1000_SYSTIML, 0x00000000);
	wr32(E1000_SYSTIMH, 0x00000000);
#else
	/*
	 * Set registers so that rollover occurs soon to test this.
	 */
	wr32(E1000_SYSTIML, 0x00000000);
	wr32(E1000_SYSTIMH, 0xFF800000);
#endif
	wrfl();
	timecounter_init(&adapter->clock,
			 &adapter->cycles,
			 ktime_to_ns(ktime_get_real()));

	/*
	 * Synchronize our NIC clock against system wall clock. NIC
	 * time stamp reading requires ~3us per sample, each sample
	 * was pretty stable even under load => only require 10
	 * samples for each offset comparison.
	 */
	memset(&adapter->compare, 0, sizeof(adapter->compare));
	adapter->compare.source = &adapter->clock;
	adapter->compare.target = ktime_get_real;
	adapter->compare.num_samples = 10;
	timecompare_update(&adapter->compare, 0);

#ifdef DEBUG
	{
		char buffer[160];
		printk(KERN_DEBUG
			"igb: %s: hw %p initialized timer\n",
			igb_get_time_str(adapter, buffer),
			&adapter->hw);
	}
#endif

	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500)
		  ? "2.5Gb/s" : "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
		   "unknown"),
		 netdev->dev_addr);

	igb_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
		(part_num >> 8), (part_num & 0xff));

	dev_info(&pdev->dev,
		"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		adapter->msix_entries ? "MSI-X" :
		(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		adapter->num_rx_queues, adapter->num_tx_queues);

	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);

	igb_free_queues(adapter);
err_sw_init:
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
	                             IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* flush_scheduled_work may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	flush_scheduled_work();

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	if (!igb_check_reset_block(&adapter->hw))
		igb_reset_phy(&adapter->hw);

	igb_reset_interrupt_capability(adapter);

	igb_free_queues(adapter);

#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
	                             IORESOURCE_MEM));

	free_netdev(netdev);

	err = pci_disable_pcie_error_reporting(pdev);
	if (err)
		dev_err(&pdev->dev,
		        "pci_disable_pcie_error_reporting failed 0x%x\n", err);

	pci_disable_device(pdev);
}

/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	adapter->rx_ps_hdr_size = 0; /* disable packet split */
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	/* This call may decrease the number of queues depending on
	 * interrupt mode. */
	igb_set_interrupt_capability(adapter);

	if (igb_alloc_queues(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}

/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igb_open(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	/* e1000_power_up_phy(adapter); */

	adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
		igb_update_mng_vlan(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	igb_configure(adapter);

	igb_vmm_control(adapter);
	igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
	igb_set_vmolr(hw, adapter->vfs_allocated_count);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_ring[i].napi);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);

	igb_irq_enable(adapter);

	netif_tx_start_all_queues(netdev);

	/* Fire a link status change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);

	return 0;

err_req_irq:
	igb_release_hw_control(adapter);
	/* e1000_power_down_phy(adapter); */
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);

	return err;
}

/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igb_close(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
	igb_down(adapter);

	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    !(adapter->vlgrp &&
	      vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
		igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);

	return 0;
}

/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_adapter *adapter,
			   struct igb_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct igb_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
					     &tx_ring->dma);

	if (!tx_ring->desc)
		goto err;

	tx_ring->adapter = adapter;
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->buffer_info);
	dev_err(&adapter->pdev->dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}
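
/*
 * Sizing example (editor's note, illustrative only): an advanced Tx
 * descriptor (union e1000_adv_tx_desc) is 16 bytes, so the default
 * 256-entry ring needs 256 * 16 = 4096 bytes -- already 4K aligned.
 * A hypothetical 80-entry ring (80 * 16 = 1280 bytes) would be rounded
 * up to 4096 by ALIGN(), since the DMA region is carved out in whole
 * 4K pages.
 */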

/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	int i, err = 0;
	int r_idx;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			dev_err(&adapter->pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(&adapter->tx_ring[i]);
			break;
		}
	}

	for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
		r_idx = i % adapter->num_tx_queues;
		adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
	}
	return err;
}
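
/*
 * Mapping example (editor's note, illustrative only): with 4 real Tx
 * rings, the modulo above makes successive multi_tx_table entries cycle
 * through rings 0, 1, 2, 3, 0, 1, ... so any queue index the stack
 * picks always lands on a valid ring.
 */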

/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;
	u32 txdctl, txctrl;
	int i, j;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		wr32(E1000_TDLEN(j),
		     ring->count * sizeof(union e1000_adv_tx_desc));
		tdba = ring->dma;
		wr32(E1000_TDBAL(j),
		     tdba & 0x00000000ffffffffULL);
		wr32(E1000_TDBAH(j), tdba >> 32);

		ring->head = E1000_TDH(j);
		ring->tail = E1000_TDT(j);
		writel(0, hw->hw_addr + ring->tail);
		writel(0, hw->hw_addr + ring->head);
		txdctl = rd32(E1000_TXDCTL(j));
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		wr32(E1000_TXDCTL(j), txdctl);

		/* Turn off Relaxed Ordering on head write-backs. The
		 * writebacks MUST be delivered in order or it will
		 * completely screw up our bookkeeping.
		 */
		txctrl = rd32(E1000_DCA_TXCTRL(j));
		txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
		wr32(E1000_DCA_TXCTRL(j), txctrl);
	}

	/* disable queue 0 to prevent tail bump w/o re-configuration */
	if (adapter->vfs_allocated_count)
		wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}
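
/*
 * Register split example (editor's note, illustrative only): a ring
 * whose descriptors sit at DMA address 0x0000000123456000 is programmed
 * as TDBAL = 0x23456000 (low 32 bits) and TDBAH = 0x1 (high 32 bits),
 * letting the 64-bit base address span the two 32-bit registers.
 */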

/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_adapter *adapter,
			   struct igb_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct igb_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
					     &rx_ring->dma);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	rx_ring->adapter = adapter;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
		"the receive descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			dev_err(&adapter->pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;
	u32 srrctl = 0;
	int i, j;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/*
	 * enable stripping of CRC. It's unlikely this will break BMC
	 * redirection as it did with e1000. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/*
	 * disable store bad packets and clear size bits.
	 */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to allow reception of packets larger than the
	 * standard frame size */
	rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	switch (adapter->rx_buffer_len) {
	case IGB_RXBUFFER_256:
		rctl |= E1000_RCTL_SZ_256;
		break;
	case IGB_RXBUFFER_512:
		rctl |= E1000_RCTL_SZ_512;
		break;
	default:
		srrctl = ALIGN(adapter->rx_buffer_len, 1024)
			 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		break;
	}

	/* 82575 and greater support packet-split where the protocol
	 * header is placed in skb->data and the packet data is
	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
	 * In the case of a non-split, skb->data is linearly filled,
	 * followed by the page buffers. Therefore, skb->data is
	 * sized to hold the largest protocol header.
	 */
	/* allocations using alloc_page take too long for regular MTU
	 * so only enable packet split for jumbo frames */
	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
		srrctl |= adapter->rx_ps_hdr_size <<
			  E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else {
		adapter->rx_ps_hdr_size = 0;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	}

	/* Attention!!! For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head-of-line
	 * blocking if an untrusted VF does not provide descriptors to
	 * hardware.
	 */
	if (adapter->vfs_allocated_count) {
		u32 vmolr;

		j = adapter->rx_ring[0].reg_idx;

		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
		srrctl |= E1000_SRRCTL_DROP_EN;

		/* disable queue 0 to prevent tail write w/o re-config */
		wr32(E1000_RXDCTL(0), 0);

		vmolr = rd32(E1000_VMOLR(j));
		if (rctl & E1000_RCTL_LPE)
			vmolr |= E1000_VMOLR_LPE;
		if (adapter->num_rx_queues > 0)
			vmolr |= E1000_VMOLR_RSSE;
		wr32(E1000_VMOLR(j), vmolr);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		j = adapter->rx_ring[i].reg_idx;
		wr32(E1000_SRRCTL(j), srrctl);
	}

	wr32(E1000_RCTL, rctl);
}
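
/*
 * SRRCTL sizing example (editor's note, illustrative only): with
 * rx_buffer_len = 2048, ALIGN(2048, 1024) = 2048, and shifting right by
 * E1000_SRRCTL_BSIZEPKT_SHIFT (10, i.e. 1 KB granularity) yields 2, so
 * the hardware is told each packet buffer is 2 KB.
 */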

/**
 * igb_rlpml_set - set maximum receive packet size
 * @adapter: board private structure
 *
 * Configure maximum receivable packet size.
 **/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
	u32 max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;

	if (adapter->vlgrp)
		max_frame_size += VLAN_TAG_SIZE;

	/* if vfs are enabled we set RLPML to the largest possible request
	 * size and set the VMOLR RLPML to the size we need */
	if (pf_id) {
		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
		max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE;
	}

	wr32(E1000_RLPML, max_frame_size);
}
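
/*
 * Worked example (editor's note, illustrative only): with a 1500-byte
 * MTU, max_frame_size is 1518; if a VLAN group is registered, 4 bytes
 * (VLAN_TAG_SIZE) are added and RLPML is programmed to 1522, so tagged
 * frames at full MTU are not dropped as oversized.
 */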

/**
 * igb_configure_vt_default_pool - Configure VT default pool
 * @adapter: board private structure
 *
 * Configure the default pool
 **/
static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;
	u32 vtctl;

	/* not in sr-iov mode - do nothing */
	if (!pf_id)
		return;

	vtctl = rd32(E1000_VT_CTL);
	vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
		   E1000_VT_CTL_DISABLE_DEF_POOL);
	vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT;
	wr32(E1000_VT_CTL, vtctl);
}

/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	u64 rdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, rxcsum;
	u32 rxdctl;
	int i;

	/* disable receives while setting up the descriptors */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	wrfl();
	mdelay(10);

	if (adapter->itr_setting > 3)
		wr32(E1000_ITR, adapter->itr);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &adapter->rx_ring[i];
		int j = ring->reg_idx;
		rdba = ring->dma;
		wr32(E1000_RDBAL(j),
		     rdba & 0x00000000ffffffffULL);
		wr32(E1000_RDBAH(j), rdba >> 32);
		wr32(E1000_RDLEN(j),
		     ring->count * sizeof(union e1000_adv_rx_desc));

		ring->head = E1000_RDH(j);
		ring->tail = E1000_RDT(j);
		writel(0, hw->hw_addr + ring->tail);
		writel(0, hw->hw_addr + ring->head);

		rxdctl = rd32(E1000_RXDCTL(j));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= IGB_RX_PTHRESH;
		rxdctl |= IGB_RX_HTHRESH << 8;
		rxdctl |= IGB_RX_WTHRESH << 16;
		wr32(E1000_RXDCTL(j), rxdctl);
	}

	if (adapter->num_rx_queues > 1) {
		u32 random[10];
		u32 mrqc;
		u32 j, shift;
		union e1000_reta {
			u32 dword;
			u8 bytes[4];
		} reta;

		get_random_bytes(&random[0], 40);

		if (hw->mac.type >= e1000_82576)
			shift = 0;
		else
			shift = 6;
		for (j = 0; j < (32 * 4); j++) {
			reta.bytes[j & 3] =
				adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
			if ((j & 3) == 3)
				writel(reta.dword,
				       hw->hw_addr + E1000_RETA(0) + (j & ~3));
		}
		if (adapter->vfs_allocated_count)
			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
		else
			mrqc = E1000_MRQC_ENABLE_RSS_4Q;

		/* Fill out hash function seeds */
		for (j = 0; j < 10; j++)
			array_wr32(E1000_RSSRK(0), j, random[j]);

		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
			 E1000_MRQC_RSS_FIELD_IPV4_TCP);
		mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
			 E1000_MRQC_RSS_FIELD_IPV6_TCP);
		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
			 E1000_MRQC_RSS_FIELD_IPV6_UDP);
		mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
			 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);

		wr32(E1000_MRQC, mrqc);

		/* Multiqueue and raw packet checksumming are mutually
		 * exclusive. Note that this is not the same as TCP/IP
		 * checksumming, which works fine. */
		rxcsum = rd32(E1000_RXCSUM);
		rxcsum |= E1000_RXCSUM_PCSD;
		wr32(E1000_RXCSUM, rxcsum);
	} else {
		/* Enable multi-queue for sr-iov */
		if (adapter->vfs_allocated_count)
			wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
		/* Enable Receive Checksum Offload for TCP and UDP */
		rxcsum = rd32(E1000_RXCSUM);
		if (adapter->rx_csum)
			rxcsum |= E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPPCSE;
		else
			rxcsum &= ~(E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPPCSE);

		wr32(E1000_RXCSUM, rxcsum);
	}

	/* Set the default pool for the PF's first queue */
	igb_configure_vt_default_pool(adapter);

	igb_rlpml_set(adapter);

	/* Enable Receives */
	wr32(E1000_RCTL, rctl);
}
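
/*
 * RETA example (editor's note, illustrative only): the redirection table
 * holds 128 one-byte entries written four at a time (32 dword writes).
 * With num_rx_queues = 4, entry j selects ring (j % 4), spreading RSS
 * hash values round-robin across the four rings.  On 82575 the queue
 * index lives in the upper bits of each entry, hence the shift of 6;
 * 82576 and later use the low bits directly (shift 0).
 */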

/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	struct pci_dev *pdev = tx_ring->adapter->pdev;

	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_free_tx_resources(&adapter->tx_ring[i]);
}

static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
					   struct igb_buffer *buffer_info)
{
	buffer_info->dma = 0;
	if (buffer_info->skb) {
		skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
			      DMA_TO_DEVICE);
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	/* buffer_info must be completely set up in the transmit path */
}

/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	struct igb_adapter *adapter = tx_ring->adapter;
	struct igb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igb_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct igb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_clean_tx_ring(&adapter->tx_ring[i]);
}

/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	struct pci_dev *pdev = rx_ring->adapter->pdev;

	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_free_rx_resources(&adapter->rx_ring[i]);
}

/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	struct igb_adapter *adapter = rx_ring->adapter;
	struct igb_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->rx_ps_hdr_size)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_ps_hdr_size,
						 PCI_DMA_FROMDEVICE);
			else
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		if (buffer_info->page) {
			if (buffer_info->page_dma)
				pci_unmap_page(pdev, buffer_info->page_dma,
					       PAGE_SIZE / 2,
					       PCI_DMA_FROMDEVICE);
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_dma = 0;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_clean_rx_ring(&adapter->rx_ring[i]);
}

/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);

	return 0;
}

/**
 * igb_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_multi(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct dev_mc_list *mc_ptr;
	u8 *mta_list = NULL;
	u32 rctl;
	int i;

	/* Check for Promiscuous and All Multicast modes */

	rctl = rd32(E1000_RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		rctl &= ~E1000_RCTL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			rctl &= ~E1000_RCTL_UPE;
		} else
			rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
		rctl |= E1000_RCTL_VFE;
	}
	wr32(E1000_RCTL, rctl);

	if (netdev->mc_count) {
		/* 6 bytes (ETH_ALEN) per multicast address */
		mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
		if (!mta_list) {
			dev_err(&adapter->pdev->dev,
				"failed to allocate multicast filter list\n");
			return;
		}
	}

	/* The shared function expects a packed array of only addresses. */
	mc_ptr = netdev->mc_list;

	for (i = 0; i < netdev->mc_count; i++) {
		if (!mc_ptr)
			break;
		memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
		mc_ptr = mc_ptr->next;
	}
	igb_update_mc_addr_list(hw, mta_list, i,
				adapter->vfs_allocated_count + 1,
				mac->rar_entry_count);

	igb_set_mc_list_pools(adapter, i, mac->rar_entry_count);
	igb_restore_vf_multicasts(adapter);

	kfree(mta_list);
}

/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */
static void igb_update_phy_info(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;
	igb_get_phy_info(&adapter->hw);
}

/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
static bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;
	s32 ret_val = 0;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt. get_link_status will stay
	 * false until the e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_fiber:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = !!(rd32(E1000_STATUS) & E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = hw->mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	return link_active;
}

/**
 * igb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igb_watchdog(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
					struct igb_adapter, watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct igb_ring *tx_ring = adapter->tx_ring;
	u32 link;
	u32 eics = 0;
	int i;

	link = igb_has_link(adapter);
	if ((netif_carrier_ok(netdev)) && link)
		goto link_up;

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			hw->mac.ops.get_speed_and_duplex(&adapter->hw,
						&adapter->link_speed,
						&adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Link status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
				 "Flow Control: %s\n",
				 netdev->name,
				 adapter->link_speed,
				 adapter->link_duplex == FULL_DUPLEX ?
				 "Full Duplex" : "Half Duplex",
				 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
				 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
				 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
				 E1000_CTRL_TFCE) ? "TX" : "None")));

			/* tweak tx_queue_len according to speed/duplex and
			 * adjust the timeout factor */
			netdev->tx_queue_len = adapter->tx_queue_len;
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				netdev->tx_queue_len = 10;
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				netdev->tx_queue_len = 100;
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			/* Link status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	}

link_up:
	igb_update_stats(adapter);

	hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
	adapter->gorc_old = adapter->stats.gorc;
	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
	adapter->gotc_old = adapter->stats.gotc;

	igb_update_adaptive(&adapter->hw);

	if (!netif_carrier_ok(netdev)) {
		if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
		}
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	if (adapter->msix_entries) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			eics |= adapter->rx_ring[i].eims_value;
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	/* Force detection of hung controller every watchdog period */
	tx_ring->detect_tx_hung = true;

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 * @rx_ring: pointer to ring
 *
 * Stores a new ITR value based strictly on packet size. This
 * algorithm is less sophisticated than that used in igb_update_itr,
 * due to the difficulty of synchronizing statistics across multiple
 * receive rings. The divisors and thresholds used by this function
 * were determined based on theoretical maximum wire speed and testing
 * data, in order to minimize response time while increasing bulk
 * throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see igb_param.c)
 * NOTE: This function is called only when operating in a multiqueue
 * receive environment.
 **/
static void igb_update_ring_itr(struct igb_ring *rx_ring)
{
	int new_val = rx_ring->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = rx_ring->adapter;

	if (!rx_ring->total_packets)
		goto clear_counts; /* no packets, so don't do anything */

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = 120;
		goto set_itr_val;
	}
	avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

set_itr_val:
	if (new_val != rx_ring->itr_val) {
		rx_ring->itr_val = new_val;
		rx_ring->set_itr = 1;
	}
clear_counts:
	rx_ring->total_bytes = 0;
	rx_ring->total_packets = 0;
}
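
/*
 * Worked example (editor's note, illustrative only): 1000 packets and
 * 1,500,000 bytes in the last interval give avg_wire_size = 1500 + 24 =
 * 1524; that is outside the 300..1200 "boost" window, so new_val =
 * 1524 / 2 = 762.  A burst of 64-byte frames (64 + 24 = 88) would
 * instead yield 88 / 2 = 44, a much more aggressive interrupt rate for
 * latency-sensitive small-packet traffic.
 */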

/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see igb_param.c)
 * NOTE: These calculations are only valid when operating in a single-
 * queue environment.
 **/
static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
				   int packets, int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				retval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				retval = bulk_latency;
			} else if ((packets > 35)) {
				retval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 1500) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}
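
/*
 * Worked example (editor's note, illustrative only): starting from
 * low_latency with 40 packets totalling 20000 bytes, bytes > 10000 and
 * bytes/packets = 500 (not TSO-sized), packets > 35, so the function
 * steps down to lowest_latency.  Two 200-byte packets (bytes < 512)
 * would do the same; a TSO burst averaging more than 8000 bytes per
 * packet would instead push the setting up to bulk_latency.
 */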

static void igb_set_itr(struct igb_adapter *adapter)
{
	u16 current_itr;
	u32 new_itr = adapter->itr;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->rx_itr = igb_update_itr(adapter,
					 adapter->rx_itr,
					 adapter->rx_ring->total_packets,
					 adapter->rx_ring->total_bytes);

	if (adapter->rx_ring->buddy) {
		adapter->tx_itr = igb_update_itr(adapter,
						 adapter->tx_itr,
						 adapter->tx_ring->total_packets,
						 adapter->tx_ring->total_bytes);
		current_itr = max(adapter->rx_itr, adapter->tx_itr);
	} else {
		current_itr = adapter->rx_itr;
	}

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && current_itr == lowest_latency)
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

set_itr_now:
	adapter->rx_ring->total_bytes = 0;
	adapter->rx_ring->total_packets = 0;
	if (adapter->rx_ring->buddy) {
		adapter->rx_ring->buddy->total_bytes = 0;
		adapter->rx_ring->buddy->total_packets = 0;
	}

	if (new_itr != adapter->itr) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > adapter->itr ?
			  min(adapter->itr + (new_itr >> 2), new_itr) :
			  new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts. Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		adapter->itr = new_itr;
		adapter->rx_ring->itr_val = 1000000000 / (new_itr * 256);
		adapter->rx_ring->set_itr = 1;
	}

	return;
}
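
/*
 * Stepping example (editor's note, illustrative only): moving from
 * itr = 4000 toward a target of 20000 ints/s, the bias logic picks
 * min(4000 + (20000 >> 2), 20000) = min(9000, 20000) = 9000, so the
 * rate ramps up over several intervals instead of jumping at once.
 * The ring value then stored is 1000000000 / (9000 * 256) = 434.
 */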

#define IGB_TX_FLAGS_CSUM		0x00000001
#define IGB_TX_FLAGS_VLAN		0x00000002
#define IGB_TX_FLAGS_TSO		0x00000004
#define IGB_TX_FLAGS_IPV4		0x00000008
#define IGB_TX_FLAGS_TSTAMP		0x00000010
#define IGB_TX_FLAGS_VLAN_MASK		0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT		16

static inline int igb_tso_adv(struct igb_adapter *adapter,
			      struct igb_ring *tx_ring,
			      struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx, l4len;
	*hdr_len = 0;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
	} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= skb_network_header_len(skb);
	*hdr_len += skb_network_header_len(skb);
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	/* For 82575, context index must be unique per ring. */
	if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
		mss_l4len_idx |= tx_ring->queue_index << 4;

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}

static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
				   struct igb_ring *tx_ring,
				   struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGB_TX_FLAGS_VLAN)
			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= skb_network_header_len(skb);

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			__be16 protocol;

			if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
				const struct vlan_ethhdr *vhdr =
					(const struct vlan_ethhdr *)skb->data;

				protocol = vhdr->h_vlan_encapsulated_proto;
			} else {
				protocol = skb->protocol;
			}

			switch (protocol) {
			case cpu_to_be16(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			case cpu_to_be16(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				if (unlikely(net_ratelimit()))
					dev_warn(&adapter->pdev->dev,
						 "partial checksum but proto=%x!\n",
						 skb->protocol);
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
			context_desc->mss_l4len_idx =
				cpu_to_le32(tx_ring->queue_index << 4);
		else
			context_desc->mss_l4len_idx = 0;

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}

#define IGB_MAX_TXD_PWR	16
#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)

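/*
 * Editor's note (illustrative only): IGB_MAX_DATA_PER_TXD = 1 << 16 =
 * 65536, so each mapped segment must stay below 64 KB; the BUG_ON
 * checks in igb_tx_map_adv() below enforce that bound.
 */
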
static inline int igb_tx_map_adv(struct igb_adapter *adapter,
				 struct igb_ring *tx_ring, struct sk_buff *skb,
				 unsigned int first)
{
	struct igb_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;
	dma_addr_t *map;

	i = tx_ring->next_to_use;

	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
		return 0;
	}

	map = skb_shinfo(skb)->dma_maps;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = map[count];
	count++;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = map[count];
		count++;
	}

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;
}

static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
				    struct igb_ring *tx_ring,
				    int tx_flags, int count, u32 paylen,
				    u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc = NULL;
	struct igb_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
			E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;

	if (tx_flags & IGB_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) &&
	    (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
			 IGB_TX_FLAGS_VLAN)))
		olinfo_status |= tx_ring->queue_index << 4;

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}

static int __igb_maybe_stop_tx(struct net_device *netdev,
			       struct igb_ring *tx_ring, int size)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 * smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

static int igb_maybe_stop_tx(struct net_device *netdev,
			     struct igb_ring *tx_ring, int size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(netdev, tx_ring, size);
}
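
/*
 * Descriptor budgeting example (editor's note, illustrative only): the
 * transmit path below asks for nr_frags + 4 free descriptors -- one per
 * page fragment, one for skb->data, one for an optional context
 * descriptor, and a two-descriptor gap that keeps the tail pointer from
 * touching the head.  A 3-fragment skb therefore needs 7 free entries
 * before it is queued.
 */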

static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
				   struct net_device *netdev,
				   struct igb_ring *tx_ring)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int count = 0;
	int tso = 0;
	union skb_shared_tx *shtx;

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	/*
	 * TODO: check that there currently is no other packet with
	 * time stamping in the queue
	 *
	 * When doing time stamping, keep the connection to the socket
	 * a while longer: it is still needed by skb_hwtstamp_tx(),
	 * called either in igb_tx_hwtstamp() or by our caller when
	 * doing software time stamping.
	 */
	shtx = skb_tx(skb);
	if (unlikely(shtx->hardware)) {
		shtx->in_progress = 1;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGB_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;
	tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
					    &hdr_len) : 0;

	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IGB_TX_FLAGS_TSO;
	else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGB_TX_FLAGS_CSUM;

	/*
	 * count reflects descriptors mapped; if 0 then a mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igb_tx_map_adv(adapter, tx_ring, skb, first);

	if (count) {
		igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
				 skb->len, hdr_len);
		netdev->trans_start = jiffies;
		/* Make sure there is space in the ring for the next send. */
		igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}

static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct igb_ring *tx_ring;
	int r_idx = 0;

	r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
	tx_ring = adapter->multi_tx_table[r_idx];

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow. Right now, performance is impacted slightly negatively
	 * if using multiple tx queues. If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
	return igb_xmit_frame_ring_adv(skb, netdev, tx_ring);
}

/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}

static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_reinit_locked(adapter);
}

/**
 * igb_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *igb_get_stats(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}
3368
3369/**
3370 * igb_change_mtu - Change the Maximum Transfer Unit
3371 * @netdev: network interface device structure
3372 * @new_mtu: new value for maximum frame size
3373 *
3374 * Returns 0 on success, negative on failure
3375 **/
3376static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3377{
3378 struct igb_adapter *adapter = netdev_priv(netdev);
3379 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3380
3381 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
3382 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3383 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
3384 return -EINVAL;
3385 }
3386
9d5c8243
AK
3387 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3388 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
3389 return -EINVAL;
3390 }
3391
3392 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
3393 msleep(1);
73cd78f1 3394
9d5c8243
AK
3395 /* igb_down has a dependency on max_frame_size */
3396 adapter->max_frame_size = max_frame;
3397 if (netif_running(netdev))
3398 igb_down(adapter);
3399
3400 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3401 * means we reserve 2 more, this pushes us to allocate from the next
3402 * larger slab size.
3403 * i.e. RXBUFFER_2048 --> size-4096 slab
3404 */
3405
3406 if (max_frame <= IGB_RXBUFFER_256)
3407 adapter->rx_buffer_len = IGB_RXBUFFER_256;
3408 else if (max_frame <= IGB_RXBUFFER_512)
3409 adapter->rx_buffer_len = IGB_RXBUFFER_512;
3410 else if (max_frame <= IGB_RXBUFFER_1024)
3411 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3412 else if (max_frame <= IGB_RXBUFFER_2048)
3413 adapter->rx_buffer_len = IGB_RXBUFFER_2048;
3414 else
bf36c1a0
AD
3415#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
3416 adapter->rx_buffer_len = IGB_RXBUFFER_16384;
3417#else
3418 adapter->rx_buffer_len = PAGE_SIZE / 2;
3419#endif
e1739522
AD
3420
3421 /* if sr-iov is enabled we need to force buffer size to 1K or larger */
3422 if (adapter->vfs_allocated_count &&
3423 (adapter->rx_buffer_len < IGB_RXBUFFER_1024))
3424 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3425
9d5c8243
AK
3426 /* adjust allocation if LPE protects us, and we aren't using SBP */
3427 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
3428 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
3429 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3430
3431 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
3432 netdev->mtu, new_mtu);
3433 netdev->mtu = new_mtu;
3434
3435 if (netif_running(netdev))
3436 igb_up(adapter);
3437 else
3438 igb_reset(adapter);
3439
3440 clear_bit(__IGB_RESETTING, &adapter->state);
3441
3442 return 0;
3443}
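
The rx_buffer_len selection above is a simple threshold ladder over max_frame; a minimal standalone sketch of the same logic (constants are illustrative stand-ins for the IGB_RXBUFFER_* defines, assuming 4 KB pages):

	#include <stdio.h>

	/* illustrative stand-ins for the IGB_RXBUFFER_* constants */
	enum { RXBUF_256 = 256, RXBUF_512 = 512, RXBUF_1024 = 1024,
	       RXBUF_2048 = 2048, RXBUF_16384 = 16384, PAGE_SZ = 4096 };

	static int pick_rx_buffer_len(int max_frame)
	{
		if (max_frame <= RXBUF_256)
			return RXBUF_256;
		if (max_frame <= RXBUF_512)
			return RXBUF_512;
		if (max_frame <= RXBUF_1024)
			return RXBUF_1024;
		if (max_frame <= RXBUF_2048)
			return RXBUF_2048;
		/* jumbo frames land in half-page buffers chained across descriptors */
		return (PAGE_SZ / 2) > RXBUF_16384 ? RXBUF_16384 : PAGE_SZ / 2;
	}

	int main(void)
	{
		/* e.g. MTU 1500 -> max_frame 1518 -> 2048-byte buffers */
		printf("%d\n", pick_rx_buffer_len(1500 + 14 + 4));
		return 0;
	}
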
3444
3445/**
3446 * igb_update_stats - Update the board statistics counters
3447 * @adapter: board private structure
3448 **/
3449
3450void igb_update_stats(struct igb_adapter *adapter)
3451{
3452 struct e1000_hw *hw = &adapter->hw;
3453 struct pci_dev *pdev = adapter->pdev;
3454 u16 phy_tmp;
3455
3456#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3457
3458 /*
3459 * Prevent stats update while adapter is being reset, or if the pci
3460 * connection is down.
3461 */
3462 if (adapter->link_speed == 0)
3463 return;
3464 if (pci_channel_offline(pdev))
3465 return;
3466
3467 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
3468 adapter->stats.gprc += rd32(E1000_GPRC);
3469 adapter->stats.gorc += rd32(E1000_GORCL);
3470 rd32(E1000_GORCH); /* clear GORCL */
3471 adapter->stats.bprc += rd32(E1000_BPRC);
3472 adapter->stats.mprc += rd32(E1000_MPRC);
3473 adapter->stats.roc += rd32(E1000_ROC);
3474
3475 adapter->stats.prc64 += rd32(E1000_PRC64);
3476 adapter->stats.prc127 += rd32(E1000_PRC127);
3477 adapter->stats.prc255 += rd32(E1000_PRC255);
3478 adapter->stats.prc511 += rd32(E1000_PRC511);
3479 adapter->stats.prc1023 += rd32(E1000_PRC1023);
3480 adapter->stats.prc1522 += rd32(E1000_PRC1522);
3481 adapter->stats.symerrs += rd32(E1000_SYMERRS);
3482 adapter->stats.sec += rd32(E1000_SEC);
3483
3484 adapter->stats.mpc += rd32(E1000_MPC);
3485 adapter->stats.scc += rd32(E1000_SCC);
3486 adapter->stats.ecol += rd32(E1000_ECOL);
3487 adapter->stats.mcc += rd32(E1000_MCC);
3488 adapter->stats.latecol += rd32(E1000_LATECOL);
3489 adapter->stats.dc += rd32(E1000_DC);
3490 adapter->stats.rlec += rd32(E1000_RLEC);
3491 adapter->stats.xonrxc += rd32(E1000_XONRXC);
3492 adapter->stats.xontxc += rd32(E1000_XONTXC);
3493 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
3494 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
3495 adapter->stats.fcruc += rd32(E1000_FCRUC);
3496 adapter->stats.gptc += rd32(E1000_GPTC);
3497 adapter->stats.gotc += rd32(E1000_GOTCL);
3498 rd32(E1000_GOTCH); /* clear GOTCL */
3499 adapter->stats.rnbc += rd32(E1000_RNBC);
3500 adapter->stats.ruc += rd32(E1000_RUC);
3501 adapter->stats.rfc += rd32(E1000_RFC);
3502 adapter->stats.rjc += rd32(E1000_RJC);
3503 adapter->stats.tor += rd32(E1000_TORH);
3504 adapter->stats.tot += rd32(E1000_TOTH);
3505 adapter->stats.tpr += rd32(E1000_TPR);
3506
3507 adapter->stats.ptc64 += rd32(E1000_PTC64);
3508 adapter->stats.ptc127 += rd32(E1000_PTC127);
3509 adapter->stats.ptc255 += rd32(E1000_PTC255);
3510 adapter->stats.ptc511 += rd32(E1000_PTC511);
3511 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
3512 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
3513
3514 adapter->stats.mptc += rd32(E1000_MPTC);
3515 adapter->stats.bptc += rd32(E1000_BPTC);
3516
3517 /* used for adaptive IFS */
3518
3519 hw->mac.tx_packet_delta = rd32(E1000_TPT);
3520 adapter->stats.tpt += hw->mac.tx_packet_delta;
3521 hw->mac.collision_delta = rd32(E1000_COLC);
3522 adapter->stats.colc += hw->mac.collision_delta;
3523
3524 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
3525 adapter->stats.rxerrc += rd32(E1000_RXERRC);
3526 adapter->stats.tncrs += rd32(E1000_TNCRS);
3527 adapter->stats.tsctc += rd32(E1000_TSCTC);
3528 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
3529
3530 adapter->stats.iac += rd32(E1000_IAC);
3531 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
3532 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
3533 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
3534 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
3535 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
3536 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
3537 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
3538 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
3539
3540 /* Fill out the OS statistics structure */
3541 adapter->net_stats.multicast = adapter->stats.mprc;
3542 adapter->net_stats.collisions = adapter->stats.colc;
3543
3544 /* Rx Errors */
3545
3546 /* RLEC on some newer hardware can be incorrect so build
3547 * our own version based on RUC and ROC */
3548 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
3549 adapter->stats.crcerrs + adapter->stats.algnerrc +
3550 adapter->stats.ruc + adapter->stats.roc +
3551 adapter->stats.cexterr;
3552 adapter->net_stats.rx_length_errors = adapter->stats.ruc +
3553 adapter->stats.roc;
3554 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
3555 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
3556 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
3557
3558 /* Tx Errors */
3559 adapter->net_stats.tx_errors = adapter->stats.ecol +
3560 adapter->stats.latecol;
3561 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
3562 adapter->net_stats.tx_window_errors = adapter->stats.latecol;
3563 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
3564
3565 /* Tx Dropped needs to be maintained elsewhere */
3566
3567 /* Phy Stats */
3568 if (hw->phy.media_type == e1000_media_type_copper) {
3569 if ((adapter->link_speed == SPEED_1000) &&
73cd78f1 3570 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
9d5c8243
AK
3571 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3572 adapter->phy_stats.idle_errors += phy_tmp;
3573 }
3574 }
3575
3576 /* Management Stats */
3577 adapter->stats.mgptc += rd32(E1000_MGTPTC);
3578 adapter->stats.mgprc += rd32(E1000_MGTPRC);
3579 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
3580}
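
Most of the registers read above are 32-bit clear-on-read counters, so each read returns the delta since the previous read and the driver folds it into a wider software total. The pattern reduces to this sketch (read_and_clear_counter() is a hypothetical stand-in for rd32()):

	#include <stdint.h>

	/* hypothetical stand-in for rd32(): reading the counter also clears it */
	extern uint32_t read_and_clear_counter(int reg);

	struct sw_stats {
		uint64_t crcerrs;	/* 64-bit total, never wraps in practice */
	};

	static void accumulate_crc_errors(struct sw_stats *s, int reg)
	{
		/* each call adds the delta accumulated since the last read */
		s->crcerrs += read_and_clear_counter(reg);
	}
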
3581
9d5c8243
AK
3582static irqreturn_t igb_msix_other(int irq, void *data)
3583{
3584 struct net_device *netdev = data;
3585 struct igb_adapter *adapter = netdev_priv(netdev);
3586 struct e1000_hw *hw = &adapter->hw;
844290e5 3587 u32 icr = rd32(E1000_ICR);
9d5c8243 3588
844290e5 3589 /* reading ICR causes bit 31 of EICR to be cleared */
dda0e083
AD
3590
 3591 if (icr & E1000_ICR_DOUTSYNC) {
3592 /* HW is reporting DMA is out of sync */
3593 adapter->stats.doosync++;
3594 }
eebbbdba 3595
4ae196df
AD
3596 /* Check for a mailbox event */
3597 if (icr & E1000_ICR_VMMB)
3598 igb_msg_task(adapter);
3599
3600 if (icr & E1000_ICR_LSC) {
3601 hw->mac.get_link_status = 1;
3602 /* guard against interrupt when we're going down */
3603 if (!test_bit(__IGB_DOWN, &adapter->state))
3604 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3605 }
3606
3607 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB);
844290e5 3608 wr32(E1000_EIMS, adapter->eims_other);
9d5c8243
AK
3609
3610 return IRQ_HANDLED;
3611}
3612
3613static irqreturn_t igb_msix_tx(int irq, void *data)
3614{
3615 struct igb_ring *tx_ring = data;
3616 struct igb_adapter *adapter = tx_ring->adapter;
3617 struct e1000_hw *hw = &adapter->hw;
3618
421e02f0 3619#ifdef CONFIG_IGB_DCA
7dfc16fa 3620 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
fe4506b6
JC
3621 igb_update_tx_dca(tx_ring);
3622#endif
73cd78f1 3623
9d5c8243
AK
3624 tx_ring->total_bytes = 0;
3625 tx_ring->total_packets = 0;
661086df
PWJ
3626
3627 /* auto mask will automatically reenable the interrupt when we write
3628 * EICS */
3b644cf6 3629 if (!igb_clean_tx_irq(tx_ring))
9d5c8243
AK
3630 /* Ring was not completely cleaned, so fire another interrupt */
3631 wr32(E1000_EICS, tx_ring->eims_value);
661086df 3632 else
9d5c8243 3633 wr32(E1000_EIMS, tx_ring->eims_value);
661086df 3634
9d5c8243
AK
3635 return IRQ_HANDLED;
3636}
3637
6eb5a7f1
AD
3638static void igb_write_itr(struct igb_ring *ring)
3639{
3640 struct e1000_hw *hw = &ring->adapter->hw;
3641 if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
3642 switch (hw->mac.type) {
3643 case e1000_82576:
73cd78f1 3644 wr32(ring->itr_register, ring->itr_val |
6eb5a7f1
AD
3645 0x80000000);
3646 break;
3647 default:
73cd78f1 3648 wr32(ring->itr_register, ring->itr_val |
6eb5a7f1
AD
3649 (ring->itr_val << 16));
3650 break;
3651 }
3652 ring->set_itr = 0;
3653 }
3654}
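
The two write formats above differ only in layout: the 82576 takes the interval once with the top bit set, while the older parts expect the same interval mirrored into both halves of the register. A sketch of the value composition (the interpretation of the bits is an assumption; itr_val comes from the moderation logic):

	#include <stdint.h>

	/* compose the EITR write the same way igb_write_itr() does */
	static uint32_t eitr_write_value(int is_82576, uint32_t itr_val)
	{
		if (is_82576)
			return itr_val | 0x80000000;	/* interval plus top control bit */
		return itr_val | (itr_val << 16);	/* interval mirrored in both halves */
	}
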
3655
9d5c8243
AK
3656static irqreturn_t igb_msix_rx(int irq, void *data)
3657{
3658 struct igb_ring *rx_ring = data;
9d5c8243 3659
844290e5
PW
3660 /* Write the ITR value calculated at the end of the
3661 * previous interrupt.
3662 */
9d5c8243 3663
6eb5a7f1 3664 igb_write_itr(rx_ring);
9d5c8243 3665
288379f0
BH
3666 if (napi_schedule_prep(&rx_ring->napi))
3667 __napi_schedule(&rx_ring->napi);
844290e5 3668
421e02f0 3669#ifdef CONFIG_IGB_DCA
8d253320 3670 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
fe4506b6
JC
3671 igb_update_rx_dca(rx_ring);
3672#endif
3673 return IRQ_HANDLED;
3674}
3675
421e02f0 3676#ifdef CONFIG_IGB_DCA
fe4506b6
JC
3677static void igb_update_rx_dca(struct igb_ring *rx_ring)
3678{
3679 u32 dca_rxctrl;
3680 struct igb_adapter *adapter = rx_ring->adapter;
3681 struct e1000_hw *hw = &adapter->hw;
3682 int cpu = get_cpu();
26bc19ec 3683 int q = rx_ring->reg_idx;
fe4506b6
JC
3684
3685 if (rx_ring->cpu != cpu) {
3686 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
2d064c06
AD
3687 if (hw->mac.type == e1000_82576) {
3688 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
92be7917 3689 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
2d064c06
AD
3690 E1000_DCA_RXCTRL_CPUID_SHIFT;
3691 } else {
3692 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
92be7917 3693 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
2d064c06 3694 }
fe4506b6
JC
3695 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
3696 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
3697 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
3698 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
3699 rx_ring->cpu = cpu;
3700 }
3701 put_cpu();
3702}
3703
3704static void igb_update_tx_dca(struct igb_ring *tx_ring)
3705{
3706 u32 dca_txctrl;
3707 struct igb_adapter *adapter = tx_ring->adapter;
3708 struct e1000_hw *hw = &adapter->hw;
3709 int cpu = get_cpu();
26bc19ec 3710 int q = tx_ring->reg_idx;
fe4506b6
JC
3711
3712 if (tx_ring->cpu != cpu) {
3713 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
2d064c06
AD
3714 if (hw->mac.type == e1000_82576) {
3715 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
92be7917 3716 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
2d064c06
AD
3717 E1000_DCA_TXCTRL_CPUID_SHIFT;
3718 } else {
3719 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
92be7917 3720 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
2d064c06 3721 }
fe4506b6
JC
3722 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
3723 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
3724 tx_ring->cpu = cpu;
3725 }
3726 put_cpu();
3727}
3728
3729static void igb_setup_dca(struct igb_adapter *adapter)
3730{
3731 int i;
3732
7dfc16fa 3733 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
fe4506b6
JC
3734 return;
3735
3736 for (i = 0; i < adapter->num_tx_queues; i++) {
3737 adapter->tx_ring[i].cpu = -1;
3738 igb_update_tx_dca(&adapter->tx_ring[i]);
3739 }
3740 for (i = 0; i < adapter->num_rx_queues; i++) {
3741 adapter->rx_ring[i].cpu = -1;
3742 igb_update_rx_dca(&adapter->rx_ring[i]);
3743 }
3744}
3745
3746static int __igb_notify_dca(struct device *dev, void *data)
3747{
3748 struct net_device *netdev = dev_get_drvdata(dev);
3749 struct igb_adapter *adapter = netdev_priv(netdev);
3750 struct e1000_hw *hw = &adapter->hw;
3751 unsigned long event = *(unsigned long *)data;
3752
3753 switch (event) {
3754 case DCA_PROVIDER_ADD:
3755 /* if already enabled, don't do it again */
7dfc16fa 3756 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
fe4506b6 3757 break;
fe4506b6
JC
3758 /* Always use CB2 mode, difference is masked
3759 * in the CB driver. */
cbd347ad 3760 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
fe4506b6 3761 if (dca_add_requester(dev) == 0) {
bbd98fe4 3762 adapter->flags |= IGB_FLAG_DCA_ENABLED;
fe4506b6
JC
3763 dev_info(&adapter->pdev->dev, "DCA enabled\n");
3764 igb_setup_dca(adapter);
3765 break;
3766 }
3767 /* Fall Through since DCA is disabled. */
3768 case DCA_PROVIDER_REMOVE:
7dfc16fa 3769 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
fe4506b6
JC
3770 /* without this a class_device is left
3771 * hanging around in the sysfs model */
3772 dca_remove_requester(dev);
3773 dev_info(&adapter->pdev->dev, "DCA disabled\n");
7dfc16fa 3774 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
cbd347ad 3775 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
fe4506b6
JC
3776 }
3777 break;
3778 }
bbd98fe4 3779
fe4506b6 3780 return 0;
9d5c8243
AK
3781}
3782
fe4506b6
JC
3783static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
3784 void *p)
3785{
3786 int ret_val;
3787
3788 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
3789 __igb_notify_dca);
3790
3791 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
3792}
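
For the callback above to fire, it has to be registered with the DCA core; dca_register_notify()/dca_unregister_notify() are the real kernel entry points, and the usual pattern from a module's init/exit paths looks like this (a sketch, not necessarily the exact wiring in this driver):

	#include <linux/dca.h>
	#include <linux/notifier.h>

	static struct notifier_block dca_notifier = {
		.notifier_call = igb_notify_dca,
	};

	/* typically called from the module init path */
	static void example_dca_register(void)
	{
		dca_register_notify(&dca_notifier);
	}

	/* and the matching teardown from module exit */
	static void example_dca_unregister(void)
	{
		dca_unregister_notify(&dca_notifier);
	}
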
421e02f0 3793#endif /* CONFIG_IGB_DCA */
9d5c8243 3794
4ae196df
AD
3795static void igb_ping_all_vfs(struct igb_adapter *adapter)
3796{
3797 struct e1000_hw *hw = &adapter->hw;
3798 u32 ping;
3799 int i;
3800
3801 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
3802 ping = E1000_PF_CONTROL_MSG;
3803 if (adapter->vf_data[i].clear_to_send)
3804 ping |= E1000_VT_MSGTYPE_CTS;
3805 igb_write_mbx(hw, &ping, 1, i);
3806 }
3807}
3808
3809static int igb_set_vf_multicasts(struct igb_adapter *adapter,
3810 u32 *msgbuf, u32 vf)
3811{
3812 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
3813 u16 *hash_list = (u16 *)&msgbuf[1];
3814 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
3815 int i;
3816
3817 /* only up to 30 hash values supported */
3818 if (n > 30)
3819 n = 30;
3820
 3821 /* salt away the number of multicast addresses assigned
 3822 * to this VF for later use to restore when the PF multicast
3823 * list changes
3824 */
3825 vf_data->num_vf_mc_hashes = n;
3826
3827 /* VFs are limited to using the MTA hash table for their multicast
3828 * addresses */
3829 for (i = 0; i < n; i++)
 3830 vf_data->vf_mc_hashes[i] = hash_list[i];
3831
3832 /* Flush and reset the mta with the new values */
3833 igb_set_multi(adapter->netdev);
3834
3835 return 0;
3836}
3837
3838static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
3839{
3840 struct e1000_hw *hw = &adapter->hw;
3841 struct vf_data_storage *vf_data;
3842 int i, j;
3843
3844 for (i = 0; i < adapter->vfs_allocated_count; i++) {
3845 vf_data = &adapter->vf_data[i];
75f4f382 3846 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4ae196df
AD
3847 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
3848 }
3849}
3850
3851static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
3852{
3853 struct e1000_hw *hw = &adapter->hw;
3854 u32 pool_mask, reg, vid;
3855 int i;
3856
3857 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
3858
3859 /* Find the vlan filter for this id */
3860 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
3861 reg = rd32(E1000_VLVF(i));
3862
3863 /* remove the vf from the pool */
3864 reg &= ~pool_mask;
3865
3866 /* if pool is empty then remove entry from vfta */
3867 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
3868 (reg & E1000_VLVF_VLANID_ENABLE)) {
 3869 vid = reg & E1000_VLVF_VLANID_MASK;
 3870 reg = 0;
3871 igb_vfta_set(hw, vid, false);
3872 }
3873
3874 wr32(E1000_VLVF(i), reg);
3875 }
3876}
3877
3878static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
3879{
3880 struct e1000_hw *hw = &adapter->hw;
3881 u32 reg, i;
3882
3883 /* It is an error to call this function when VFs are not enabled */
3884 if (!adapter->vfs_allocated_count)
3885 return -1;
3886
3887 /* Find the vlan filter for this id */
3888 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
3889 reg = rd32(E1000_VLVF(i));
3890 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
3891 vid == (reg & E1000_VLVF_VLANID_MASK))
3892 break;
3893 }
3894
3895 if (add) {
3896 if (i == E1000_VLVF_ARRAY_SIZE) {
3897 /* Did not find a matching VLAN ID entry that was
3898 * enabled. Search for a free filter entry, i.e.
3899 * one without the enable bit set
3900 */
3901 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
3902 reg = rd32(E1000_VLVF(i));
3903 if (!(reg & E1000_VLVF_VLANID_ENABLE))
3904 break;
3905 }
3906 }
3907 if (i < E1000_VLVF_ARRAY_SIZE) {
3908 /* Found an enabled/available entry */
3909 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
3910
3911 /* if !enabled we need to set this up in vfta */
3912 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
cad6d05f
AD
3913 /* add VID to filter table, if bit already set
3914 * PF must have added it outside of table */
3915 if (igb_vfta_set(hw, vid, true))
3916 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
3917 adapter->vfs_allocated_count);
4ae196df
AD
3918 reg |= E1000_VLVF_VLANID_ENABLE;
3919 }
cad6d05f
AD
3920 reg &= ~E1000_VLVF_VLANID_MASK;
3921 reg |= vid;
4ae196df
AD
3922
3923 wr32(E1000_VLVF(i), reg);
3924 return 0;
3925 }
3926 } else {
3927 if (i < E1000_VLVF_ARRAY_SIZE) {
3928 /* remove vf from the pool */
3929 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
3930 /* if pool is empty then remove entry from vfta */
3931 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
3932 reg = 0;
3933 igb_vfta_set(hw, vid, false);
3934 }
3935 wr32(E1000_VLVF(i), reg);
3936 return 0;
3937 }
3938 }
3939 return -1;
3940}
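
Each VLVF entry packs the VLAN id, an enable bit, and one membership bit per pool into a single 32-bit register, which is what the mask-and-shift arithmetic above manipulates. A sketch of composing an entry (the mask values mirror the E1000_VLVF_* defines and are assumptions here):

	#include <stdint.h>

	#define VLVF_VLANID_MASK	0x00000FFF	/* bits 11:0 */
	#define VLVF_POOLSEL_SHIFT	12		/* pool bitmap starts at bit 12 */
	#define VLVF_VLANID_ENABLE	0x80000000	/* entry-valid bit */

	/* build a VLVF entry for vid with the given pool membership bitmap */
	static uint32_t vlvf_entry(uint16_t vid, uint32_t pool_bitmap)
	{
		return (vid & VLVF_VLANID_MASK) |
		       (pool_bitmap << VLVF_POOLSEL_SHIFT) |
		       VLVF_VLANID_ENABLE;
	}
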
3941
3942static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
3943{
3944 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
3945 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
3946
3947 return igb_vlvf_set(adapter, vid, add, vf);
3948}
3949
3950static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
3951{
3952 struct e1000_hw *hw = &adapter->hw;
3953
3954 /* disable mailbox functionality for vf */
3955 adapter->vf_data[vf].clear_to_send = false;
3956
3957 /* reset offloads to defaults */
3958 igb_set_vmolr(hw, vf);
3959
3960 /* reset vlans for device */
3961 igb_clear_vf_vfta(adapter, vf);
3962
3963 /* reset multicast table array for vf */
3964 adapter->vf_data[vf].num_vf_mc_hashes = 0;
3965
3966 /* Flush and reset the mta with the new values */
3967 igb_set_multi(adapter->netdev);
3968}
3969
3970static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
3971{
3972 struct e1000_hw *hw = &adapter->hw;
3973 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
3974 u32 reg, msgbuf[3];
3975 u8 *addr = (u8 *)(&msgbuf[1]);
3976
3977 /* process all the same items cleared in a function level reset */
3978 igb_vf_reset_event(adapter, vf);
3979
3980 /* set vf mac address */
3981 igb_rar_set(hw, vf_mac, vf + 1);
3982 igb_set_rah_pool(hw, vf, vf + 1);
3983
3984 /* enable transmit and receive for vf */
3985 reg = rd32(E1000_VFTE);
3986 wr32(E1000_VFTE, reg | (1 << vf));
3987 reg = rd32(E1000_VFRE);
3988 wr32(E1000_VFRE, reg | (1 << vf));
3989
3990 /* enable mailbox functionality for vf */
3991 adapter->vf_data[vf].clear_to_send = true;
3992
3993 /* reply to reset with ack and vf mac address */
3994 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
3995 memcpy(addr, vf_mac, 6);
3996 igb_write_mbx(hw, msgbuf, 3, vf);
3997}
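
The reply assembled above is three 32-bit words: the message type with the ACK bit set, then the six MAC bytes spanning msgbuf[1] and the low half of msgbuf[2]. A VF-side parser would unpack it roughly as follows (the message-type constants are illustrative stand-ins for the E1000_VF_RESET / E1000_VT_MSGTYPE_ACK values):

	#include <stdint.h>
	#include <string.h>

	#define VF_RESET	0x01		/* stand-in for E1000_VF_RESET */
	#define VT_MSGTYPE_ACK	0x80000000	/* stand-in for E1000_VT_MSGTYPE_ACK */

	/* returns 0 and fills mac[] if the PF acked the reset request */
	static int parse_reset_reply(const uint32_t msgbuf[3], uint8_t mac[6])
	{
		if ((msgbuf[0] & 0xFFFF) != VF_RESET || !(msgbuf[0] & VT_MSGTYPE_ACK))
			return -1;
		memcpy(mac, &msgbuf[1], 6);	/* bytes live in msgbuf[1..2] */
		return 0;
	}
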
3998
3999static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
4000{
 4001 unsigned char *addr = (unsigned char *)&msg[1];
4002 int err = -1;
4003
4004 if (is_valid_ether_addr(addr))
4005 err = igb_set_vf_mac(adapter, vf, addr);
4006
4007 return err;
4008
4009}
4010
4011static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
4012{
4013 struct e1000_hw *hw = &adapter->hw;
4014 u32 msg = E1000_VT_MSGTYPE_NACK;
4015
4016 /* if device isn't clear to send it shouldn't be reading either */
4017 if (!adapter->vf_data[vf].clear_to_send)
4018 igb_write_mbx(hw, &msg, 1, vf);
4019}
4020
4021
4022static void igb_msg_task(struct igb_adapter *adapter)
4023{
4024 struct e1000_hw *hw = &adapter->hw;
4025 u32 vf;
4026
4027 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
4028 /* process any reset requests */
4029 if (!igb_check_for_rst(hw, vf)) {
4030 adapter->vf_data[vf].clear_to_send = false;
4031 igb_vf_reset_event(adapter, vf);
4032 }
4033
4034 /* process any messages pending */
4035 if (!igb_check_for_msg(hw, vf))
4036 igb_rcv_msg_from_vf(adapter, vf);
4037
4038 /* process any acks */
4039 if (!igb_check_for_ack(hw, vf))
4040 igb_rcv_ack_from_vf(adapter, vf);
4041
4042 }
4043}
4044
4045static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4046{
4047 u32 mbx_size = E1000_VFMAILBOX_SIZE;
4048 u32 msgbuf[mbx_size];
4049 struct e1000_hw *hw = &adapter->hw;
4050 s32 retval;
4051
4052 retval = igb_read_mbx(hw, msgbuf, mbx_size, vf);
4053
4054 if (retval)
4055 dev_err(&adapter->pdev->dev,
4056 "Error receiving message from VF\n");
4057
4058 /* this is a message we already processed, do nothing */
4059 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
4060 return retval;
4061
4062 /*
4063 * until the vf completes a reset it should not be
4064 * allowed to start any configuration.
4065 */
4066
4067 if (msgbuf[0] == E1000_VF_RESET) {
4068 igb_vf_reset_msg(adapter, vf);
4069
4070 return retval;
4071 }
4072
4073 if (!adapter->vf_data[vf].clear_to_send) {
4074 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
4075 igb_write_mbx(hw, msgbuf, 1, vf);
4076 return retval;
4077 }
4078
 4079 switch (msgbuf[0] & 0xFFFF) {
4080 case E1000_VF_SET_MAC_ADDR:
4081 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
4082 break;
4083 case E1000_VF_SET_MULTICAST:
4084 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
4085 break;
4086 case E1000_VF_SET_LPE:
4087 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
4088 break;
4089 case E1000_VF_SET_VLAN:
4090 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
4091 break;
4092 default:
4093 dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
4094 retval = -1;
4095 break;
4096 }
4097
4098 /* notify the VF of the results of what it sent us */
4099 if (retval)
4100 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
4101 else
4102 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
4103
4104 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
4105
4106 igb_write_mbx(hw, msgbuf, 1, vf);
4107
4108 return retval;
4109}
4110
9d5c8243
AK
4111/**
4112 * igb_intr_msi - Interrupt Handler
4113 * @irq: interrupt number
4114 * @data: pointer to a network interface device structure
4115 **/
4116static irqreturn_t igb_intr_msi(int irq, void *data)
4117{
4118 struct net_device *netdev = data;
4119 struct igb_adapter *adapter = netdev_priv(netdev);
9d5c8243
AK
4120 struct e1000_hw *hw = &adapter->hw;
4121 /* read ICR disables interrupts using IAM */
4122 u32 icr = rd32(E1000_ICR);
4123
6eb5a7f1 4124 igb_write_itr(adapter->rx_ring);
9d5c8243 4125
dda0e083
AD
 4126 if (icr & E1000_ICR_DOUTSYNC) {
4127 /* HW is reporting DMA is out of sync */
4128 adapter->stats.doosync++;
4129 }
4130
9d5c8243
AK
4131 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4132 hw->mac.get_link_status = 1;
4133 if (!test_bit(__IGB_DOWN, &adapter->state))
4134 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4135 }
4136
288379f0 4137 napi_schedule(&adapter->rx_ring[0].napi);
9d5c8243
AK
4138
4139 return IRQ_HANDLED;
4140}
4141
4142/**
4a3c6433 4143 * igb_intr - Legacy Interrupt Handler
9d5c8243
AK
4144 * @irq: interrupt number
4145 * @data: pointer to a network interface device structure
4146 **/
4147static irqreturn_t igb_intr(int irq, void *data)
4148{
4149 struct net_device *netdev = data;
4150 struct igb_adapter *adapter = netdev_priv(netdev);
9d5c8243
AK
4151 struct e1000_hw *hw = &adapter->hw;
4152 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
4153 * need for the IMC write */
4154 u32 icr = rd32(E1000_ICR);
9d5c8243
AK
4155 if (!icr)
4156 return IRQ_NONE; /* Not our interrupt */
4157
6eb5a7f1 4158 igb_write_itr(adapter->rx_ring);
9d5c8243
AK
4159
4160 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
4161 * not set, then the adapter didn't send an interrupt */
4162 if (!(icr & E1000_ICR_INT_ASSERTED))
4163 return IRQ_NONE;
4164
dda0e083
AD
 4165 if (icr & E1000_ICR_DOUTSYNC) {
4166 /* HW is reporting DMA is out of sync */
4167 adapter->stats.doosync++;
4168 }
4169
9d5c8243
AK
4170 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4171 hw->mac.get_link_status = 1;
4172 /* guard against interrupt when we're going down */
4173 if (!test_bit(__IGB_DOWN, &adapter->state))
4174 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4175 }
4176
288379f0 4177 napi_schedule(&adapter->rx_ring[0].napi);
9d5c8243
AK
4178
4179 return IRQ_HANDLED;
4180}
4181
46544258 4182static inline void igb_rx_irq_enable(struct igb_ring *rx_ring)
9d5c8243 4183{
661086df 4184 struct igb_adapter *adapter = rx_ring->adapter;
46544258 4185 struct e1000_hw *hw = &adapter->hw;
9d5c8243 4186
46544258
AD
4187 if (adapter->itr_setting & 3) {
4188 if (adapter->num_rx_queues == 1)
6eb5a7f1 4189 igb_set_itr(adapter);
46544258
AD
4190 else
4191 igb_update_ring_itr(rx_ring);
9d5c8243
AK
4192 }
4193
46544258
AD
4194 if (!test_bit(__IGB_DOWN, &adapter->state)) {
4195 if (adapter->msix_entries)
4196 wr32(E1000_EIMS, rx_ring->eims_value);
4197 else
4198 igb_irq_enable(adapter);
4199 }
9d5c8243
AK
4200}
4201
46544258
AD
4202/**
4203 * igb_poll - NAPI Rx polling callback
4204 * @napi: napi polling structure
4205 * @budget: count of how many packets we should handle
4206 **/
4207static int igb_poll(struct napi_struct *napi, int budget)
9d5c8243
AK
4208{
4209 struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
9d5c8243
AK
4210 int work_done = 0;
4211
421e02f0 4212#ifdef CONFIG_IGB_DCA
bd38e5d1 4213 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
fe4506b6
JC
4214 igb_update_rx_dca(rx_ring);
4215#endif
3b644cf6 4216 igb_clean_rx_irq_adv(rx_ring, &work_done, budget);
9d5c8243 4217
46544258
AD
4218 if (rx_ring->buddy) {
4219#ifdef CONFIG_IGB_DCA
bd38e5d1 4220 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
46544258
AD
4221 igb_update_tx_dca(rx_ring->buddy);
4222#endif
4223 if (!igb_clean_tx_irq(rx_ring->buddy))
4224 work_done = budget;
4225 }
4226
9d5c8243 4227 /* If not enough Rx work done, exit the polling mode */
5e6d5b17 4228 if (work_done < budget) {
288379f0 4229 napi_complete(napi);
46544258 4230 igb_rx_irq_enable(rx_ring);
9d5c8243
AK
4231 }
4232
46544258 4233 return work_done;
9d5c8243 4234}
6d8126f9 4235
33af6bcc
PO
4236/**
 4237 * igb_tx_hwtstamp - utility function which checks for a Tx time stamp
4238 * @adapter: board private structure
4239 * @skb: packet that was just sent
4240 *
4241 * If we were asked to do hardware stamping and such a time stamp is
 4242 * available, then it must have been for this skb here because we
 4243 * allow only one such packet into the queue.
4244 */
4245static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb)
4246{
4247 union skb_shared_tx *shtx = skb_tx(skb);
4248 struct e1000_hw *hw = &adapter->hw;
4249
4250 if (unlikely(shtx->hardware)) {
4251 u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID;
4252 if (valid) {
4253 u64 regval = rd32(E1000_TXSTMPL);
4254 u64 ns;
4255 struct skb_shared_hwtstamps shhwtstamps;
4256
4257 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
4258 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
4259 ns = timecounter_cyc2time(&adapter->clock,
4260 regval);
4261 timecompare_update(&adapter->compare, ns);
4262 shhwtstamps.hwtstamp = ns_to_ktime(ns);
4263 shhwtstamps.syststamp =
4264 timecompare_transform(&adapter->compare, ns);
4265 skb_tstamp_tx(skb, &shhwtstamps);
4266 }
33af6bcc
PO
4267 }
4268}
4269
9d5c8243
AK
4270/**
4271 * igb_clean_tx_irq - Reclaim resources after transmit completes
 4272 * @tx_ring: pointer to the Tx ring being cleaned
4273 * returns true if ring is completely cleaned
4274 **/
3b644cf6 4275static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
9d5c8243 4276{
3b644cf6 4277 struct igb_adapter *adapter = tx_ring->adapter;
3b644cf6 4278 struct net_device *netdev = adapter->netdev;
0e014cb1 4279 struct e1000_hw *hw = &adapter->hw;
9d5c8243
AK
4280 struct igb_buffer *buffer_info;
4281 struct sk_buff *skb;
0e014cb1 4282 union e1000_adv_tx_desc *tx_desc, *eop_desc;
9d5c8243 4283 unsigned int total_bytes = 0, total_packets = 0;
0e014cb1
AD
4284 unsigned int i, eop, count = 0;
4285 bool cleaned = false;
9d5c8243 4286
9d5c8243 4287 i = tx_ring->next_to_clean;
0e014cb1
AD
4288 eop = tx_ring->buffer_info[i].next_to_watch;
4289 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
4290
4291 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
4292 (count < tx_ring->count)) {
4293 for (cleaned = false; !cleaned; count++) {
4294 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
9d5c8243 4295 buffer_info = &tx_ring->buffer_info[i];
0e014cb1 4296 cleaned = (i == eop);
9d5c8243
AK
4297 skb = buffer_info->skb;
4298
4299 if (skb) {
4300 unsigned int segs, bytecount;
4301 /* gso_segs is currently only valid for tcp */
4302 segs = skb_shinfo(skb)->gso_segs ?: 1;
4303 /* multiply data chunks by size of headers */
4304 bytecount = ((segs - 1) * skb_headlen(skb)) +
4305 skb->len;
4306 total_packets += segs;
4307 total_bytes += bytecount;
33af6bcc
PO
4308
4309 igb_tx_hwtstamp(adapter, skb);
9d5c8243
AK
4310 }
4311
4312 igb_unmap_and_free_tx_resource(adapter, buffer_info);
0e014cb1 4313 tx_desc->wb.status = 0;
9d5c8243
AK
4314
4315 i++;
4316 if (i == tx_ring->count)
4317 i = 0;
9d5c8243 4318 }
0e014cb1
AD
4319 eop = tx_ring->buffer_info[i].next_to_watch;
4320 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
4321 }
4322
9d5c8243
AK
4323 tx_ring->next_to_clean = i;
4324
fc7d345d 4325 if (unlikely(count &&
9d5c8243 4326 netif_carrier_ok(netdev) &&
c493ea45 4327 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
9d5c8243
AK
4328 /* Make sure that anybody stopping the queue after this
4329 * sees the new next_to_clean.
4330 */
4331 smp_mb();
661086df
PWJ
4332 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
4333 !(test_bit(__IGB_DOWN, &adapter->state))) {
4334 netif_wake_subqueue(netdev, tx_ring->queue_index);
4335 ++adapter->restart_queue;
4336 }
9d5c8243
AK
4337 }
4338
4339 if (tx_ring->detect_tx_hung) {
4340 /* Detect a transmit hang in hardware, this serializes the
4341 * check with the clearing of time_stamp and movement of i */
4342 tx_ring->detect_tx_hung = false;
4343 if (tx_ring->buffer_info[i].time_stamp &&
4344 time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
4345 (adapter->tx_timeout_factor * HZ))
4346 && !(rd32(E1000_STATUS) &
4347 E1000_STATUS_TXOFF)) {
4348
9d5c8243
AK
4349 /* detected Tx unit hang */
4350 dev_err(&adapter->pdev->dev,
4351 "Detected Tx Unit Hang\n"
2d064c06 4352 " Tx Queue <%d>\n"
9d5c8243
AK
4353 " TDH <%x>\n"
4354 " TDT <%x>\n"
4355 " next_to_use <%x>\n"
4356 " next_to_clean <%x>\n"
9d5c8243
AK
4357 "buffer_info[next_to_clean]\n"
4358 " time_stamp <%lx>\n"
0e014cb1 4359 " next_to_watch <%x>\n"
9d5c8243
AK
4360 " jiffies <%lx>\n"
4361 " desc.status <%x>\n",
2d064c06 4362 tx_ring->queue_index,
9d5c8243
AK
4363 readl(adapter->hw.hw_addr + tx_ring->head),
4364 readl(adapter->hw.hw_addr + tx_ring->tail),
4365 tx_ring->next_to_use,
4366 tx_ring->next_to_clean,
9d5c8243 4367 tx_ring->buffer_info[i].time_stamp,
0e014cb1 4368 eop,
9d5c8243 4369 jiffies,
0e014cb1 4370 eop_desc->wb.status);
661086df 4371 netif_stop_subqueue(netdev, tx_ring->queue_index);
9d5c8243
AK
4372 }
4373 }
4374 tx_ring->total_bytes += total_bytes;
4375 tx_ring->total_packets += total_packets;
e21ed353
AD
4376 tx_ring->tx_stats.bytes += total_bytes;
4377 tx_ring->tx_stats.packets += total_packets;
9d5c8243
AK
4378 adapter->net_stats.tx_bytes += total_bytes;
4379 adapter->net_stats.tx_packets += total_packets;
0e014cb1 4380 return (count < tx_ring->count);
9d5c8243
AK
4381}
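
The bytecount computation in the cleanup loop credits one extra header copy per additional TSO segment, since each segment goes on the wire with its own headers while skb->len counts them only once. A worked check of the formula:

	#include <stdio.h>

	int main(void)
	{
		unsigned int segs = 3;		/* gso_segs reported for the skb */
		unsigned int headlen = 60;	/* skb_headlen(): the header bytes */
		unsigned int skb_len = 4200;	/* headers once, plus payload */

		/* same formula as igb_clean_tx_irq() above */
		unsigned int bytecount = (segs - 1) * headlen + skb_len;
		printf("bytes credited: %u\n", bytecount);	/* 4320 */
		return 0;
	}
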
4382
9d5c8243
AK
4383/**
4384 * igb_receive_skb - helper function to handle rx indications
eebbbdba 4385 * @ring: pointer to receive ring receving this packet
9d5c8243 4386 * @status: descriptor status field as written by hardware
73cd78f1 4387 * @rx_desc: receive descriptor containing vlan and type information.
9d5c8243
AK
4388 * @skb: pointer to sk_buff to be indicated to stack
4389 **/
d3352520
AD
4390static void igb_receive_skb(struct igb_ring *ring, u8 status,
4391 union e1000_adv_rx_desc * rx_desc,
4392 struct sk_buff *skb)
4393{
4394 struct igb_adapter * adapter = ring->adapter;
4395 bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
4396
0c8dfc83 4397 skb_record_rx_queue(skb, ring->queue_index);
5c0999b7 4398 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
d3352520 4399 if (vlan_extracted)
5c0999b7
HX
4400 vlan_gro_receive(&ring->napi, adapter->vlgrp,
4401 le16_to_cpu(rx_desc->wb.upper.vlan),
4402 skb);
d3352520 4403 else
5c0999b7 4404 napi_gro_receive(&ring->napi, skb);
d3352520 4405 } else {
d3352520
AD
4406 if (vlan_extracted)
4407 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
4408 le16_to_cpu(rx_desc->wb.upper.vlan));
4409 else
d3352520 4410 netif_receive_skb(skb);
d3352520 4411 }
9d5c8243
AK
4412}
4413
9d5c8243
AK
4414static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
4415 u32 status_err, struct sk_buff *skb)
4416{
4417 skb->ip_summed = CHECKSUM_NONE;
4418
4419 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
4420 if ((status_err & E1000_RXD_STAT_IXSM) || !adapter->rx_csum)
4421 return;
4422 /* TCP/UDP checksum error bit is set */
4423 if (status_err &
4424 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
4425 /* let the stack verify checksum errors */
4426 adapter->hw_csum_err++;
4427 return;
4428 }
4429 /* It must be a TCP or UDP packet with a valid checksum */
4430 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
4431 skb->ip_summed = CHECKSUM_UNNECESSARY;
4432
4433 adapter->hw_csum_good++;
4434}
4435
3b644cf6
MW
4436static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4437 int *work_done, int budget)
9d5c8243 4438{
3b644cf6 4439 struct igb_adapter *adapter = rx_ring->adapter;
9d5c8243 4440 struct net_device *netdev = adapter->netdev;
33af6bcc 4441 struct e1000_hw *hw = &adapter->hw;
9d5c8243
AK
4442 struct pci_dev *pdev = adapter->pdev;
 4443 union e1000_adv_rx_desc *rx_desc, *next_rxd;
 4444 struct igb_buffer *buffer_info, *next_buffer;
4445 struct sk_buff *skb;
9d5c8243
AK
4446 bool cleaned = false;
4447 int cleaned_count = 0;
4448 unsigned int total_bytes = 0, total_packets = 0;
73cd78f1
AD
4449 unsigned int i;
4450 u32 length, hlen, staterr;
9d5c8243
AK
4451
4452 i = rx_ring->next_to_clean;
69d3ca53 4453 buffer_info = &rx_ring->buffer_info[i];
9d5c8243
AK
4454 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
4455 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
4456
4457 while (staterr & E1000_RXD_STAT_DD) {
4458 if (*work_done >= budget)
4459 break;
4460 (*work_done)++;
9d5c8243 4461
69d3ca53
AD
4462 skb = buffer_info->skb;
4463 prefetch(skb->data - NET_IP_ALIGN);
4464 buffer_info->skb = NULL;
4465
4466 i++;
4467 if (i == rx_ring->count)
4468 i = 0;
4469 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
4470 prefetch(next_rxd);
4471 next_buffer = &rx_ring->buffer_info[i];
9d5c8243
AK
4472
4473 length = le16_to_cpu(rx_desc->wb.upper.length);
4474 cleaned = true;
4475 cleaned_count++;
4476
bf36c1a0
AD
4477 if (!adapter->rx_ps_hdr_size) {
4478 pci_unmap_single(pdev, buffer_info->dma,
4479 adapter->rx_buffer_len +
4480 NET_IP_ALIGN,
4481 PCI_DMA_FROMDEVICE);
4482 skb_put(skb, length);
4483 goto send_up;
9d5c8243
AK
4484 }
4485
69d3ca53
AD
4486 /* HW will not DMA in data larger than the given buffer, even
4487 * if it parses the (NFS, of course) header to be larger. In
4488 * that case, it fills the header buffer and spills the rest
4489 * into the page.
4490 */
4491 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
4492 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
4493 if (hlen > adapter->rx_ps_hdr_size)
4494 hlen = adapter->rx_ps_hdr_size;
4495
bf36c1a0
AD
4496 if (!skb_shinfo(skb)->nr_frags) {
4497 pci_unmap_single(pdev, buffer_info->dma,
73cd78f1 4498 adapter->rx_ps_hdr_size + NET_IP_ALIGN,
bf36c1a0
AD
4499 PCI_DMA_FROMDEVICE);
4500 skb_put(skb, hlen);
4501 }
4502
4503 if (length) {
9d5c8243 4504 pci_unmap_page(pdev, buffer_info->page_dma,
bf36c1a0 4505 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
9d5c8243 4506 buffer_info->page_dma = 0;
bf36c1a0
AD
4507
4508 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
4509 buffer_info->page,
4510 buffer_info->page_offset,
4511 length);
4512
4513 if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
4514 (page_count(buffer_info->page) != 1))
4515 buffer_info->page = NULL;
4516 else
4517 get_page(buffer_info->page);
9d5c8243
AK
4518
4519 skb->len += length;
4520 skb->data_len += length;
9d5c8243 4521
bf36c1a0 4522 skb->truesize += length;
9d5c8243 4523 }
9d5c8243 4524
bf36c1a0 4525 if (!(staterr & E1000_RXD_STAT_EOP)) {
b2d56536
AD
4526 buffer_info->skb = next_buffer->skb;
4527 buffer_info->dma = next_buffer->dma;
4528 next_buffer->skb = skb;
4529 next_buffer->dma = 0;
bf36c1a0
AD
4530 goto next_desc;
4531 }
69d3ca53 4532send_up:
33af6bcc
PO
4533 /*
4534 * If this bit is set, then the RX registers contain
4535 * the time stamp. No other packet will be time
4536 * stamped until we read these registers, so read the
4537 * registers to make them available again. Because
4538 * only one packet can be time stamped at a time, we
4539 * know that the register values must belong to this
4540 * one here and therefore we don't need to compare
4541 * any of the additional attributes stored for it.
4542 *
4543 * If nothing went wrong, then it should have a
4544 * skb_shared_tx that we can turn into a
4545 * skb_shared_hwtstamps.
4546 *
4547 * TODO: can time stamping be triggered (thus locking
4548 * the registers) without the packet reaching this point
4549 * here? In that case RX time stamping would get stuck.
4550 *
4551 * TODO: in "time stamp all packets" mode this bit is
4552 * not set. Need a global flag for this mode and then
4553 * always read the registers. Cannot be done without
4554 * a race condition.
4555 */
4556 if (unlikely(staterr & E1000_RXD_STAT_TS)) {
4557 u64 regval;
4558 u64 ns;
4559 struct skb_shared_hwtstamps *shhwtstamps =
4560 skb_hwtstamps(skb);
4561
4562 WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
4563 "igb: no RX time stamp available for time stamped packet");
4564 regval = rd32(E1000_RXSTMPL);
4565 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
4566 ns = timecounter_cyc2time(&adapter->clock, regval);
4567 timecompare_update(&adapter->compare, ns);
4568 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
4569 shhwtstamps->hwtstamp = ns_to_ktime(ns);
4570 shhwtstamps->syststamp =
4571 timecompare_transform(&adapter->compare, ns);
4572 }
4573
9d5c8243
AK
4574 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
4575 dev_kfree_skb_irq(skb);
4576 goto next_desc;
4577 }
9d5c8243
AK
4578
4579 total_bytes += skb->len;
4580 total_packets++;
4581
4582 igb_rx_checksum_adv(adapter, staterr, skb);
4583
4584 skb->protocol = eth_type_trans(skb, netdev);
4585
d3352520 4586 igb_receive_skb(rx_ring, staterr, rx_desc, skb);
9d5c8243 4587
9d5c8243
AK
4588next_desc:
4589 rx_desc->wb.upper.status_error = 0;
4590
4591 /* return some buffers to hardware, one at a time is too slow */
4592 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
3b644cf6 4593 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
9d5c8243
AK
4594 cleaned_count = 0;
4595 }
4596
4597 /* use prefetched values */
4598 rx_desc = next_rxd;
4599 buffer_info = next_buffer;
9d5c8243
AK
4600 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
4601 }
bf36c1a0 4602
9d5c8243 4603 rx_ring->next_to_clean = i;
c493ea45 4604 cleaned_count = igb_desc_unused(rx_ring);
9d5c8243
AK
4605
4606 if (cleaned_count)
3b644cf6 4607 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
9d5c8243
AK
4608
4609 rx_ring->total_packets += total_packets;
4610 rx_ring->total_bytes += total_bytes;
4611 rx_ring->rx_stats.packets += total_packets;
4612 rx_ring->rx_stats.bytes += total_bytes;
4613 adapter->net_stats.rx_bytes += total_bytes;
4614 adapter->net_stats.rx_packets += total_packets;
4615 return cleaned;
4616}
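
In packet-split mode the write-back descriptor encodes the parsed header length inside hdr_info, and the receive path above recovers it with a mask and shift. A worked check (the mask and shift are illustrative stand-ins for the E1000_RXDADV_HDRBUFLEN_* defines):

	#include <stdint.h>
	#include <stdio.h>

	#define HDRBUFLEN_MASK	0x7FE0	/* stand-in for E1000_RXDADV_HDRBUFLEN_MASK */
	#define HDRBUFLEN_SHIFT	5	/* stand-in for E1000_RXDADV_HDRBUFLEN_SHIFT */

	int main(void)
	{
		uint16_t hdr_info = 0x0540;	/* example write-back value */
		unsigned int hlen = (hdr_info & HDRBUFLEN_MASK) >> HDRBUFLEN_SHIFT;
		printf("parsed header length: %u bytes\n", hlen);	/* 42 */
		return 0;
	}
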
4617
9d5c8243
AK
4618/**
4619 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 4620 * @rx_ring: pointer to the receive ring to refill
4621 **/
3b644cf6 4622static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
9d5c8243
AK
4623 int cleaned_count)
4624{
3b644cf6 4625 struct igb_adapter *adapter = rx_ring->adapter;
9d5c8243
AK
4626 struct net_device *netdev = adapter->netdev;
4627 struct pci_dev *pdev = adapter->pdev;
4628 union e1000_adv_rx_desc *rx_desc;
4629 struct igb_buffer *buffer_info;
4630 struct sk_buff *skb;
4631 unsigned int i;
db761762 4632 int bufsz;
9d5c8243
AK
4633
4634 i = rx_ring->next_to_use;
4635 buffer_info = &rx_ring->buffer_info[i];
4636
db761762
AD
4637 if (adapter->rx_ps_hdr_size)
4638 bufsz = adapter->rx_ps_hdr_size;
4639 else
4640 bufsz = adapter->rx_buffer_len;
4641 bufsz += NET_IP_ALIGN;
4642
9d5c8243
AK
4643 while (cleaned_count--) {
4644 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
4645
bf36c1a0 4646 if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
9d5c8243 4647 if (!buffer_info->page) {
bf36c1a0
AD
4648 buffer_info->page = alloc_page(GFP_ATOMIC);
4649 if (!buffer_info->page) {
4650 adapter->alloc_rx_buff_failed++;
4651 goto no_buffers;
4652 }
4653 buffer_info->page_offset = 0;
4654 } else {
4655 buffer_info->page_offset ^= PAGE_SIZE / 2;
9d5c8243
AK
4656 }
4657 buffer_info->page_dma =
db761762 4658 pci_map_page(pdev, buffer_info->page,
bf36c1a0
AD
4659 buffer_info->page_offset,
4660 PAGE_SIZE / 2,
9d5c8243
AK
4661 PCI_DMA_FROMDEVICE);
4662 }
4663
4664 if (!buffer_info->skb) {
9d5c8243 4665 skb = netdev_alloc_skb(netdev, bufsz);
9d5c8243
AK
4666 if (!skb) {
4667 adapter->alloc_rx_buff_failed++;
4668 goto no_buffers;
4669 }
4670
4671 /* Make buffer alignment 2 beyond a 16 byte boundary
4672 * this will result in a 16 byte aligned IP header after
4673 * the 14 byte MAC header is removed
4674 */
4675 skb_reserve(skb, NET_IP_ALIGN);
4676
4677 buffer_info->skb = skb;
4678 buffer_info->dma = pci_map_single(pdev, skb->data,
4679 bufsz,
4680 PCI_DMA_FROMDEVICE);
9d5c8243
AK
4681 }
4682 /* Refresh the desc even if buffer_addrs didn't change because
4683 * each write-back erases this info. */
4684 if (adapter->rx_ps_hdr_size) {
4685 rx_desc->read.pkt_addr =
4686 cpu_to_le64(buffer_info->page_dma);
4687 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
4688 } else {
4689 rx_desc->read.pkt_addr =
4690 cpu_to_le64(buffer_info->dma);
4691 rx_desc->read.hdr_addr = 0;
4692 }
4693
4694 i++;
4695 if (i == rx_ring->count)
4696 i = 0;
4697 buffer_info = &rx_ring->buffer_info[i];
4698 }
4699
4700no_buffers:
4701 if (rx_ring->next_to_use != i) {
4702 rx_ring->next_to_use = i;
4703 if (i == 0)
4704 i = (rx_ring->count - 1);
4705 else
4706 i--;
4707
4708 /* Force memory writes to complete before letting h/w
4709 * know there are new descriptors to fetch. (Only
4710 * applicable for weak-ordered memory model archs,
4711 * such as IA-64). */
4712 wmb();
4713 writel(i, adapter->hw.hw_addr + rx_ring->tail);
4714 }
4715}
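
The page_offset ^= PAGE_SIZE / 2 refill trick above lets two receive buffers share one page by alternating between its two halves. A compact illustration of the alternation, assuming 4 KB pages:

	#include <stdio.h>

	#define PAGE_SZ 4096

	int main(void)
	{
		unsigned int offset = 0;
		int refill;

		for (refill = 0; refill < 4; refill++) {
			printf("refill %d maps half-page at offset %u\n", refill, offset);
			offset ^= PAGE_SZ / 2;	/* 0, 2048, 0, 2048, ... */
		}
		return 0;
	}
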
4716
4717/**
4718 * igb_mii_ioctl -
4719 * @netdev:
4720 * @ifreq:
4721 * @cmd:
4722 **/
4723static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4724{
4725 struct igb_adapter *adapter = netdev_priv(netdev);
4726 struct mii_ioctl_data *data = if_mii(ifr);
4727
4728 if (adapter->hw.phy.media_type != e1000_media_type_copper)
4729 return -EOPNOTSUPP;
4730
4731 switch (cmd) {
4732 case SIOCGMIIPHY:
4733 data->phy_id = adapter->hw.phy.addr;
4734 break;
4735 case SIOCGMIIREG:
4736 if (!capable(CAP_NET_ADMIN))
4737 return -EPERM;
f5f4cf08
AD
4738 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
4739 &data->val_out))
9d5c8243
AK
4740 return -EIO;
4741 break;
4742 case SIOCSMIIREG:
4743 default:
4744 return -EOPNOTSUPP;
4745 }
4746 return 0;
4747}
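
From userspace these MII ioctls are driven through a struct ifreq whose data union is reinterpreted as struct mii_ioctl_data, the same structure if_mii() returns on the kernel side. A sketch reading PHY register 1 (the BMSR) on an assumed interface name "eth0":

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/mii.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ifreq ifr;
		/* mii_ioctl_data overlays the ifreq data union, mii-tool style */
		struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

		if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {	/* fills mii->phy_id */
			mii->reg_num = 1;			/* BMSR */
			if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
				printf("BMSR = 0x%04x\n", mii->val_out);
		}
		close(fd);
		return 0;
	}
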
4748
c6cb090b
PO
4749/**
4750 * igb_hwtstamp_ioctl - control hardware time stamping
4751 * @netdev:
4752 * @ifreq:
4753 * @cmd:
4754 *
33af6bcc
PO
4755 * Outgoing time stamping can be enabled and disabled. Play nice and
 4756 * disable it when requested, although it shouldn't cause any overhead
4757 * when no packet needs it. At most one packet in the queue may be
4758 * marked for time stamping, otherwise it would be impossible to tell
4759 * for sure to which packet the hardware time stamp belongs.
4760 *
4761 * Incoming time stamping has to be configured via the hardware
4762 * filters. Not all combinations are supported, in particular event
4763 * type has to be specified. Matching the kind of event packet is
4764 * not supported, with the exception of "all V2 events regardless of
4765 * level 2 or 4".
4766 *
c6cb090b
PO
4767 **/
4768static int igb_hwtstamp_ioctl(struct net_device *netdev,
4769 struct ifreq *ifr, int cmd)
4770{
33af6bcc
PO
4771 struct igb_adapter *adapter = netdev_priv(netdev);
4772 struct e1000_hw *hw = &adapter->hw;
c6cb090b 4773 struct hwtstamp_config config;
33af6bcc
PO
4774 u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
4775 u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED;
4776 u32 tsync_rx_ctl_type = 0;
4777 u32 tsync_rx_cfg = 0;
4778 int is_l4 = 0;
4779 int is_l2 = 0;
4780 short port = 319; /* PTP */
4781 u32 regval;
c6cb090b
PO
4782
4783 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
4784 return -EFAULT;
4785
4786 /* reserved for future extensions */
4787 if (config.flags)
4788 return -EINVAL;
4789
33af6bcc
PO
4790 switch (config.tx_type) {
4791 case HWTSTAMP_TX_OFF:
4792 tsync_tx_ctl_bit = 0;
4793 break;
4794 case HWTSTAMP_TX_ON:
4795 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
4796 break;
4797 default:
4798 return -ERANGE;
4799 }
4800
4801 switch (config.rx_filter) {
4802 case HWTSTAMP_FILTER_NONE:
4803 tsync_rx_ctl_bit = 0;
4804 break;
4805 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
4806 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
4807 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
4808 case HWTSTAMP_FILTER_ALL:
4809 /*
4810 * register TSYNCRXCFG must be set, therefore it is not
4811 * possible to time stamp both Sync and Delay_Req messages
4812 * => fall back to time stamping all packets
4813 */
4814 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL;
4815 config.rx_filter = HWTSTAMP_FILTER_ALL;
4816 break;
4817 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
4818 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
4819 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
4820 is_l4 = 1;
4821 break;
4822 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
4823 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
4824 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
4825 is_l4 = 1;
4826 break;
4827 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
4828 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
4829 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
4830 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
4831 is_l2 = 1;
4832 is_l4 = 1;
4833 config.rx_filter = HWTSTAMP_FILTER_SOME;
4834 break;
4835 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
4836 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
4837 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
4838 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
4839 is_l2 = 1;
4840 is_l4 = 1;
4841 config.rx_filter = HWTSTAMP_FILTER_SOME;
4842 break;
4843 case HWTSTAMP_FILTER_PTP_V2_EVENT:
4844 case HWTSTAMP_FILTER_PTP_V2_SYNC:
4845 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
4846 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2;
4847 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
4848 is_l2 = 1;
4849 break;
4850 default:
4851 return -ERANGE;
4852 }
4853
4854 /* enable/disable TX */
4855 regval = rd32(E1000_TSYNCTXCTL);
4856 regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit;
4857 wr32(E1000_TSYNCTXCTL, regval);
4858
4859 /* enable/disable RX, define which PTP packets are time stamped */
4860 regval = rd32(E1000_TSYNCRXCTL);
4861 regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit;
4862 regval = (regval & ~0xE) | tsync_rx_ctl_type;
4863 wr32(E1000_TSYNCRXCTL, regval);
4864 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
4865
4866 /*
4867 * Ethertype Filter Queue Filter[0][15:0] = 0x88F7
4868 * (Ethertype to filter on)
4869 * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
4870 * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
4871 */
4872 wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0);
4873
4874 /* L4 Queue Filter[0]: only filter by source and destination port */
4875 wr32(E1000_SPQF0, htons(port));
4876 wr32(E1000_IMIREXT(0), is_l4 ?
4877 ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
4878 wr32(E1000_IMIR(0), is_l4 ?
4879 (htons(port)
4880 | (0<<16) /* immediate interrupt disabled */
4881 | 0 /* (1<<17) bit cleared: do not bypass
4882 destination port check */)
4883 : 0);
4884 wr32(E1000_FTQF0, is_l4 ?
4885 (0x11 /* UDP */
4886 | (1<<15) /* VF not compared */
4887 | (1<<27) /* Enable Timestamping */
4888 | (7<<28) /* only source port filter enabled,
4889 source/target address and protocol
4890 masked */)
4891 : ((1<<15) | (15<<28) /* all mask bits set = filter not
4892 enabled */));
4893
4894 wrfl();
4895
4896 adapter->hwtstamp_config = config;
4897
4898 /* clear TX/RX time stamp registers, just to be sure */
4899 regval = rd32(E1000_TXSTMPH);
4900 regval = rd32(E1000_RXSTMPH);
c6cb090b 4901
33af6bcc
PO
4902 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
4903 -EFAULT : 0;
c6cb090b
PO
4904}
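
Userspace enables this time stamping path with the SIOCSHWTSTAMP ioctl, passing a struct hwtstamp_config from linux/net_tstamp.h through ifr_data; on return the driver reports what it actually configured (note the fallbacks above that rewrite rx_filter). A sketch against an assumed interface "eth0":

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&cfg, 0, sizeof(cfg));
		cfg.tx_type = HWTSTAMP_TX_ON;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;

		if (ioctl(fd, SIOCSHWTSTAMP, &ifr) == 0)
			/* the driver may have downgraded rx_filter; check result */
			printf("rx_filter now %d\n", cfg.rx_filter);
		close(fd);
		return 0;
	}
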
4905
9d5c8243
AK
4906/**
4907 * igb_ioctl -
4908 * @netdev:
4909 * @ifreq:
4910 * @cmd:
4911 **/
4912static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4913{
4914 switch (cmd) {
4915 case SIOCGMIIPHY:
4916 case SIOCGMIIREG:
4917 case SIOCSMIIREG:
4918 return igb_mii_ioctl(netdev, ifr, cmd);
c6cb090b
PO
4919 case SIOCSHWTSTAMP:
4920 return igb_hwtstamp_ioctl(netdev, ifr, cmd);
9d5c8243
AK
4921 default:
4922 return -EOPNOTSUPP;
4923 }
4924}
4925
4926static void igb_vlan_rx_register(struct net_device *netdev,
4927 struct vlan_group *grp)
4928{
4929 struct igb_adapter *adapter = netdev_priv(netdev);
4930 struct e1000_hw *hw = &adapter->hw;
4931 u32 ctrl, rctl;
4932
4933 igb_irq_disable(adapter);
4934 adapter->vlgrp = grp;
4935
4936 if (grp) {
4937 /* enable VLAN tag insert/strip */
4938 ctrl = rd32(E1000_CTRL);
4939 ctrl |= E1000_CTRL_VME;
4940 wr32(E1000_CTRL, ctrl);
4941
4942 /* enable VLAN receive filtering */
4943 rctl = rd32(E1000_RCTL);
9d5c8243
AK
4944 rctl &= ~E1000_RCTL_CFIEN;
4945 wr32(E1000_RCTL, rctl);
4946 igb_update_mng_vlan(adapter);
9d5c8243
AK
4947 } else {
4948 /* disable VLAN tag insert/strip */
4949 ctrl = rd32(E1000_CTRL);
4950 ctrl &= ~E1000_CTRL_VME;
4951 wr32(E1000_CTRL, ctrl);
4952
9d5c8243
AK
4953 if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
4954 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
4955 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
4956 }
9d5c8243
AK
4957 }
4958
e1739522
AD
4959 igb_rlpml_set(adapter);
4960
9d5c8243
AK
4961 if (!test_bit(__IGB_DOWN, &adapter->state))
4962 igb_irq_enable(adapter);
4963}
4964
4965static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
4966{
4967 struct igb_adapter *adapter = netdev_priv(netdev);
4968 struct e1000_hw *hw = &adapter->hw;
4ae196df 4969 int pf_id = adapter->vfs_allocated_count;
9d5c8243 4970
28b0759c 4971 if ((hw->mng_cookie.status &
9d5c8243
AK
4972 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
4973 (vid == adapter->mng_vlan_id))
4974 return;
4ae196df
AD
4975
4976 /* add vid to vlvf if sr-iov is enabled,
4977 * if that fails add directly to filter table */
4978 if (igb_vlvf_set(adapter, vid, true, pf_id))
4979 igb_vfta_set(hw, vid, true);
4980
9d5c8243
AK
4981}
4982
4983static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
4984{
4985 struct igb_adapter *adapter = netdev_priv(netdev);
4986 struct e1000_hw *hw = &adapter->hw;
4ae196df 4987 int pf_id = adapter->vfs_allocated_count;
9d5c8243
AK
4988
4989 igb_irq_disable(adapter);
4990 vlan_group_set_device(adapter->vlgrp, vid, NULL);
4991
4992 if (!test_bit(__IGB_DOWN, &adapter->state))
4993 igb_irq_enable(adapter);
4994
4995 if ((adapter->hw.mng_cookie.status &
4996 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
4997 (vid == adapter->mng_vlan_id)) {
4998 /* release control to f/w */
4999 igb_release_hw_control(adapter);
5000 return;
5001 }
5002
4ae196df
AD
5003 /* remove vid from vlvf if sr-iov is enabled,
5004 * if not in vlvf remove from vfta */
5005 if (igb_vlvf_set(adapter, vid, false, pf_id))
5006 igb_vfta_set(hw, vid, false);
9d5c8243
AK
5007}
5008
5009static void igb_restore_vlan(struct igb_adapter *adapter)
5010{
5011 igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
5012
5013 if (adapter->vlgrp) {
5014 u16 vid;
5015 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
5016 if (!vlan_group_get_device(adapter->vlgrp, vid))
5017 continue;
5018 igb_vlan_rx_add_vid(adapter->netdev, vid);
5019 }
5020 }
5021}
5022
5023int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
5024{
5025 struct e1000_mac_info *mac = &adapter->hw.mac;
5026
5027 mac->autoneg = 0;
5028
 5029 /* Fiber NICs only allow 1000 Mbps full duplex */
5030 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
5031 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
5032 dev_err(&adapter->pdev->dev,
5033 "Unsupported Speed/Duplex configuration\n");
5034 return -EINVAL;
5035 }
5036
5037 switch (spddplx) {
5038 case SPEED_10 + DUPLEX_HALF:
5039 mac->forced_speed_duplex = ADVERTISE_10_HALF;
5040 break;
5041 case SPEED_10 + DUPLEX_FULL:
5042 mac->forced_speed_duplex = ADVERTISE_10_FULL;
5043 break;
5044 case SPEED_100 + DUPLEX_HALF:
5045 mac->forced_speed_duplex = ADVERTISE_100_HALF;
5046 break;
5047 case SPEED_100 + DUPLEX_FULL:
5048 mac->forced_speed_duplex = ADVERTISE_100_FULL;
5049 break;
5050 case SPEED_1000 + DUPLEX_FULL:
5051 mac->autoneg = 1;
5052 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
5053 break;
5054 case SPEED_1000 + DUPLEX_HALF: /* not supported */
5055 default:
5056 dev_err(&adapter->pdev->dev,
5057 "Unsupported Speed/Duplex configuration\n");
5058 return -EINVAL;
5059 }
5060 return 0;
5061}
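
The spddplx parameter is literally the numeric speed plus the ethtool duplex constant (DUPLEX_HALF is 0 and DUPLEX_FULL is 1 in the kernel ABI), so each case above matches a distinct sum. A one-line check:

	#include <stdio.h>

	enum { SPEED_10 = 10, SPEED_100 = 100, SPEED_1000 = 1000,
	       DUPLEX_HALF = 0, DUPLEX_FULL = 1 };	/* kernel ethtool values */

	int main(void)
	{
		printf("%d\n", SPEED_100 + DUPLEX_FULL);	/* 101 */
		return 0;
	}
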
5062
3fe7c4c9 5063static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
9d5c8243
AK
5064{
5065 struct net_device *netdev = pci_get_drvdata(pdev);
5066 struct igb_adapter *adapter = netdev_priv(netdev);
5067 struct e1000_hw *hw = &adapter->hw;
2d064c06 5068 u32 ctrl, rctl, status;
9d5c8243
AK
5069 u32 wufc = adapter->wol;
5070#ifdef CONFIG_PM
5071 int retval = 0;
5072#endif
5073
5074 netif_device_detach(netdev);
5075
a88f10ec
AD
5076 if (netif_running(netdev))
5077 igb_close(netdev);
5078
5079 igb_reset_interrupt_capability(adapter);
5080
5081 igb_free_queues(adapter);
9d5c8243
AK
5082
5083#ifdef CONFIG_PM
5084 retval = pci_save_state(pdev);
5085 if (retval)
5086 return retval;
5087#endif
5088
5089 status = rd32(E1000_STATUS);
5090 if (status & E1000_STATUS_LU)
5091 wufc &= ~E1000_WUFC_LNKC;
5092
5093 if (wufc) {
5094 igb_setup_rctl(adapter);
5095 igb_set_multi(netdev);
5096
5097 /* turn on all-multi mode if wake on multicast is enabled */
5098 if (wufc & E1000_WUFC_MC) {
5099 rctl = rd32(E1000_RCTL);
5100 rctl |= E1000_RCTL_MPE;
5101 wr32(E1000_RCTL, rctl);
5102 }
5103
5104 ctrl = rd32(E1000_CTRL);
5105 /* advertise wake from D3Cold */
5106 #define E1000_CTRL_ADVD3WUC 0x00100000
5107 /* phy power management enable */
5108 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5109 ctrl |= E1000_CTRL_ADVD3WUC;
5110 wr32(E1000_CTRL, ctrl);
5111
9d5c8243
AK
5112 /* Allow time for pending master requests to run */
5113 igb_disable_pcie_master(&adapter->hw);
5114
5115 wr32(E1000_WUC, E1000_WUC_PME_EN);
5116 wr32(E1000_WUFC, wufc);
9d5c8243
AK
5117 } else {
5118 wr32(E1000_WUC, 0);
5119 wr32(E1000_WUFC, 0);
9d5c8243
AK
5120 }
5121
3fe7c4c9
RW
5122 *enable_wake = wufc || adapter->en_mng_pt;
5123 if (!*enable_wake)
2d064c06 5124 igb_shutdown_fiber_serdes_link_82575(hw);
9d5c8243
AK
5125
5126 /* Release control of h/w to f/w. If f/w is AMT enabled, this
5127 * would have already happened in close and is redundant. */
5128 igb_release_hw_control(adapter);
5129
5130 pci_disable_device(pdev);
5131
9d5c8243
AK
5132 return 0;
5133}
5134
5135#ifdef CONFIG_PM
3fe7c4c9
RW
5136static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
5137{
5138 int retval;
5139 bool wake;
5140
5141 retval = __igb_shutdown(pdev, &wake);
5142 if (retval)
5143 return retval;
5144
5145 if (wake) {
5146 pci_prepare_to_sleep(pdev);
5147 } else {
5148 pci_wake_from_d3(pdev, false);
5149 pci_set_power_state(pdev, PCI_D3hot);
5150 }
5151
5152 return 0;
5153}

static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;	/* must be signed: holds negative error codes */

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	igb_set_interrupt_capability(adapter);

	if (igb_alloc_queues(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* e1000_power_up_phy(adapter); */

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!adapter->msix_entries) {
		igb_irq_disable(adapter);
		napi_schedule(&adapter->rx_ring[0].napi);
		return;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = &adapter->tx_ring[i];
		wr32(E1000_EIMC, tx_ring->eims_value);
		igb_clean_tx_irq(tx_ring);
		wr32(E1000_EIMS, tx_ring->eims_value);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *rx_ring = &adapter->rx_ring[i];
		wr32(E1000_EIMC, rx_ring->eims_value);
		napi_schedule(&rx_ring->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
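
/*
 * The usual way to exercise igb_netpoll() is to attach netconsole to an
 * igb interface; the addresses and ports below are only an illustration:
 *
 *	modprobe netconsole netconsole=6665@10.0.0.1/eth0,6666@10.0.0.2/
 *
 * Netconsole then emits log messages through the netpoll API, which ends
 * up in this ndo_poll_controller hook when it cannot rely on the normal
 * interrupt path.
 */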

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Implementation
 * resembles the first half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
			"failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.  Implementation resembles the
 * second half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}
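
/*
 * The three callbacks above are wired into the PCI error-recovery core
 * through a struct pci_error_handlers registered with the driver.  A
 * minimal sketch of such a table (the driver defines its own equivalent
 * elsewhere in this file):
 *
 *	static struct pci_error_handlers igb_err_handler = {
 *		.error_detected = igb_io_error_detected,
 *		.slot_reset = igb_io_slot_reset,
 *		.resume = igb_io_resume,
 *	};
 *
 * The core then drives the error_detected -> slot_reset -> resume
 * sequence when a bus error is reported.
 */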

static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
{
	u32 reg_data;

	reg_data = rd32(E1000_VMOLR(vfn));
	reg_data |= E1000_VMOLR_BAM |	 /* Accept broadcast */
		    E1000_VMOLR_ROPE |   /* Accept packets matched in UTA */
		    E1000_VMOLR_ROMPE |  /* Accept packets matched in MTA */
		    E1000_VMOLR_AUPE |   /* Accept untagged packets */
		    E1000_VMOLR_STRVLAN; /* Strip vlan tags */
	wr32(E1000_VMOLR(vfn), reg_data);
}

static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}
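
/*
 * RLPML is the per-pool long-packet maximum length.  A caller that wants
 * a VF pool to accept the same frame size as the PF might do, for
 * instance:
 *
 *	igb_set_vf_rlpml(adapter, adapter->max_frame_size, vf);
 *
 * (an illustrative call site only; adapter->max_frame_size holds the
 * PF's current maximum frame length).
 */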

static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry)
{
	u32 reg_data;

	reg_data = rd32(E1000_RAH(entry));
	reg_data &= ~E1000_RAH_POOL_MASK;
	reg_data |= E1000_RAH_POOL_1 << pool;
	wr32(E1000_RAH(entry), reg_data);
}

static void igb_set_mc_list_pools(struct igb_adapter *adapter,
				  int entry_count, u16 total_rar_filters)
{
	struct e1000_hw *hw = &adapter->hw;
	int i = adapter->vfs_allocated_count + 1;

	if ((i + entry_count) < total_rar_filters)
		total_rar_filters = i + entry_count;

	for (; i < total_rar_filters; i++)
		igb_set_rah_pool(hw, adapter->vfs_allocated_count, i);
}

static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	int rar_entry = vf + 1; /* VF MAC addresses start at entry 1 */

	igb_rar_set(hw, mac_addr, rar_entry);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_set_rah_pool(hw, vf, rar_entry);

	return 0;
}

static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg_data;

	if (!adapter->vfs_allocated_count)
		return;

	/* VFs need a PF reset indication before they
	 * can send/receive mail */
	reg_data = rd32(E1000_CTRL_EXT);
	reg_data |= E1000_CTRL_EXT_PFRSTD;
	wr32(E1000_CTRL_EXT, reg_data);

	igb_vmdq_set_loopback_pf(hw, true);
	igb_vmdq_set_replication_pf(hw, true);
}

#ifdef CONFIG_PCI_IOV
static ssize_t igb_show_num_vfs(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct igb_adapter *adapter = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%d\n", adapter->vfs_allocated_count);
}

static ssize_t igb_set_num_vfs(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct net_device *netdev = to_net_dev(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int num_vfs, i;
	unsigned char mac_addr[ETH_ALEN];
	int err;

	/* reject input that is not a single unsigned integer */
	if (sscanf(buf, "%u", &num_vfs) != 1)
		return -EINVAL;

	if (num_vfs > 7)
		num_vfs = 7;

	/* value unchanged, nothing to do */
	if (num_vfs == adapter->vfs_allocated_count)
		return count;

	if (netdev->flags & IFF_UP)
		igb_close(netdev);

	igb_reset_interrupt_capability(adapter);
	igb_free_queues(adapter);
	adapter->tx_ring = NULL;
	adapter->rx_ring = NULL;
	adapter->vfs_allocated_count = 0;

	/* reclaim resources allocated to VFs since we are changing count */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}

	if (num_vfs) {
		adapter->vf_data = kcalloc(num_vfs,
					   sizeof(struct vf_data_storage),
					   GFP_KERNEL);
		if (!adapter->vf_data) {
			dev_err(&pdev->dev, "Could not allocate VF private "
				"data - IOV enable failed\n");
		} else {
			err = pci_enable_sriov(pdev, num_vfs);
			if (!err) {
				adapter->vfs_allocated_count = num_vfs;
				dev_info(&pdev->dev, "%d vfs allocated\n",
					 num_vfs);
				for (i = 0; i < adapter->vfs_allocated_count;
				     i++) {
					random_ether_addr(mac_addr);
					igb_set_vf_mac(adapter, i, mac_addr);
				}
			} else {
				kfree(adapter->vf_data);
				adapter->vf_data = NULL;
			}
		}
	}

	igb_set_interrupt_capability(adapter);
	igb_alloc_queues(adapter);
	igb_reset(adapter);

	if (netdev->flags & IFF_UP)
		igb_open(netdev);

	return count;
}
#endif /* CONFIG_PCI_IOV */
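
/*
 * Once these attributes are hooked up to the netdev's sysfs directory
 * (the registration is outside this excerpt), the VF count can be read
 * and changed from userspace.  Assuming the attribute file is named
 * "num_vfs" (an illustrative name, not confirmed by this excerpt):
 *
 *	cat /sys/class/net/eth0/num_vfs
 *	echo 2 > /sys/class/net/eth0/num_vfs
 *
 * Writes above 7 are clamped, and a write equal to the current count is
 * accepted without reinitializing the device.
 */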
/* igb_main.c */