1 /*******************************************************************************
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *******************************************************************************/
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/init.h>
34 #include <linux/pci.h>
35 #include <linux/vmalloc.h>
36 #include <linux/pagemap.h>
37 #include <linux/delay.h>
38 #include <linux/netdevice.h>
39 #include <linux/tcp.h>
40 #include <linux/ipv6.h>
41 #include <linux/slab.h>
42 #include <net/checksum.h>
43 #include <net/ip6_checksum.h>
44 #include <linux/mii.h>
45 #include <linux/ethtool.h>
46 #include <linux/if_vlan.h>
47 #include <linux/cpu.h>
48 #include <linux/smp.h>
49 #include <linux/pm_qos_params.h>
50 #include <linux/pm_runtime.h>
51 #include <linux/aer.h>
55 #define DRV_EXTRAVERSION "-k2"
57 #define DRV_VERSION "1.2.7" DRV_EXTRAVERSION
58 char e1000e_driver_name[] = "e1000e";
59 const char e1000e_driver_version[] = DRV_VERSION;
61 static const struct e1000_info *e1000_info_tbl[] = {
62 [board_82571] = &e1000_82571_info,
63 [board_82572] = &e1000_82572_info,
64 [board_82573] = &e1000_82573_info,
65 [board_82574] = &e1000_82574_info,
66 [board_82583] = &e1000_82583_info,
67 [board_80003es2lan] = &e1000_es2_info,
68 [board_ich8lan] = &e1000_ich8_info,
69 [board_ich9lan] = &e1000_ich9_info,
70 [board_ich10lan] = &e1000_ich10_info,
71 [board_pchlan] = &e1000_pch_info,
72 [board_pch2lan] = &e1000_pch2_info,
75 struct e1000_reg_info {
80 #define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
81 #define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
82 #define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
83 #define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
84 #define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
86 #define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
87 #define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
88 #define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
89 #define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
90 #define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
92 static const struct e1000_reg_info e1000_reg_info_tbl[] = {
94 /* General Registers */
96 {E1000_STATUS, "STATUS"},
97 {E1000_CTRL_EXT, "CTRL_EXT"},
99 /* Interrupt Registers */
103 {E1000_RCTL, "RCTL"},
104 {E1000_RDLEN, "RDLEN"},
107 {E1000_RDTR, "RDTR"},
108 {E1000_RXDCTL(0), "RXDCTL"},
110 {E1000_RDBAL, "RDBAL"},
111 {E1000_RDBAH, "RDBAH"},
112 {E1000_RDFH, "RDFH"},
113 {E1000_RDFT, "RDFT"},
114 {E1000_RDFHS, "RDFHS"},
115 {E1000_RDFTS, "RDFTS"},
116 {E1000_RDFPC, "RDFPC"},
119 {E1000_TCTL, "TCTL"},
120 {E1000_TDBAL, "TDBAL"},
121 {E1000_TDBAH, "TDBAH"},
122 {E1000_TDLEN, "TDLEN"},
125 {E1000_TIDV, "TIDV"},
126 {E1000_TXDCTL(0), "TXDCTL"},
127 {E1000_TADV, "TADV"},
128 {E1000_TARC(0), "TARC"},
129 {E1000_TDFH, "TDFH"},
130 {E1000_TDFT, "TDFT"},
131 {E1000_TDFHS, "TDFHS"},
132 {E1000_TDFTS, "TDFTS"},
133 {E1000_TDFPC, "TDFPC"},
135 /* List Terminator */
140 * e1000_regdump - register printout routine
142 static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
148 switch (reginfo->ofs) {
149 case E1000_RXDCTL(0):
150 for (n = 0; n < 2; n++)
151 regs[n] = __er32(hw, E1000_RXDCTL(n));
153 case E1000_TXDCTL(0):
154 for (n = 0; n < 2; n++)
155 regs[n] = __er32(hw, E1000_TXDCTL(n));
158 for (n = 0; n < 2; n++)
159 regs[n] = __er32(hw, E1000_TARC(n));
162 printk(KERN_INFO "%-15s %08x\n",
163 reginfo->name, __er32(hw, reginfo->ofs));
167 snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
168 printk(KERN_INFO "%-15s ", rname);
169 for (n = 0; n < 2; n++)
170 printk(KERN_CONT "%08x ", regs[n]);
171 printk(KERN_CONT "\n");
176 * e1000e_dump - Print registers, tx-ring and rx-ring
178 static void e1000e_dump(struct e1000_adapter *adapter)
180 struct net_device *netdev = adapter->netdev;
181 struct e1000_hw *hw = &adapter->hw;
182 struct e1000_reg_info *reginfo;
183 struct e1000_ring *tx_ring = adapter->tx_ring;
184 struct e1000_tx_desc *tx_desc;
185 struct my_u0 { u64 a; u64 b; } *u0;
186 struct e1000_buffer *buffer_info;
187 struct e1000_ring *rx_ring = adapter->rx_ring;
188 union e1000_rx_desc_packet_split *rx_desc_ps;
189 struct e1000_rx_desc *rx_desc;
190 struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
194 if (!netif_msg_hw(adapter))
197 /* Print netdevice Info */
199 dev_info(&adapter->pdev->dev, "Net device Info\n");
200 printk(KERN_INFO "Device Name state "
201 "trans_start last_rx\n");
202 printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
209 /* Print Registers */
210 dev_info(&adapter->pdev->dev, "Register Dump\n");
211 printk(KERN_INFO " Register Name Value\n");
212 for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
213 reginfo->name; reginfo++) {
214 e1000_regdump(hw, reginfo);
217 /* Print TX Ring Summary */
218 if (!netdev || !netif_running(netdev))
221 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
222 printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
223 " leng ntw timestamp\n");
224 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
225 printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
226 0, tx_ring->next_to_use, tx_ring->next_to_clean,
227 (unsigned long long)buffer_info->dma,
229 buffer_info->next_to_watch,
230 (unsigned long long)buffer_info->time_stamp);
233 if (!netif_msg_tx_done(adapter))
234 goto rx_ring_summary;
236 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
238 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
240 * Legacy Transmit Descriptor
241 * +--------------------------------------------------------------+
242 * 0 | Buffer Address [63:0] (Reserved on Write Back) |
243 * +--------------------------------------------------------------+
244 * 8 | Special | CSS | Status | CMD | CSO | Length |
245 * +--------------------------------------------------------------+
246 * 63 48 47 36 35 32 31 24 23 16 15 0
248 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
249 * 63 48 47 40 39 32 31 16 15 8 7 0
250 * +----------------------------------------------------------------+
251 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
252 * +----------------------------------------------------------------+
253 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
254 * +----------------------------------------------------------------+
255 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
257 * Extended Data Descriptor (DTYP=0x1)
258 * +----------------------------------------------------------------+
259 * 0 | Buffer Address [63:0] |
260 * +----------------------------------------------------------------+
261 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
262 * +----------------------------------------------------------------+
263 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
265 printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
266 " [bi->dma ] leng ntw timestamp bi->skb "
267 "<-- Legacy format\n");
268 printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
269 " [bi->dma ] leng ntw timestamp bi->skb "
270 "<-- Ext Context format\n");
271 printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
272 " [bi->dma ] leng ntw timestamp bi->skb "
273 "<-- Ext Data format\n");
274 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
275 tx_desc = E1000_TX_DESC(*tx_ring, i);
276 buffer_info = &tx_ring->buffer_info[i];
277 u0 = (struct my_u0 *)tx_desc;
278 printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
279 "%04X %3X %016llX %p",
280 (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
281 ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
282 (unsigned long long)le64_to_cpu(u0->a),
283 (unsigned long long)le64_to_cpu(u0->b),
284 (unsigned long long)buffer_info->dma,
285 buffer_info->length, buffer_info->next_to_watch,
286 (unsigned long long)buffer_info->time_stamp,
288 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
289 printk(KERN_CONT " NTC/U\n");
290 else if (i == tx_ring->next_to_use)
291 printk(KERN_CONT " NTU\n");
292 else if (i == tx_ring->next_to_clean)
293 printk(KERN_CONT " NTC\n");
295 printk(KERN_CONT "\n");
297 if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
298 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
299 16, 1, phys_to_virt(buffer_info->dma),
300 buffer_info->length, true);
303 /* Print RX Rings Summary */
305 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
306 printk(KERN_INFO "Queue [NTU] [NTC]\n");
307 printk(KERN_INFO " %5d %5X %5X\n", 0,
308 rx_ring->next_to_use, rx_ring->next_to_clean);
311 if (!netif_msg_rx_status(adapter))
314 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
315 switch (adapter->rx_ps_pages) {
319 /* [Extended] Packet Split Receive Descriptor Format
321 * +-----------------------------------------------------+
322 * 0 | Buffer Address 0 [63:0] |
323 * +-----------------------------------------------------+
324 * 8 | Buffer Address 1 [63:0] |
325 * +-----------------------------------------------------+
326 * 16 | Buffer Address 2 [63:0] |
327 * +-----------------------------------------------------+
328 * 24 | Buffer Address 3 [63:0] |
329 * +-----------------------------------------------------+
331 printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
333 "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
334 "[bi->skb] <-- Ext Pkt Split format\n");
335 /* [Extended] Receive Descriptor (Write-Back) Format
337 * 63 48 47 32 31 13 12 8 7 4 3 0
338 * +------------------------------------------------------+
339 * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS |
340 * | Checksum | Ident | | Queue | | Type |
341 * +------------------------------------------------------+
342 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
343 * +------------------------------------------------------+
344 * 63 48 47 32 31 20 19 0
346 printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
348 "[ l3 l2 l1 hs] [reserved ] ---------------- "
349 "[bi->skb] <-- Ext Rx Write-Back format\n");
350 for (i = 0; i < rx_ring->count; i++) {
351 buffer_info = &rx_ring->buffer_info[i];
352 rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
353 u1 = (struct my_u1 *)rx_desc_ps;
355 le32_to_cpu(rx_desc_ps->wb.middle.status_error);
356 if (staterr & E1000_RXD_STAT_DD) {
357 /* Descriptor Done */
358 printk(KERN_INFO "RWB[0x%03X] %016llX "
359 "%016llX %016llX %016llX "
360 "---------------- %p", i,
361 (unsigned long long)le64_to_cpu(u1->a),
362 (unsigned long long)le64_to_cpu(u1->b),
363 (unsigned long long)le64_to_cpu(u1->c),
364 (unsigned long long)le64_to_cpu(u1->d),
367 printk(KERN_INFO "R [0x%03X] %016llX "
368 "%016llX %016llX %016llX %016llX %p", i,
369 (unsigned long long)le64_to_cpu(u1->a),
370 (unsigned long long)le64_to_cpu(u1->b),
371 (unsigned long long)le64_to_cpu(u1->c),
372 (unsigned long long)le64_to_cpu(u1->d),
373 (unsigned long long)buffer_info->dma,
376 if (netif_msg_pktdata(adapter))
377 print_hex_dump(KERN_INFO, "",
378 DUMP_PREFIX_ADDRESS, 16, 1,
379 phys_to_virt(buffer_info->dma),
380 adapter->rx_ps_bsize0, true);
383 if (i == rx_ring->next_to_use)
384 printk(KERN_CONT " NTU\n");
385 else if (i == rx_ring->next_to_clean)
386 printk(KERN_CONT " NTC\n");
388 printk(KERN_CONT "\n");
393 /* Legacy Receive Descriptor Format
395 * +-----------------------------------------------------+
396 * | Buffer Address [63:0] |
397 * +-----------------------------------------------------+
398 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
399 * +-----------------------------------------------------+
400 * 63 48 47 40 39 32 31 16 15 0
402 printk(KERN_INFO "Rl[desc] [address 63:0 ] "
403 "[vl er S cks ln] [bi->dma ] [bi->skb] "
404 "<-- Legacy format\n");
405 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
406 rx_desc = E1000_RX_DESC(*rx_ring, i);
407 buffer_info = &rx_ring->buffer_info[i];
408 u0 = (struct my_u0 *)rx_desc;
409 printk(KERN_INFO "Rl[0x%03X] %016llX %016llX "
411 (unsigned long long)le64_to_cpu(u0->a),
412 (unsigned long long)le64_to_cpu(u0->b),
413 (unsigned long long)buffer_info->dma,
415 if (i == rx_ring->next_to_use)
416 printk(KERN_CONT " NTU\n");
417 else if (i == rx_ring->next_to_clean)
418 printk(KERN_CONT " NTC\n");
420 printk(KERN_CONT "\n");
422 if (netif_msg_pktdata(adapter))
423 print_hex_dump(KERN_INFO, "",
425 16, 1, phys_to_virt(buffer_info->dma),
426 adapter->rx_buffer_len, true);
435 * e1000_desc_unused - calculate if we have unused descriptors
437 static int e1000_desc_unused(struct e1000_ring *ring)
439 if (ring->next_to_clean > ring->next_to_use)
440 return ring->next_to_clean - ring->next_to_use - 1;
442 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
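/*
 * Illustrative example (added for clarity, not in the original source):
 * with a 256-entry ring, next_to_clean = 10 and next_to_use = 250 the
 * ring has wrapped, so 256 + 10 - 250 - 1 = 15 descriptors are free for
 * the allocator to refill.  The "- 1" keeps one slot permanently unused
 * so a completely full ring can be told apart from a completely empty one.
 */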
446 * e1000_receive_skb - helper function to handle Rx indications
447 * @adapter: board private structure
448 * @status: descriptor status field as written by hardware
449 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
450 * @skb: pointer to sk_buff to be indicated to stack
452 static void e1000_receive_skb(struct e1000_adapter *adapter,
453 struct net_device *netdev,
455 u8 status, __le16 vlan)
457 skb->protocol = eth_type_trans(skb, netdev);
459 if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
460 vlan_gro_receive(&adapter->napi, adapter->vlgrp,
461 le16_to_cpu(vlan), skb);
463 napi_gro_receive(&adapter->napi, skb);
467 * e1000_rx_checksum - Receive Checksum Offload
468 * @adapter: board private structure
469 * @status_err: receive descriptor status and error fields
470 * @csum: receive descriptor csum field
471 * @sk_buff: socket buffer with received data
473 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
474 u32 csum, struct sk_buff *skb)
476 u16 status = (u16)status_err;
477 u8 errors = (u8)(status_err >> 24);
478 skb->ip_summed = CHECKSUM_NONE;
480 /* Ignore Checksum bit is set */
481 if (status & E1000_RXD_STAT_IXSM)
483 /* TCP/UDP checksum error bit is set */
484 if (errors & E1000_RXD_ERR_TCPE) {
485 /* let the stack verify checksum errors */
486 adapter->hw_csum_err++;
490 /* TCP/UDP Checksum has not been calculated */
491 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
494 /* It must be a TCP or UDP packet with a valid checksum */
495 if (status & E1000_RXD_STAT_TCPCS) {
496 /* TCP checksum is good */
497 skb->ip_summed = CHECKSUM_UNNECESSARY;
500 * IP fragment with UDP payload
501 * Hardware complements the payload checksum, so we undo it
502 * and then put the value in host order for further stack use.
504 __sum16 sum = (__force __sum16)htons(csum);
505 skb->csum = csum_unfold(~sum);
506 skb->ip_summed = CHECKSUM_COMPLETE;
508 adapter->hw_csum_good++;
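/*
 * Clarifying note (added, not in the original source): for the UDP-in-IP-
 * fragment case above the hardware reports the complement of the payload
 * checksum in the descriptor.  htons() puts the value back in network
 * byte order, the ~ undoes the hardware complement, and csum_unfold()
 * widens the folded 16-bit value into the __wsum that CHECKSUM_COMPLETE
 * tells the stack to expect in skb->csum.
 */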
512 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
513 * @adapter: address of board private structure
515 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
518 struct net_device *netdev = adapter->netdev;
519 struct pci_dev *pdev = adapter->pdev;
520 struct e1000_ring *rx_ring = adapter->rx_ring;
521 struct e1000_rx_desc *rx_desc;
522 struct e1000_buffer *buffer_info;
525 unsigned int bufsz = adapter->rx_buffer_len;
527 i = rx_ring->next_to_use;
528 buffer_info = &rx_ring->buffer_info[i];
530 while (cleaned_count--) {
531 skb = buffer_info->skb;
537 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
539 /* Better luck next round */
540 adapter->alloc_rx_buff_failed++;
544 buffer_info->skb = skb;
546 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
547 adapter->rx_buffer_len,
549 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
550 dev_err(&pdev->dev, "RX DMA map failed\n");
551 adapter->rx_dma_failed++;
555 rx_desc = E1000_RX_DESC(*rx_ring, i);
556 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
558 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
560 * Force memory writes to complete before letting h/w
561 * know there are new descriptors to fetch. (Only
562 * applicable for weak-ordered memory model archs,
563 * such as IA-64).
564 */
565 wmb();
566 writel(i, adapter->hw.hw_addr + rx_ring->tail);
569 if (i == rx_ring->count)
571 buffer_info = &rx_ring->buffer_info[i];
574 rx_ring->next_to_use = i;
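/*
 * Note (added for clarity): the tail register is only written once every
 * E1000_RX_BUFFER_WRITE descriptors (16 in this driver) rather than per
 * descriptor, so refilling the default 256-entry ring costs at most 16
 * MMIO writes.  The memory-write barrier above makes sure the descriptor
 * contents are globally visible before the hardware is told to fetch them.
 */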
578 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
579 * @adapter: address of board private structure
581 static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
584 struct net_device *netdev = adapter->netdev;
585 struct pci_dev *pdev = adapter->pdev;
586 union e1000_rx_desc_packet_split *rx_desc;
587 struct e1000_ring *rx_ring = adapter->rx_ring;
588 struct e1000_buffer *buffer_info;
589 struct e1000_ps_page *ps_page;
593 i = rx_ring->next_to_use;
594 buffer_info = &rx_ring->buffer_info[i];
596 while (cleaned_count--) {
597 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
599 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
600 ps_page = &buffer_info->ps_pages[j];
601 if (j >= adapter->rx_ps_pages) {
602 /* all unused desc entries get hw null ptr */
603 rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
606 if (!ps_page->page) {
607 ps_page->page = alloc_page(GFP_ATOMIC);
608 if (!ps_page->page) {
609 adapter->alloc_rx_buff_failed++;
612 ps_page->dma = dma_map_page(&pdev->dev,
616 if (dma_mapping_error(&pdev->dev,
618 dev_err(&adapter->pdev->dev,
619 "RX DMA page map failed\n");
620 adapter->rx_dma_failed++;
625 * Refresh the desc even if buffer_addrs
626 * didn't change because each write-back
629 rx_desc->read.buffer_addr[j+1] =
630 cpu_to_le64(ps_page->dma);
633 skb = netdev_alloc_skb_ip_align(netdev,
634 adapter->rx_ps_bsize0);
637 adapter->alloc_rx_buff_failed++;
641 buffer_info->skb = skb;
642 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
643 adapter->rx_ps_bsize0,
645 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
646 dev_err(&pdev->dev, "RX DMA map failed\n");
647 adapter->rx_dma_failed++;
649 dev_kfree_skb_any(skb);
650 buffer_info->skb = NULL;
654 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
656 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
658 * Force memory writes to complete before letting h/w
659 * know there are new descriptors to fetch. (Only
660 * applicable for weak-ordered memory model archs,
661 * such as IA-64).
662 */
663 wmb();
664 writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
668 if (i == rx_ring->count)
670 buffer_info = &rx_ring->buffer_info[i];
674 rx_ring->next_to_use = i;
678 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
679 * @adapter: address of board private structure
680 * @cleaned_count: number of buffers to allocate this pass
683 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
686 struct net_device *netdev = adapter->netdev;
687 struct pci_dev *pdev = adapter->pdev;
688 struct e1000_rx_desc *rx_desc;
689 struct e1000_ring *rx_ring = adapter->rx_ring;
690 struct e1000_buffer *buffer_info;
693 unsigned int bufsz = 256 - 16 /* for skb_reserve */;
695 i = rx_ring->next_to_use;
696 buffer_info = &rx_ring->buffer_info[i];
698 while (cleaned_count--) {
699 skb = buffer_info->skb;
705 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
706 if (unlikely(!skb)) {
707 /* Better luck next round */
708 adapter->alloc_rx_buff_failed++;
712 buffer_info->skb = skb;
714 /* allocate a new page if necessary */
715 if (!buffer_info->page) {
716 buffer_info->page = alloc_page(GFP_ATOMIC);
717 if (unlikely(!buffer_info->page)) {
718 adapter->alloc_rx_buff_failed++;
723 if (!buffer_info->dma)
724 buffer_info->dma = dma_map_page(&pdev->dev,
725 buffer_info->page, 0,
729 rx_desc = E1000_RX_DESC(*rx_ring, i);
730 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
732 if (unlikely(++i == rx_ring->count))
734 buffer_info = &rx_ring->buffer_info[i];
737 if (likely(rx_ring->next_to_use != i)) {
738 rx_ring->next_to_use = i;
739 if (unlikely(i-- == 0))
740 i = (rx_ring->count - 1);
742 /* Force memory writes to complete before letting h/w
743 * know there are new descriptors to fetch. (Only
744 * applicable for weak-ordered memory model archs,
745 * such as IA-64). */
746 wmb();
747 writel(i, adapter->hw.hw_addr + rx_ring->tail);
752 * e1000_clean_rx_irq - Send received data up the network stack; legacy
753 * @adapter: board private structure
755 * the return value indicates whether actual cleaning was done; there
756 * is no guarantee that everything was cleaned
758 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
759 int *work_done, int work_to_do)
761 struct net_device *netdev = adapter->netdev;
762 struct pci_dev *pdev = adapter->pdev;
763 struct e1000_hw *hw = &adapter->hw;
764 struct e1000_ring *rx_ring = adapter->rx_ring;
765 struct e1000_rx_desc *rx_desc, *next_rxd;
766 struct e1000_buffer *buffer_info, *next_buffer;
769 int cleaned_count = 0;
771 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
773 i = rx_ring->next_to_clean;
774 rx_desc = E1000_RX_DESC(*rx_ring, i);
775 buffer_info = &rx_ring->buffer_info[i];
777 while (rx_desc->status & E1000_RXD_STAT_DD) {
781 if (*work_done >= work_to_do)
785 status = rx_desc->status;
786 skb = buffer_info->skb;
787 buffer_info->skb = NULL;
789 prefetch(skb->data - NET_IP_ALIGN);
792 if (i == rx_ring->count)
794 next_rxd = E1000_RX_DESC(*rx_ring, i);
797 next_buffer = &rx_ring->buffer_info[i];
801 dma_unmap_single(&pdev->dev,
803 adapter->rx_buffer_len,
805 buffer_info->dma = 0;
807 length = le16_to_cpu(rx_desc->length);
810 * !EOP means multiple descriptors were used to store a single
811 * packet, if that's the case we need to toss it. In fact, we
812 * need to toss every packet with the EOP bit clear and the
813 * next frame that _does_ have the EOP bit set, as it is by
814 * definition only a frame fragment
816 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
817 adapter->flags2 |= FLAG2_IS_DISCARDING;
819 if (adapter->flags2 & FLAG2_IS_DISCARDING) {
820 /* All receives must fit into a single buffer */
821 e_dbg("Receive packet consumed multiple buffers\n");
823 buffer_info->skb = skb;
824 if (status & E1000_RXD_STAT_EOP)
825 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
829 if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
831 buffer_info->skb = skb;
835 /* adjust length to remove Ethernet CRC */
836 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
839 total_rx_bytes += length;
843 * code added for copybreak; this should improve
844 * performance for small packets with large amounts
845 * of reassembly being done in the stack
847 if (length < copybreak) {
848 struct sk_buff *new_skb =
849 netdev_alloc_skb_ip_align(netdev, length);
851 skb_copy_to_linear_data_offset(new_skb,
857 /* save the skb in buffer_info as good */
858 buffer_info->skb = skb;
861 /* else just continue with the old one */
863 /* end copybreak code */
864 skb_put(skb, length);
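/*
 * Note (added for clarity): "copybreak" is a module parameter (256 bytes
 * by default in this driver) below which the received data is copied into
 * a freshly allocated, right-sized skb so the original full-sized receive
 * buffer can be handed straight back to the ring instead of being
 * reallocated and DMA-mapped again.
 */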
866 /* Receive Checksum Offload */
867 e1000_rx_checksum(adapter,
869 ((u32)(rx_desc->errors) << 24),
870 le16_to_cpu(rx_desc->csum), skb);
872 e1000_receive_skb(adapter, netdev, skb, status, rx_desc->special);
877 /* return some buffers to hardware, one at a time is too slow */
878 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
879 adapter->alloc_rx_buf(adapter, cleaned_count);
883 /* use prefetched values */
885 buffer_info = next_buffer;
887 rx_ring->next_to_clean = i;
889 cleaned_count = e1000_desc_unused(rx_ring);
891 adapter->alloc_rx_buf(adapter, cleaned_count);
893 adapter->total_rx_bytes += total_rx_bytes;
894 adapter->total_rx_packets += total_rx_packets;
895 netdev->stats.rx_bytes += total_rx_bytes;
896 netdev->stats.rx_packets += total_rx_packets;
900 static void e1000_put_txbuf(struct e1000_adapter *adapter,
901 struct e1000_buffer *buffer_info)
903 if (buffer_info->dma) {
904 if (buffer_info->mapped_as_page)
905 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
906 buffer_info->length, DMA_TO_DEVICE);
908 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
909 buffer_info->length, DMA_TO_DEVICE);
910 buffer_info->dma = 0;
912 if (buffer_info->skb) {
913 dev_kfree_skb_any(buffer_info->skb);
914 buffer_info->skb = NULL;
916 buffer_info->time_stamp = 0;
919 static void e1000_print_hw_hang(struct work_struct *work)
921 struct e1000_adapter *adapter = container_of(work,
922 struct e1000_adapter,
924 struct e1000_ring *tx_ring = adapter->tx_ring;
925 unsigned int i = tx_ring->next_to_clean;
926 unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
927 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
928 struct e1000_hw *hw = &adapter->hw;
929 u16 phy_status, phy_1000t_status, phy_ext_status;
932 e1e_rphy(hw, PHY_STATUS, &phy_status);
933 e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
934 e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
936 pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
938 /* detected Hardware unit hang */
939 e_err("Detected Hardware Unit Hang:\n"
942 " next_to_use <%x>\n"
943 " next_to_clean <%x>\n"
944 "buffer_info[next_to_clean]:\n"
945 " time_stamp <%lx>\n"
946 " next_to_watch <%x>\n"
948 " next_to_watch.status <%x>\n"
951 "PHY 1000BASE-T Status <%x>\n"
952 "PHY Extended Status <%x>\n"
954 readl(adapter->hw.hw_addr + tx_ring->head),
955 readl(adapter->hw.hw_addr + tx_ring->tail),
956 tx_ring->next_to_use,
957 tx_ring->next_to_clean,
958 tx_ring->buffer_info[eop].time_stamp,
961 eop_desc->upper.fields.status,
970 * e1000_clean_tx_irq - Reclaim resources after transmit completes
971 * @adapter: board private structure
973 * the return value indicates whether actual cleaning was done; there
974 * is no guarantee that everything was cleaned
976 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
978 struct net_device *netdev = adapter->netdev;
979 struct e1000_hw *hw = &adapter->hw;
980 struct e1000_ring *tx_ring = adapter->tx_ring;
981 struct e1000_tx_desc *tx_desc, *eop_desc;
982 struct e1000_buffer *buffer_info;
984 unsigned int count = 0;
985 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
987 i = tx_ring->next_to_clean;
988 eop = tx_ring->buffer_info[i].next_to_watch;
989 eop_desc = E1000_TX_DESC(*tx_ring, eop);
991 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
992 (count < tx_ring->count)) {
993 bool cleaned = false;
994 for (; !cleaned; count++) {
995 tx_desc = E1000_TX_DESC(*tx_ring, i);
996 buffer_info = &tx_ring->buffer_info[i];
997 cleaned = (i == eop);
1000 total_tx_packets += buffer_info->segs;
1001 total_tx_bytes += buffer_info->bytecount;
1004 e1000_put_txbuf(adapter, buffer_info);
1005 tx_desc->upper.data = 0;
1008 if (i == tx_ring->count)
1012 if (i == tx_ring->next_to_use)
1014 eop = tx_ring->buffer_info[i].next_to_watch;
1015 eop_desc = E1000_TX_DESC(*tx_ring, eop);
1018 tx_ring->next_to_clean = i;
1020 #define TX_WAKE_THRESHOLD 32
1021 if (count && netif_carrier_ok(netdev) &&
1022 e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
1023 /* Make sure that anybody stopping the queue after this
1024 * sees the new next_to_clean.
1028 if (netif_queue_stopped(netdev) &&
1029 !(test_bit(__E1000_DOWN, &adapter->state))) {
1030 netif_wake_queue(netdev);
1031 ++adapter->restart_queue;
1035 if (adapter->detect_tx_hung) {
1037 * Detect a transmit hang in hardware; this serializes the
1038 * check with the clearing of time_stamp and movement of i
1040 adapter->detect_tx_hung = 0;
1041 if (tx_ring->buffer_info[i].time_stamp &&
1042 time_after(jiffies, tx_ring->buffer_info[i].time_stamp
1043 + (adapter->tx_timeout_factor * HZ)) &&
1044 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
1045 schedule_work(&adapter->print_hang_task);
1046 netif_stop_queue(netdev);
1049 adapter->total_tx_bytes += total_tx_bytes;
1050 adapter->total_tx_packets += total_tx_packets;
1051 netdev->stats.tx_bytes += total_tx_bytes;
1052 netdev->stats.tx_packets += total_tx_packets;
1053 return (count < tx_ring->count);
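/*
 * Note (added for clarity): the return value is true when cleaning
 * finished before a full ring's worth of descriptors was processed; the
 * MSI-X Tx handler later in this file re-raises the Tx interrupt whenever
 * this function returns false, so leftover work is picked up again.
 */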
1057 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
1058 * @adapter: board private structure
1060 * the return value indicates whether actual cleaning was done; there
1061 * is no guarantee that everything was cleaned
1063 static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
1064 int *work_done, int work_to_do)
1066 struct e1000_hw *hw = &adapter->hw;
1067 union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
1068 struct net_device *netdev = adapter->netdev;
1069 struct pci_dev *pdev = adapter->pdev;
1070 struct e1000_ring *rx_ring = adapter->rx_ring;
1071 struct e1000_buffer *buffer_info, *next_buffer;
1072 struct e1000_ps_page *ps_page;
1073 struct sk_buff *skb;
1075 u32 length, staterr;
1076 int cleaned_count = 0;
1078 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1080 i = rx_ring->next_to_clean;
1081 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
1082 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
1083 buffer_info = &rx_ring->buffer_info[i];
1085 while (staterr & E1000_RXD_STAT_DD) {
1086 if (*work_done >= work_to_do)
1089 skb = buffer_info->skb;
1091 /* in the packet split case this is header only */
1092 prefetch(skb->data - NET_IP_ALIGN);
1095 if (i == rx_ring->count)
1097 next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
1100 next_buffer = &rx_ring->buffer_info[i];
1104 dma_unmap_single(&pdev->dev, buffer_info->dma,
1105 adapter->rx_ps_bsize0,
1107 buffer_info->dma = 0;
1109 /* see !EOP comment in other rx routine */
1110 if (!(staterr & E1000_RXD_STAT_EOP))
1111 adapter->flags2 |= FLAG2_IS_DISCARDING;
1113 if (adapter->flags2 & FLAG2_IS_DISCARDING) {
1114 e_dbg("Packet Split buffers didn't pick up the full "
1115 "packet\n");
1116 dev_kfree_skb_irq(skb);
1117 if (staterr & E1000_RXD_STAT_EOP)
1118 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1122 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
1123 dev_kfree_skb_irq(skb);
1127 length = le16_to_cpu(rx_desc->wb.middle.length0);
1130 e_dbg("Last part of the packet spanning multiple "
1131 "descriptors\n");
1132 dev_kfree_skb_irq(skb);
1137 skb_put(skb, length);
1141 * this looks ugly, but it seems compiler issues make it
1142 * more efficient than reusing j
1144 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
1147 * page alloc/put takes too long and affects small packet
1148 * throughput, so unsplit small packets and save the alloc/put
1149 * only valid in softirq (napi) context to call kmap_*
1151 if (l1 && (l1 <= copybreak) &&
1152 ((length + l1) <= adapter->rx_ps_bsize0)) {
1155 ps_page = &buffer_info->ps_pages[0];
1158 * there is no documentation about how to call
1159 * kmap_atomic, so we can't hold the mapping
1162 dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
1163 PAGE_SIZE, DMA_FROM_DEVICE);
1164 vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
1165 memcpy(skb_tail_pointer(skb), vaddr, l1);
1166 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
1167 dma_sync_single_for_device(&pdev->dev, ps_page->dma,
1168 PAGE_SIZE, DMA_FROM_DEVICE);
1170 /* remove the CRC */
1171 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
1179 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1180 length = le16_to_cpu(rx_desc->wb.upper.length[j]);
1184 ps_page = &buffer_info->ps_pages[j];
1185 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1188 skb_fill_page_desc(skb, j, ps_page->page, 0, length);
1189 ps_page->page = NULL;
1191 skb->data_len += length;
1192 skb->truesize += length;
1195 /* strip the ethernet crc, problem is we're using pages now so
1196 * this whole operation can get a little cpu intensive
1198 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
1199 pskb_trim(skb, skb->len - 4);
1202 total_rx_bytes += skb->len;
1205 e1000_rx_checksum(adapter, staterr, le16_to_cpu(
1206 rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
1208 if (rx_desc->wb.upper.header_status &
1209 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
1210 adapter->rx_hdr_split++;
1212 e1000_receive_skb(adapter, netdev, skb,
1213 staterr, rx_desc->wb.middle.vlan);
1216 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
1217 buffer_info->skb = NULL;
1219 /* return some buffers to hardware, one at a time is too slow */
1220 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
1221 adapter->alloc_rx_buf(adapter, cleaned_count);
1225 /* use prefetched values */
1227 buffer_info = next_buffer;
1229 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
1231 rx_ring->next_to_clean = i;
1233 cleaned_count = e1000_desc_unused(rx_ring);
1235 adapter->alloc_rx_buf(adapter, cleaned_count);
1237 adapter->total_rx_bytes += total_rx_bytes;
1238 adapter->total_rx_packets += total_rx_packets;
1239 netdev->stats.rx_bytes += total_rx_bytes;
1240 netdev->stats.rx_packets += total_rx_packets;
1245 * e1000_consume_page - helper function
1247 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
1252 skb->data_len += length;
1253 skb->truesize += length;
1257 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
1258 * @adapter: board private structure
1260 * the return value indicates whether actual cleaning was done; there
1261 * is no guarantee that everything was cleaned
1264 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
1265 int *work_done, int work_to_do)
1267 struct net_device *netdev = adapter->netdev;
1268 struct pci_dev *pdev = adapter->pdev;
1269 struct e1000_ring *rx_ring = adapter->rx_ring;
1270 struct e1000_rx_desc *rx_desc, *next_rxd;
1271 struct e1000_buffer *buffer_info, *next_buffer;
1274 int cleaned_count = 0;
1275 bool cleaned = false;
1276 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1278 i = rx_ring->next_to_clean;
1279 rx_desc = E1000_RX_DESC(*rx_ring, i);
1280 buffer_info = &rx_ring->buffer_info[i];
1282 while (rx_desc->status & E1000_RXD_STAT_DD) {
1283 struct sk_buff *skb;
1286 if (*work_done >= work_to_do)
1290 status = rx_desc->status;
1291 skb = buffer_info->skb;
1292 buffer_info->skb = NULL;
1295 if (i == rx_ring->count)
1297 next_rxd = E1000_RX_DESC(*rx_ring, i);
1300 next_buffer = &rx_ring->buffer_info[i];
1304 dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
1306 buffer_info->dma = 0;
1308 length = le16_to_cpu(rx_desc->length);
1310 /* errors is only valid for DD + EOP descriptors */
1311 if (unlikely((status & E1000_RXD_STAT_EOP) &&
1312 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
1313 /* recycle both page and skb */
1314 buffer_info->skb = skb;
1315 /* an error means any chain goes out the window
1317 if (rx_ring->rx_skb_top)
1318 dev_kfree_skb(rx_ring->rx_skb_top);
1319 rx_ring->rx_skb_top = NULL;
1323 #define rxtop rx_ring->rx_skb_top
1324 if (!(status & E1000_RXD_STAT_EOP)) {
1325 /* this descriptor is only the beginning (or middle) */
1327 /* this is the beginning of a chain */
1329 skb_fill_page_desc(rxtop, 0, buffer_info->page,
1332 /* this is the middle of a chain */
1333 skb_fill_page_desc(rxtop,
1334 skb_shinfo(rxtop)->nr_frags,
1335 buffer_info->page, 0, length);
1336 /* re-use the skb, only consumed the page */
1337 buffer_info->skb = skb;
1339 e1000_consume_page(buffer_info, rxtop, length);
1343 /* end of the chain */
1344 skb_fill_page_desc(rxtop,
1345 skb_shinfo(rxtop)->nr_frags,
1346 buffer_info->page, 0, length);
1347 /* re-use the current skb, we only consumed the
1349 buffer_info->skb = skb;
1352 e1000_consume_page(buffer_info, skb, length);
1354 /* no chain, got EOP, this buf is the packet
1355 * copybreak to save the put_page/alloc_page */
1356 if (length <= copybreak &&
1357 skb_tailroom(skb) >= length) {
1359 vaddr = kmap_atomic(buffer_info->page,
1360 KM_SKB_DATA_SOFTIRQ);
1361 memcpy(skb_tail_pointer(skb), vaddr,
1363 kunmap_atomic(vaddr,
1364 KM_SKB_DATA_SOFTIRQ);
1365 /* re-use the page, so don't erase
1366 * buffer_info->page */
1367 skb_put(skb, length);
1369 skb_fill_page_desc(skb, 0,
1370 buffer_info->page, 0,
1372 e1000_consume_page(buffer_info, skb,
1378 /* Receive Checksum Offload XXX recompute due to CRC strip? */
1379 e1000_rx_checksum(adapter,
1381 ((u32)(rx_desc->errors) << 24),
1382 le16_to_cpu(rx_desc->csum), skb);
1384 /* probably a little skewed due to removing CRC */
1385 total_rx_bytes += skb->len;
1388 /* eth type trans needs skb->data to point to something */
1389 if (!pskb_may_pull(skb, ETH_HLEN)) {
1390 e_err("pskb_may_pull failed.\n");
1395 e1000_receive_skb(adapter, netdev, skb, status,
1399 rx_desc->status = 0;
1401 /* return some buffers to hardware, one at a time is too slow */
1402 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
1403 adapter->alloc_rx_buf(adapter, cleaned_count);
1407 /* use prefetched values */
1409 buffer_info = next_buffer;
1411 rx_ring->next_to_clean = i;
1413 cleaned_count = e1000_desc_unused(rx_ring);
1415 adapter->alloc_rx_buf(adapter, cleaned_count);
1417 adapter->total_rx_bytes += total_rx_bytes;
1418 adapter->total_rx_packets += total_rx_packets;
1419 netdev->stats.rx_bytes += total_rx_bytes;
1420 netdev->stats.rx_packets += total_rx_packets;
1425 * e1000_clean_rx_ring - Free Rx Buffers per Queue
1426 * @adapter: board private structure
1428 static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
1430 struct e1000_ring *rx_ring = adapter->rx_ring;
1431 struct e1000_buffer *buffer_info;
1432 struct e1000_ps_page *ps_page;
1433 struct pci_dev *pdev = adapter->pdev;
1436 /* Free all the Rx ring sk_buffs */
1437 for (i = 0; i < rx_ring->count; i++) {
1438 buffer_info = &rx_ring->buffer_info[i];
1439 if (buffer_info->dma) {
1440 if (adapter->clean_rx == e1000_clean_rx_irq)
1441 dma_unmap_single(&pdev->dev, buffer_info->dma,
1442 adapter->rx_buffer_len,
1444 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
1445 dma_unmap_page(&pdev->dev, buffer_info->dma,
1448 else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
1449 dma_unmap_single(&pdev->dev, buffer_info->dma,
1450 adapter->rx_ps_bsize0,
1452 buffer_info->dma = 0;
1455 if (buffer_info->page) {
1456 put_page(buffer_info->page);
1457 buffer_info->page = NULL;
1460 if (buffer_info->skb) {
1461 dev_kfree_skb(buffer_info->skb);
1462 buffer_info->skb = NULL;
1465 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1466 ps_page = &buffer_info->ps_pages[j];
1469 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1472 put_page(ps_page->page);
1473 ps_page->page = NULL;
1477 /* there also may be some cached data from a chained receive */
1478 if (rx_ring->rx_skb_top) {
1479 dev_kfree_skb(rx_ring->rx_skb_top);
1480 rx_ring->rx_skb_top = NULL;
1483 /* Zero out the descriptor ring */
1484 memset(rx_ring->desc, 0, rx_ring->size);
1486 rx_ring->next_to_clean = 0;
1487 rx_ring->next_to_use = 0;
1488 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1490 writel(0, adapter->hw.hw_addr + rx_ring->head);
1491 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1494 static void e1000e_downshift_workaround(struct work_struct *work)
1496 struct e1000_adapter *adapter = container_of(work,
1497 struct e1000_adapter, downshift_task);
1499 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
1503 * e1000_intr_msi - Interrupt Handler
1504 * @irq: interrupt number
1505 * @data: pointer to a network interface device structure
1507 static irqreturn_t e1000_intr_msi(int irq, void *data)
1509 struct net_device *netdev = data;
1510 struct e1000_adapter *adapter = netdev_priv(netdev);
1511 struct e1000_hw *hw = &adapter->hw;
1512 u32 icr = er32(ICR);
1515 * read ICR disables interrupts using IAM
1518 if (icr & E1000_ICR_LSC) {
1519 hw->mac.get_link_status = 1;
1521 * ICH8 workaround-- Call gig speed drop workaround on cable
1522 * disconnect (LSC) before accessing any PHY registers
1524 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1525 (!(er32(STATUS) & E1000_STATUS_LU)))
1526 schedule_work(&adapter->downshift_task);
1529 * 80003ES2LAN workaround-- For packet buffer work-around on
1530 * link down event; disable receives here in the ISR and reset
1531 * adapter in watchdog
1533 if (netif_carrier_ok(netdev) &&
1534 adapter->flags & FLAG_RX_NEEDS_RESTART) {
1535 /* disable receives */
1536 u32 rctl = er32(RCTL);
1537 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1538 adapter->flags |= FLAG_RX_RESTART_NOW;
1540 /* guard against interrupt when we're going down */
1541 if (!test_bit(__E1000_DOWN, &adapter->state))
1542 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1545 if (napi_schedule_prep(&adapter->napi)) {
1546 adapter->total_tx_bytes = 0;
1547 adapter->total_tx_packets = 0;
1548 adapter->total_rx_bytes = 0;
1549 adapter->total_rx_packets = 0;
1550 __napi_schedule(&adapter->napi);
1557 * e1000_intr - Interrupt Handler
1558 * @irq: interrupt number
1559 * @data: pointer to a network interface device structure
1561 static irqreturn_t e1000_intr(int irq, void *data)
1563 struct net_device *netdev = data;
1564 struct e1000_adapter *adapter = netdev_priv(netdev);
1565 struct e1000_hw *hw = &adapter->hw;
1566 u32 rctl, icr = er32(ICR);
1568 if (!icr || test_bit(__E1000_DOWN, &adapter->state))
1569 return IRQ_NONE; /* Not our interrupt */
1572 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
1573 * not set, then the adapter didn't send an interrupt
1575 if (!(icr & E1000_ICR_INT_ASSERTED))
1579 * Interrupt Auto-Mask...upon reading ICR,
1580 * interrupts are masked. No need for the
1584 if (icr & E1000_ICR_LSC) {
1585 hw->mac.get_link_status = 1;
1587 * ICH8 workaround-- Call gig speed drop workaround on cable
1588 * disconnect (LSC) before accessing any PHY registers
1590 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1591 (!(er32(STATUS) & E1000_STATUS_LU)))
1592 schedule_work(&adapter->downshift_task);
1595 * 80003ES2LAN workaround--
1596 * For packet buffer work-around on link down event;
1597 * disable receives here in the ISR and
1598 * reset adapter in watchdog
1600 if (netif_carrier_ok(netdev) &&
1601 (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
1602 /* disable receives */
1604 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1605 adapter->flags |= FLAG_RX_RESTART_NOW;
1607 /* guard against interrupt when we're going down */
1608 if (!test_bit(__E1000_DOWN, &adapter->state))
1609 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1612 if (napi_schedule_prep(&adapter->napi)) {
1613 adapter->total_tx_bytes = 0;
1614 adapter->total_tx_packets = 0;
1615 adapter->total_rx_bytes = 0;
1616 adapter->total_rx_packets = 0;
1617 __napi_schedule(&adapter->napi);
1623 static irqreturn_t e1000_msix_other(int irq, void *data)
1625 struct net_device *netdev = data;
1626 struct e1000_adapter *adapter = netdev_priv(netdev);
1627 struct e1000_hw *hw = &adapter->hw;
1628 u32 icr = er32(ICR);
1630 if (!(icr & E1000_ICR_INT_ASSERTED)) {
1631 if (!test_bit(__E1000_DOWN, &adapter->state))
1632 ew32(IMS, E1000_IMS_OTHER);
1636 if (icr & adapter->eiac_mask)
1637 ew32(ICS, (icr & adapter->eiac_mask));
1639 if (icr & E1000_ICR_OTHER) {
1640 if (!(icr & E1000_ICR_LSC))
1641 goto no_link_interrupt;
1642 hw->mac.get_link_status = 1;
1643 /* guard against interrupt when we're going down */
1644 if (!test_bit(__E1000_DOWN, &adapter->state))
1645 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1649 if (!test_bit(__E1000_DOWN, &adapter->state))
1650 ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
1656 static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
1658 struct net_device *netdev = data;
1659 struct e1000_adapter *adapter = netdev_priv(netdev);
1660 struct e1000_hw *hw = &adapter->hw;
1661 struct e1000_ring *tx_ring = adapter->tx_ring;
1664 adapter->total_tx_bytes = 0;
1665 adapter->total_tx_packets = 0;
1667 if (!e1000_clean_tx_irq(adapter))
1668 /* Ring was not completely cleaned, so fire another interrupt */
1669 ew32(ICS, tx_ring->ims_val);
1674 static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
1676 struct net_device *netdev = data;
1677 struct e1000_adapter *adapter = netdev_priv(netdev);
1679 /* Write the ITR value calculated at the end of the
1680 * previous interrupt.
1682 if (adapter->rx_ring->set_itr) {
1683 writel(1000000000 / (adapter->rx_ring->itr_val * 256),
1684 adapter->hw.hw_addr + adapter->rx_ring->itr_register);
1685 adapter->rx_ring->set_itr = 0;
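/*
 * Illustrative arithmetic (added, not in the original source): itr_val is
 * an interrupt rate in interrupts/second and the ITR register counts in
 * 256 ns units, so itr_val = 20000 writes 1000000000 / (20000 * 256) = 195,
 * i.e. roughly 195 * 256 ns ~= 50 us between interrupts.
 */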
1688 if (napi_schedule_prep(&adapter->napi)) {
1689 adapter->total_rx_bytes = 0;
1690 adapter->total_rx_packets = 0;
1691 __napi_schedule(&adapter->napi);
1697 * e1000_configure_msix - Configure MSI-X hardware
1699 * e1000_configure_msix sets up the hardware to properly
1700 * generate MSI-X interrupts.
1702 static void e1000_configure_msix(struct e1000_adapter *adapter)
1704 struct e1000_hw *hw = &adapter->hw;
1705 struct e1000_ring *rx_ring = adapter->rx_ring;
1706 struct e1000_ring *tx_ring = adapter->tx_ring;
1708 u32 ctrl_ext, ivar = 0;
1710 adapter->eiac_mask = 0;
1712 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
1713 if (hw->mac.type == e1000_82574) {
1714 u32 rfctl = er32(RFCTL);
1715 rfctl |= E1000_RFCTL_ACK_DIS;
1719 #define E1000_IVAR_INT_ALLOC_VALID 0x8
1720 /* Configure Rx vector */
1721 rx_ring->ims_val = E1000_IMS_RXQ0;
1722 adapter->eiac_mask |= rx_ring->ims_val;
1723 if (rx_ring->itr_val)
1724 writel(1000000000 / (rx_ring->itr_val * 256),
1725 hw->hw_addr + rx_ring->itr_register);
1727 writel(1, hw->hw_addr + rx_ring->itr_register);
1728 ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
1730 /* Configure Tx vector */
1731 tx_ring->ims_val = E1000_IMS_TXQ0;
1733 if (tx_ring->itr_val)
1734 writel(1000000000 / (tx_ring->itr_val * 256),
1735 hw->hw_addr + tx_ring->itr_register);
1737 writel(1, hw->hw_addr + tx_ring->itr_register);
1738 adapter->eiac_mask |= tx_ring->ims_val;
1739 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
1741 /* set vector for Other Causes, e.g. link changes */
1743 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
1744 if (rx_ring->itr_val)
1745 writel(1000000000 / (rx_ring->itr_val * 256),
1746 hw->hw_addr + E1000_EITR_82574(vector));
1748 writel(1, hw->hw_addr + E1000_EITR_82574(vector));
1750 /* Cause Tx interrupts on every write back */
1755 /* enable MSI-X PBA support */
1756 ctrl_ext = er32(CTRL_EXT);
1757 ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
1759 /* Auto-Mask Other interrupts upon ICR read */
1760 #define E1000_EIAC_MASK_82574 0x01F00000
1761 ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
1762 ctrl_ext |= E1000_CTRL_EXT_EIAME;
1763 ew32(CTRL_EXT, ctrl_ext);
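/*
 * Note (added for clarity): on the 82574 the ivar value assembled above
 * carries three vector entries in one register -- the Rx-queue-0 entry in
 * the low bits, the Tx-queue-0 entry shifted up by 8 and the "other
 * causes" entry shifted up by 16 -- each tagged with
 * E1000_IVAR_INT_ALLOC_VALID (0x8) so the hardware treats it as valid.
 */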
1767 void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
1769 if (adapter->msix_entries) {
1770 pci_disable_msix(adapter->pdev);
1771 kfree(adapter->msix_entries);
1772 adapter->msix_entries = NULL;
1773 } else if (adapter->flags & FLAG_MSI_ENABLED) {
1774 pci_disable_msi(adapter->pdev);
1775 adapter->flags &= ~FLAG_MSI_ENABLED;
1780 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
1782 * Attempt to configure interrupts using the best available
1783 * capabilities of the hardware and kernel.
1785 void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
1791 switch (adapter->int_mode) {
1792 case E1000E_INT_MODE_MSIX:
1793 if (adapter->flags & FLAG_HAS_MSIX) {
1794 numvecs = 3; /* RxQ0, TxQ0 and other */
1795 adapter->msix_entries = kcalloc(numvecs,
1796 sizeof(struct msix_entry),
1798 if (adapter->msix_entries) {
1799 for (i = 0; i < numvecs; i++)
1800 adapter->msix_entries[i].entry = i;
1802 err = pci_enable_msix(adapter->pdev,
1803 adapter->msix_entries,
1808 /* MSI-X failed, so fall through and try MSI */
1809 e_err("Failed to initialize MSI-X interrupts. "
1810 "Falling back to MSI interrupts.\n");
1811 e1000e_reset_interrupt_capability(adapter);
1813 adapter->int_mode = E1000E_INT_MODE_MSI;
1815 case E1000E_INT_MODE_MSI:
1816 if (!pci_enable_msi(adapter->pdev)) {
1817 adapter->flags |= FLAG_MSI_ENABLED;
1819 adapter->int_mode = E1000E_INT_MODE_LEGACY;
1820 e_err("Failed to initialize MSI interrupts. Falling "
1821 "back to legacy interrupts.\n");
1824 case E1000E_INT_MODE_LEGACY:
1825 /* Don't do anything; this is the system default */
1831 * e1000_request_msix - Initialize MSI-X interrupts
1833 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
1836 static int e1000_request_msix(struct e1000_adapter *adapter)
1838 struct net_device *netdev = adapter->netdev;
1839 int err = 0, vector = 0;
1841 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1842 sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
1844 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1845 err = request_irq(adapter->msix_entries[vector].vector,
1846 e1000_intr_msix_rx, 0, adapter->rx_ring->name,
1850 adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
1851 adapter->rx_ring->itr_val = adapter->itr;
1854 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1855 sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
1857 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
1858 err = request_irq(adapter->msix_entries[vector].vector,
1859 e1000_intr_msix_tx, 0, adapter->tx_ring->name,
1863 adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
1864 adapter->tx_ring->itr_val = adapter->itr;
1867 err = request_irq(adapter->msix_entries[vector].vector,
1868 e1000_msix_other, 0, netdev->name, netdev);
1872 e1000_configure_msix(adapter);
1879 * e1000_request_irq - initialize interrupts
1881 * Attempts to configure interrupts using the best available
1882 * capabilities of the hardware and kernel.
1884 static int e1000_request_irq(struct e1000_adapter *adapter)
1886 struct net_device *netdev = adapter->netdev;
1889 if (adapter->msix_entries) {
1890 err = e1000_request_msix(adapter);
1893 /* fall back to MSI */
1894 e1000e_reset_interrupt_capability(adapter);
1895 adapter->int_mode = E1000E_INT_MODE_MSI;
1896 e1000e_set_interrupt_capability(adapter);
1898 if (adapter->flags & FLAG_MSI_ENABLED) {
1899 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
1900 netdev->name, netdev);
1904 /* fall back to legacy interrupt */
1905 e1000e_reset_interrupt_capability(adapter);
1906 adapter->int_mode = E1000E_INT_MODE_LEGACY;
1909 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
1910 netdev->name, netdev);
1912 e_err("Unable to allocate interrupt, Error: %d\n", err);
1917 static void e1000_free_irq(struct e1000_adapter *adapter)
1919 struct net_device *netdev = adapter->netdev;
1921 if (adapter->msix_entries) {
1924 free_irq(adapter->msix_entries[vector].vector, netdev);
1927 free_irq(adapter->msix_entries[vector].vector, netdev);
1930 /* Other Causes interrupt vector */
1931 free_irq(adapter->msix_entries[vector].vector, netdev);
1935 free_irq(adapter->pdev->irq, netdev);
1939 * e1000_irq_disable - Mask off interrupt generation on the NIC
1941 static void e1000_irq_disable(struct e1000_adapter *adapter)
1943 struct e1000_hw *hw = &adapter->hw;
1946 if (adapter->msix_entries)
1947 ew32(EIAC_82574, 0);
1949 synchronize_irq(adapter->pdev->irq);
1953 * e1000_irq_enable - Enable default interrupt generation settings
1955 static void e1000_irq_enable(struct e1000_adapter *adapter)
1957 struct e1000_hw *hw = &adapter->hw;
1959 if (adapter->msix_entries) {
1960 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
1961 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
1963 ew32(IMS, IMS_ENABLE_MASK);
1969 * e1000_get_hw_control - get control of the h/w from f/w
1970 * @adapter: address of board private structure
1972 * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
1973 * For ASF and Pass Through versions of f/w this means that
1974 * the driver is loaded. For AMT version (only with 82573)
1975 * of the f/w this means that the network i/f is open.
1977 static void e1000_get_hw_control(struct e1000_adapter *adapter)
1979 struct e1000_hw *hw = &adapter->hw;
1983 /* Let firmware know the driver has taken over */
1984 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
1986 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
1987 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
1988 ctrl_ext = er32(CTRL_EXT);
1989 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1994 * e1000_release_hw_control - release control of the h/w to f/w
1995 * @adapter: address of board private structure
1997 * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
1998 * For ASF and Pass Through versions of f/w this means that the
1999 * driver is no longer loaded. For AMT version (only with 82573)
2000 * of the f/w this means that the network i/f is closed.
2003 static void e1000_release_hw_control(struct e1000_adapter *adapter)
2005 struct e1000_hw *hw = &adapter->hw;
2009 /* Let firmware take over control of h/w */
2010 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2012 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
2013 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2014 ctrl_ext = er32(CTRL_EXT);
2015 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2020 * e1000_alloc_ring_dma - allocate memory for a ring structure
2022 static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2023 struct e1000_ring *ring)
2025 struct pci_dev *pdev = adapter->pdev;
2027 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
2036 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
2037 * @adapter: board private structure
2039 * Return 0 on success, negative on failure
2041 int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
2043 struct e1000_ring *tx_ring = adapter->tx_ring;
2044 int err = -ENOMEM, size;
2046 size = sizeof(struct e1000_buffer) * tx_ring->count;
2047 tx_ring->buffer_info = vmalloc(size);
2048 if (!tx_ring->buffer_info)
2050 memset(tx_ring->buffer_info, 0, size);
2052 /* round up to nearest 4K */
2053 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
2054 tx_ring->size = ALIGN(tx_ring->size, 4096);
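/*
 * Worked example (added for clarity): a legacy Tx descriptor is 16 bytes,
 * so the default 256-descriptor ring needs exactly 4096 bytes and the
 * ALIGN() above leaves it unchanged; any size that is not already a
 * multiple of 4096 would be rounded up to the next 4 KiB boundary.
 */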
2056 err = e1000_alloc_ring_dma(adapter, tx_ring);
2060 tx_ring->next_to_use = 0;
2061 tx_ring->next_to_clean = 0;
2065 vfree(tx_ring->buffer_info);
2066 e_err("Unable to allocate memory for the transmit descriptor ring\n");
2071 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
2072 * @adapter: board private structure
2074 * Returns 0 on success, negative on failure
2076 int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
2078 struct e1000_ring *rx_ring = adapter->rx_ring;
2079 struct e1000_buffer *buffer_info;
2080 int i, size, desc_len, err = -ENOMEM;
2082 size = sizeof(struct e1000_buffer) * rx_ring->count;
2083 rx_ring->buffer_info = vmalloc(size);
2084 if (!rx_ring->buffer_info)
2086 memset(rx_ring->buffer_info, 0, size);
2088 for (i = 0; i < rx_ring->count; i++) {
2089 buffer_info = &rx_ring->buffer_info[i];
2090 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
2091 sizeof(struct e1000_ps_page),
2093 if (!buffer_info->ps_pages)
2097 desc_len = sizeof(union e1000_rx_desc_packet_split);
2099 /* Round up to nearest 4K */
2100 rx_ring->size = rx_ring->count * desc_len;
2101 rx_ring->size = ALIGN(rx_ring->size, 4096);
2103 err = e1000_alloc_ring_dma(adapter, rx_ring);
2107 rx_ring->next_to_clean = 0;
2108 rx_ring->next_to_use = 0;
2109 rx_ring->rx_skb_top = NULL;
2114 for (i = 0; i < rx_ring->count; i++) {
2115 buffer_info = &rx_ring->buffer_info[i];
2116 kfree(buffer_info->ps_pages);
2119 vfree(rx_ring->buffer_info);
2120 e_err("Unable to allocate memory for the receive descriptor ring\n");
2125 * e1000_clean_tx_ring - Free Tx Buffers
2126 * @adapter: board private structure
2128 static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
2130 struct e1000_ring *tx_ring = adapter->tx_ring;
2131 struct e1000_buffer *buffer_info;
2135 for (i = 0; i < tx_ring->count; i++) {
2136 buffer_info = &tx_ring->buffer_info[i];
2137 e1000_put_txbuf(adapter, buffer_info);
2140 size = sizeof(struct e1000_buffer) * tx_ring->count;
2141 memset(tx_ring->buffer_info, 0, size);
2143 memset(tx_ring->desc, 0, tx_ring->size);
2145 tx_ring->next_to_use = 0;
2146 tx_ring->next_to_clean = 0;
2148 writel(0, adapter->hw.hw_addr + tx_ring->head);
2149 writel(0, adapter->hw.hw_addr + tx_ring->tail);
2153 * e1000e_free_tx_resources - Free Tx Resources per Queue
2154 * @adapter: board private structure
2156 * Free all transmit software resources
2158 void e1000e_free_tx_resources(struct e1000_adapter *adapter)
2160 struct pci_dev *pdev = adapter->pdev;
2161 struct e1000_ring *tx_ring = adapter->tx_ring;
2163 e1000_clean_tx_ring(adapter);
2165 vfree(tx_ring->buffer_info);
2166 tx_ring->buffer_info = NULL;
2168 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2170 tx_ring->desc = NULL;
2174 * e1000e_free_rx_resources - Free Rx Resources
2175 * @adapter: board private structure
2177 * Free all receive software resources
2180 void e1000e_free_rx_resources(struct e1000_adapter *adapter)
2182 struct pci_dev *pdev = adapter->pdev;
2183 struct e1000_ring *rx_ring = adapter->rx_ring;
2186 e1000_clean_rx_ring(adapter);
2188 for (i = 0; i < rx_ring->count; i++) {
2189 kfree(rx_ring->buffer_info[i].ps_pages);
2192 vfree(rx_ring->buffer_info);
2193 rx_ring->buffer_info = NULL;
2195 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2197 rx_ring->desc = NULL;
2201 * e1000_update_itr - update the dynamic ITR value based on statistics
2202 * @adapter: pointer to adapter
2203 * @itr_setting: current adapter->itr
2204 * @packets: the number of packets during this measurement interval
2205 * @bytes: the number of bytes during this measurement interval
2207 * Stores a new ITR value based on packets and byte
2208 * counts during the last interrupt. The advantage of per interrupt
2209 * computation is faster updates and more accurate ITR for the current
2210 * traffic pattern. Constants in this function were computed
2211 * based on theoretical maximum wire speed and thresholds were set based
2212 * on testing data as well as attempting to minimize response time
2213 * while increasing bulk throughput. This functionality is controlled
2214 * by the InterruptThrottleRate module parameter.
2216 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2217 u16 itr_setting, int packets,
2220 unsigned int retval = itr_setting;
2223 goto update_itr_done;
2225 switch (itr_setting) {
2226 case lowest_latency:
2227 /* handle TSO and jumbo frames */
2228 if (bytes/packets > 8000)
2229 retval = bulk_latency;
2230 else if ((packets < 5) && (bytes > 512)) {
2231 retval = low_latency;
2234 case low_latency: /* 50 usec aka 20000 ints/s */
2235 if (bytes > 10000) {
2236 /* this if handles the TSO accounting */
2237 if (bytes/packets > 8000) {
2238 retval = bulk_latency;
2239 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
2240 retval = bulk_latency;
2241 } else if ((packets > 35)) {
2242 retval = lowest_latency;
2244 } else if (bytes/packets > 2000) {
2245 retval = bulk_latency;
2246 } else if (packets <= 2 && bytes < 512) {
2247 retval = lowest_latency;
2250 case bulk_latency: /* 250 usec aka 4000 ints/s */
2251 if (bytes > 25000) {
2253 retval = low_latency;
2255 } else if (bytes < 6000) {
2256 retval = low_latency;
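/*
 * Two illustrative intervals against the thresholds above: 100 small
 * packets totalling 8000 bytes seen while in low_latency stay in
 * low_latency (bytes <= 10000, bytes/packets = 80), whereas 3 large TSO
 * sends totalling 45000 bytes (bytes/packets = 15000 > 8000) push the
 * estimate to bulk_latency.
 */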
2265 static void e1000_set_itr(struct e1000_adapter *adapter)
2267 struct e1000_hw *hw = &adapter->hw;
2269 u32 new_itr = adapter->itr;
2271 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2272 if (adapter->link_speed != SPEED_1000) {
2278 adapter->tx_itr = e1000_update_itr(adapter,
2280 adapter->total_tx_packets,
2281 adapter->total_tx_bytes);
2282 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2283 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2284 adapter->tx_itr = low_latency;
2286 adapter->rx_itr = e1000_update_itr(adapter,
2288 adapter->total_rx_packets,
2289 adapter->total_rx_bytes);
2290 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2291 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2292 adapter->rx_itr = low_latency;
2294 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2296 switch (current_itr) {
2297 /* counts and packets in update_itr are dependent on these numbers */
2298 case lowest_latency:
2302 new_itr = 20000; /* aka hwitr = ~200 */
2312 if (new_itr != adapter->itr) {
2314 * this attempts to bias the interrupt rate towards Bulk
2315 * by adding intermediate steps when interrupt rate is
2318 new_itr = new_itr > adapter->itr ?
2319 min(adapter->itr + (new_itr >> 2), new_itr) :
2321 adapter->itr = new_itr;
2322 adapter->rx_ring->itr_val = new_itr;
2323 if (adapter->msix_entries)
2324 adapter->rx_ring->set_itr = 1;
2326 ew32(ITR, 1000000000 / (new_itr * 256));
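/*
 * The ITR register counts in 256 ns units, hence the 10^9 / (rate * 256)
 * conversion: a target of 20000 interrupts/sec, for example, programs a
 * value of roughly 195 (about 50 us between interrupts), which matches
 * the "hwitr = ~200" note above.
 */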
2331 * e1000_alloc_queues - Allocate memory for all rings
2332 * @adapter: board private structure to initialize
2334 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
2336 adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
2337 if (!adapter->tx_ring)
2340 adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
2341 if (!adapter->rx_ring)
2346 e_err("Unable to allocate memory for queues\n");
2347 kfree(adapter->rx_ring);
2348 kfree(adapter->tx_ring);
2353 * e1000_clean - NAPI Rx polling callback
2354 * @napi: struct associated with this polling callback
2355 * @budget: number of packets the driver is allowed to process this poll
2357 static int e1000_clean(struct napi_struct *napi, int budget)
2359 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
2360 struct e1000_hw *hw = &adapter->hw;
2361 struct net_device *poll_dev = adapter->netdev;
2362 int tx_cleaned = 1, work_done = 0;
2364 adapter = netdev_priv(poll_dev);
2366 if (adapter->msix_entries &&
2367 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2370 tx_cleaned = e1000_clean_tx_irq(adapter);
2373 adapter->clean_rx(adapter, &work_done, budget);
2378 /* If budget not fully consumed, exit the polling mode */
2379 if (work_done < budget) {
2380 if (adapter->itr_setting & 3)
2381 e1000_set_itr(adapter);
2382 napi_complete(napi);
2383 if (!test_bit(__E1000_DOWN, &adapter->state)) {
2384 if (adapter->msix_entries)
2385 ew32(IMS, adapter->rx_ring->ims_val);
2387 e1000_irq_enable(adapter);
2394 static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2396 struct e1000_adapter *adapter = netdev_priv(netdev);
2397 struct e1000_hw *hw = &adapter->hw;
2400 /* don't update vlan cookie if already programmed */
2401 if ((adapter->hw.mng_cookie.status &
2402 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2403 (vid == adapter->mng_vlan_id))
2406 /* add VID to filter table */
2407 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2408 index = (vid >> 5) & 0x7F;
2409 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2410 vfta |= (1 << (vid & 0x1F));
2411 hw->mac.ops.write_vfta(hw, index, vfta);
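/*
 * The VLAN filter table array is 128 32-bit entries covering all 4096
 * VLAN IDs: bits 5-11 of the VID select the entry and bits 0-4 select
 * the bit within it, so VID 100, for example, sets bit 4 of entry 3.
 */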
2415 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2417 struct e1000_adapter *adapter = netdev_priv(netdev);
2418 struct e1000_hw *hw = &adapter->hw;
2421 if (!test_bit(__E1000_DOWN, &adapter->state))
2422 e1000_irq_disable(adapter);
2423 vlan_group_set_device(adapter->vlgrp, vid, NULL);
2425 if (!test_bit(__E1000_DOWN, &adapter->state))
2426 e1000_irq_enable(adapter);
2428 if ((adapter->hw.mng_cookie.status &
2429 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2430 (vid == adapter->mng_vlan_id)) {
2431 /* release control to f/w */
2432 e1000_release_hw_control(adapter);
2436 /* remove VID from filter table */
2437 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2438 index = (vid >> 5) & 0x7F;
2439 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2440 vfta &= ~(1 << (vid & 0x1F));
2441 hw->mac.ops.write_vfta(hw, index, vfta);
2445 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2447 struct net_device *netdev = adapter->netdev;
2448 u16 vid = adapter->hw.mng_cookie.vlan_id;
2449 u16 old_vid = adapter->mng_vlan_id;
2451 if (!adapter->vlgrp)
2454 if (!vlan_group_get_device(adapter->vlgrp, vid)) {
2455 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2456 if (adapter->hw.mng_cookie.status &
2457 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2458 e1000_vlan_rx_add_vid(netdev, vid);
2459 adapter->mng_vlan_id = vid;
2462 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
2464 !vlan_group_get_device(adapter->vlgrp, old_vid))
2465 e1000_vlan_rx_kill_vid(netdev, old_vid);
2467 adapter->mng_vlan_id = vid;
2472 static void e1000_vlan_rx_register(struct net_device *netdev,
2473 struct vlan_group *grp)
2475 struct e1000_adapter *adapter = netdev_priv(netdev);
2476 struct e1000_hw *hw = &adapter->hw;
2479 if (!test_bit(__E1000_DOWN, &adapter->state))
2480 e1000_irq_disable(adapter);
2481 adapter->vlgrp = grp;
2484 /* enable VLAN tag insert/strip */
2486 ctrl |= E1000_CTRL_VME;
2489 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2490 /* enable VLAN receive filtering */
2492 rctl &= ~E1000_RCTL_CFIEN;
2494 e1000_update_mng_vlan(adapter);
2497 /* disable VLAN tag insert/strip */
2499 ctrl &= ~E1000_CTRL_VME;
2502 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2503 if (adapter->mng_vlan_id !=
2504 (u16)E1000_MNG_VLAN_NONE) {
2505 e1000_vlan_rx_kill_vid(netdev,
2506 adapter->mng_vlan_id);
2507 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2512 if (!test_bit(__E1000_DOWN, &adapter->state))
2513 e1000_irq_enable(adapter);
2516 static void e1000_restore_vlan(struct e1000_adapter *adapter)
2520 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2522 if (!adapter->vlgrp)
2525 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2526 if (!vlan_group_get_device(adapter->vlgrp, vid))
2528 e1000_vlan_rx_add_vid(adapter->netdev, vid);
2532 static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
2534 struct e1000_hw *hw = &adapter->hw;
2535 u32 manc, manc2h, mdef, i, j;
2537 if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2543 * enable receiving management packets to the host. This will probably
2544 * generate destination unreachable messages from the host OS, but
2545 * the packets will be handled on SMBUS
2547 manc |= E1000_MANC_EN_MNG2HOST;
2548 manc2h = er32(MANC2H);
2550 switch (hw->mac.type) {
2552 manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
2557 * Check if IPMI pass-through decision filter already exists;
2560 for (i = 0, j = 0; i < 8; i++) {
2561 mdef = er32(MDEF(i));
2563 /* Ignore filters with anything other than IPMI ports */
2564 if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2567 /* Enable this decision filter in MANC2H */
2574 if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2577 /* Create new decision filter in an empty filter */
2578 for (i = 0, j = 0; i < 8; i++)
2579 if (er32(MDEF(i)) == 0) {
2580 ew32(MDEF(i), (E1000_MDEF_PORT_623 |
2581 E1000_MDEF_PORT_664));
2588 e_warn("Unable to create IPMI pass-through filter\n");
2592 ew32(MANC2H, manc2h);
2597 * e1000_configure_tx - Configure Transmit Unit after Reset
2598 * @adapter: board private structure
2600 * Configure the Tx unit of the MAC after a reset.
2602 static void e1000_configure_tx(struct e1000_adapter *adapter)
2604 struct e1000_hw *hw = &adapter->hw;
2605 struct e1000_ring *tx_ring = adapter->tx_ring;
2607 u32 tdlen, tctl, tipg, tarc;
2610 /* Setup the HW Tx Head and Tail descriptor pointers */
2611 tdba = tx_ring->dma;
2612 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
2613 ew32(TDBAL, (tdba & DMA_BIT_MASK(32)));
2614 ew32(TDBAH, (tdba >> 32));
2618 tx_ring->head = E1000_TDH;
2619 tx_ring->tail = E1000_TDT;
2621 /* Set the default values for the Tx Inter Packet Gap timer */
2622 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */
2623 ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */
2624 ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */
2626 if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
2627 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */
2629 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
2630 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
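/*
 * TIPG packs IPGT in the low bits with IPGR1 and IPGR2 above it (the
 * shifts are 10 and 20 bits).  With the defaults above the register
 * value works out to 8 | (8 << 10) | (6 << 20) = 0x602008, or 0x702008
 * when the 80003ES2LAN medium-IPG quirk bumps IPGR2 to 7.
 */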
2633 /* Set the Tx Interrupt Delay register */
2634 ew32(TIDV, adapter->tx_int_delay);
2635 /* Tx irq moderation */
2636 ew32(TADV, adapter->tx_abs_int_delay);
2638 /* Program the Transmit Control Register */
2640 tctl &= ~E1000_TCTL_CT;
2641 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2642 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2644 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
2645 tarc = er32(TARC(0));
2647 * set the speed mode bit, we'll clear it if we're not at
2648 * gigabit link later
2650 #define SPEED_MODE_BIT (1 << 21)
2651 tarc |= SPEED_MODE_BIT;
2652 ew32(TARC(0), tarc);
2655 /* errata: program both queues to unweighted RR */
2656 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
2657 tarc = er32(TARC(0));
2659 ew32(TARC(0), tarc);
2660 tarc = er32(TARC(1));
2662 ew32(TARC(1), tarc);
2665 /* Setup Transmit Descriptor Settings for eop descriptor */
2666 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
2668 /* only set IDE if we are delaying interrupts using the timers */
2669 if (adapter->tx_int_delay)
2670 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2672 /* enable Report Status bit */
2673 adapter->txd_cmd |= E1000_TXD_CMD_RS;
2677 e1000e_config_collision_dist(hw);
2681 * e1000_setup_rctl - configure the receive control registers
2682 * @adapter: board private structure
2684 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
2685 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
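/*
 * PAGE_USE_COUNT() is a round-up division of the MTU by the page size:
 * with 4 KB pages a 1500-byte MTU needs one page and a 9000-byte jumbo
 * MTU needs three, which is why the packet-split check below allows at
 * most 3 pages.
 */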
2686 static void e1000_setup_rctl(struct e1000_adapter *adapter)
2688 struct e1000_hw *hw = &adapter->hw;
2693 /* Program MC offset vector base */
2695 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2696 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
2697 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
2698 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2700 /* Do not store bad packets */
2701 rctl &= ~E1000_RCTL_SBP;
2703 /* Enable Long Packet receive */
2704 if (adapter->netdev->mtu <= ETH_DATA_LEN)
2705 rctl &= ~E1000_RCTL_LPE;
2707 rctl |= E1000_RCTL_LPE;
2709 /* Some systems expect that the CRC is included in SMBUS traffic. The
2710 * hardware strips the CRC before sending to both SMBUS (BMC) and to
2711 * host memory when this is enabled
2713 if (adapter->flags2 & FLAG2_CRC_STRIPPING)
2714 rctl |= E1000_RCTL_SECRC;
2716 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
2717 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
2720 e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
2722 phy_data |= (1 << 2);
2723 e1e_wphy(hw, PHY_REG(770, 26), phy_data);
2725 e1e_rphy(hw, 22, &phy_data);
2727 phy_data |= (1 << 14);
2728 e1e_wphy(hw, 0x10, 0x2823);
2729 e1e_wphy(hw, 0x11, 0x0003);
2730 e1e_wphy(hw, 22, phy_data);
2733 /* Workaround Si errata on 82579 - configure jumbo frame flow */
2734 if (hw->mac.type == e1000_pch2lan) {
2737 if (rctl & E1000_RCTL_LPE)
2738 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2740 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
2743 /* Setup buffer sizes */
2744 rctl &= ~E1000_RCTL_SZ_4096;
2745 rctl |= E1000_RCTL_BSEX;
2746 switch (adapter->rx_buffer_len) {
2749 rctl |= E1000_RCTL_SZ_2048;
2750 rctl &= ~E1000_RCTL_BSEX;
2753 rctl |= E1000_RCTL_SZ_4096;
2756 rctl |= E1000_RCTL_SZ_8192;
2759 rctl |= E1000_RCTL_SZ_16384;
2764 * 82571 and greater support packet-split where the protocol
2765 * header is placed in skb->data and the packet data is
2766 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2767 * In the case of a non-split, skb->data is linearly filled,
2768 * followed by the page buffers. Therefore, skb->data is
2769 * sized to hold the largest protocol header.
2771 * allocations using alloc_page take too long for regular MTU
2772 * so only enable packet split for jumbo frames
2774 * Using pages when the page size is greater than 16k wastes
2775 * a lot of memory, since we allocate 3 pages at all times
2778 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
2779 if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) &&
2780 (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
2781 adapter->rx_ps_pages = pages;
2783 adapter->rx_ps_pages = 0;
2785 if (adapter->rx_ps_pages) {
2786 /* Configure extra packet-split registers */
2787 rfctl = er32(RFCTL);
2788 rfctl |= E1000_RFCTL_EXTEN;
2790 * disable packet split support for IPv6 extension headers,
2791 * because some malformed IPv6 headers can hang the Rx
2793 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
2794 E1000_RFCTL_NEW_IPV6_EXT_DIS);
2798 /* Enable Packet split descriptors */
2799 rctl |= E1000_RCTL_DTYP_PS;
2801 psrctl |= adapter->rx_ps_bsize0 >>
2802 E1000_PSRCTL_BSIZE0_SHIFT;
2804 switch (adapter->rx_ps_pages) {
2806 psrctl |= PAGE_SIZE <<
2807 E1000_PSRCTL_BSIZE3_SHIFT;
2809 psrctl |= PAGE_SIZE <<
2810 E1000_PSRCTL_BSIZE2_SHIFT;
2812 psrctl |= PAGE_SIZE >>
2813 E1000_PSRCTL_BSIZE1_SHIFT;
2817 ew32(PSRCTL, psrctl);
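/*
 * In packet-split mode buffer 0 receives only the protocol headers
 * (rx_ps_bsize0, 128 bytes as set in e1000_sw_init) while buffers 1-3
 * are whole pages; the fall-through switch above programs sizes only
 * for as many page buffers as rx_ps_pages enables, leaving the rest 0.
 */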
2821 /* just started the receive unit, no need to restart */
2822 adapter->flags &= ~FLAG_RX_RESTART_NOW;
2826 * e1000_configure_rx - Configure Receive Unit after Reset
2827 * @adapter: board private structure
2829 * Configure the Rx unit of the MAC after a reset.
2831 static void e1000_configure_rx(struct e1000_adapter *adapter)
2833 struct e1000_hw *hw = &adapter->hw;
2834 struct e1000_ring *rx_ring = adapter->rx_ring;
2836 u32 rdlen, rctl, rxcsum, ctrl_ext;
2838 if (adapter->rx_ps_pages) {
2839 /* this is a 32 byte descriptor */
2840 rdlen = rx_ring->count *
2841 sizeof(union e1000_rx_desc_packet_split);
2842 adapter->clean_rx = e1000_clean_rx_irq_ps;
2843 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
2844 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
2845 rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
2846 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
2847 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
2849 rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
2850 adapter->clean_rx = e1000_clean_rx_irq;
2851 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
2854 /* disable receives while setting up the descriptors */
2856 ew32(RCTL, rctl & ~E1000_RCTL_EN);
2860 /* set the Receive Delay Timer Register */
2861 ew32(RDTR, adapter->rx_int_delay);
2863 /* irq moderation */
2864 ew32(RADV, adapter->rx_abs_int_delay);
2865 if (adapter->itr_setting != 0)
2866 ew32(ITR, 1000000000 / (adapter->itr * 256));
2868 ctrl_ext = er32(CTRL_EXT);
2869 /* Auto-Mask interrupts upon ICR access */
2870 ctrl_ext |= E1000_CTRL_EXT_IAME;
2871 ew32(IAM, 0xffffffff);
2872 ew32(CTRL_EXT, ctrl_ext);
2876 * Setup the HW Rx Head and Tail Descriptor Pointers and
2877 * the Base and Length of the Rx Descriptor Ring
2879 rdba = rx_ring->dma;
2880 ew32(RDBAL, (rdba & DMA_BIT_MASK(32)));
2881 ew32(RDBAH, (rdba >> 32));
2885 rx_ring->head = E1000_RDH;
2886 rx_ring->tail = E1000_RDT;
2888 /* Enable Receive Checksum Offload for TCP and UDP */
2889 rxcsum = er32(RXCSUM);
2890 if (adapter->flags & FLAG_RX_CSUM_ENABLED) {
2891 rxcsum |= E1000_RXCSUM_TUOFL;
2894 * IPv4 payload checksum for UDP fragments must be
2895 * used in conjunction with packet-split.
2897 if (adapter->rx_ps_pages)
2898 rxcsum |= E1000_RXCSUM_IPPCSE;
2900 rxcsum &= ~E1000_RXCSUM_TUOFL;
2901 /* no need to clear IPPCSE as it defaults to 0 */
2903 ew32(RXCSUM, rxcsum);
2906 * Enable early receives on supported devices, only takes effect when
2907 * packet size is equal to or larger than the specified value (in 8-byte
2908 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
2910 if (adapter->flags & FLAG_HAS_ERT) {
2911 if (adapter->netdev->mtu > ETH_DATA_LEN) {
2912 u32 rxdctl = er32(RXDCTL(0));
2913 ew32(RXDCTL(0), rxdctl | 0x3);
2914 ew32(ERT, E1000_ERT_2048 | (1 << 13));
2916 * With jumbo frames and early-receive enabled,
2917 * excessive C-state transition latencies result in
2918 * dropped transactions.
2920 pm_qos_update_request(
2921 adapter->netdev->pm_qos_req, 55);
2923 pm_qos_update_request(
2924 adapter->netdev->pm_qos_req,
2925 PM_QOS_DEFAULT_VALUE);
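/*
 * The PM QoS request is expressed in microseconds: capping CPU DMA
 * latency at 55 us keeps the CPUs out of deep C-states whose exit
 * latency could stall DMA long enough to overrun the small
 * early-receive threshold when jumbo frames are in use.
 */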
2929 /* Enable Receives */
2934 * e1000_update_mc_addr_list - Update Multicast addresses
2935 * @hw: pointer to the HW structure
2936 * @mc_addr_list: array of multicast addresses to program
2937 * @mc_addr_count: number of multicast addresses to program
2939 * Updates the Multicast Table Array.
2940 * The caller must have a packed mc_addr_list of multicast addresses.
2942 static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
2945 hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
2949 * e1000_set_multi - Multicast and Promiscuous mode set
2950 * @netdev: network interface device structure
2952 * The set_multi entry point is called whenever the multicast address
2953 * list or the network interface flags are updated. This routine is
2954 * responsible for configuring the hardware for proper multicast,
2955 * promiscuous mode, and all-multi behavior.
2957 static void e1000_set_multi(struct net_device *netdev)
2959 struct e1000_adapter *adapter = netdev_priv(netdev);
2960 struct e1000_hw *hw = &adapter->hw;
2961 struct netdev_hw_addr *ha;
2966 /* Check for Promiscuous and All Multicast modes */
2970 if (netdev->flags & IFF_PROMISC) {
2971 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2972 rctl &= ~E1000_RCTL_VFE;
2974 if (netdev->flags & IFF_ALLMULTI) {
2975 rctl |= E1000_RCTL_MPE;
2976 rctl &= ~E1000_RCTL_UPE;
2978 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
2980 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
2981 rctl |= E1000_RCTL_VFE;
2986 if (!netdev_mc_empty(netdev)) {
2987 mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
2991 /* prepare a packed array of only addresses. */
2993 netdev_for_each_mc_addr(ha, netdev)
2994 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
2996 e1000_update_mc_addr_list(hw, mta_list, i);
3000 * if we're called from probe, we might not have
3001 * anything to do here, so clear out the list
3003 e1000_update_mc_addr_list(hw, NULL, 0);
3008 * e1000_configure - configure the hardware for Rx and Tx
3009 * @adapter: private board structure
3011 static void e1000_configure(struct e1000_adapter *adapter)
3013 e1000_set_multi(adapter->netdev);
3015 e1000_restore_vlan(adapter);
3016 e1000_init_manageability_pt(adapter);
3018 e1000_configure_tx(adapter);
3019 e1000_setup_rctl(adapter);
3020 e1000_configure_rx(adapter);
3021 adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring));
3025 * e1000e_power_up_phy - restore link in case the phy was powered down
3026 * @adapter: address of board private structure
3028 * The phy may be powered down to save power and turn off link when the
3029 * driver is unloaded and wake on lan is not enabled (among others)
3030 * *** this routine MUST be followed by a call to e1000e_reset ***
3032 void e1000e_power_up_phy(struct e1000_adapter *adapter)
3034 if (adapter->hw.phy.ops.power_up)
3035 adapter->hw.phy.ops.power_up(&adapter->hw);
3037 adapter->hw.mac.ops.setup_link(&adapter->hw);
3041 * e1000_power_down_phy - Power down the PHY
3043 * Power down the PHY so no link is implied when interface is down.
3044 * The PHY cannot be powered down if management or WoL is active.
3046 static void e1000_power_down_phy(struct e1000_adapter *adapter)
3048 /* WoL is enabled */
3052 if (adapter->hw.phy.ops.power_down)
3053 adapter->hw.phy.ops.power_down(&adapter->hw);
3057 * e1000e_reset - bring the hardware into a known good state
3059 * This function boots the hardware and enables some settings that
3060 * require a configuration cycle of the hardware - those cannot be
3061 * set/changed during runtime. After reset the device needs to be
3062 * properly configured for Rx, Tx etc.
3064 void e1000e_reset(struct e1000_adapter *adapter)
3066 struct e1000_mac_info *mac = &adapter->hw.mac;
3067 struct e1000_fc_info *fc = &adapter->hw.fc;
3068 struct e1000_hw *hw = &adapter->hw;
3069 u32 tx_space, min_tx_space, min_rx_space;
3070 u32 pba = adapter->pba;
3073 /* reset Packet Buffer Allocation to default */
3076 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
3078 * To maintain wire speed transmits, the Tx FIFO should be
3079 * large enough to accommodate two full transmit packets,
3080 * rounded up to the next 1KB and expressed in KB. Likewise,
3081 * the Rx FIFO should be large enough to accommodate at least
3082 * one full receive packet and is similarly rounded up and
3086 /* upper 16 bits has Tx packet buffer allocation size in KB */
3087 tx_space = pba >> 16;
3088 /* lower 16 bits has Rx packet buffer allocation size in KB */
3091 * the Tx fifo also stores 16 bytes of information about the tx
3092 * but don't include ethernet FCS because hardware appends it
3094 min_tx_space = (adapter->max_frame_size +
3095 sizeof(struct e1000_tx_desc) -
3097 min_tx_space = ALIGN(min_tx_space, 1024);
3098 min_tx_space >>= 10;
3099 /* software strips receive CRC, so leave room for it */
3100 min_rx_space = adapter->max_frame_size;
3101 min_rx_space = ALIGN(min_rx_space, 1024);
3102 min_rx_space >>= 10;
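/*
 * Rough numbers for a 9000-byte MTU (max_frame_size = 9018): two full
 * frames plus descriptor overhead round up to about 18 KB of required
 * Tx FIFO and a single frame rounds up to 9 KB of Rx FIFO, before the
 * adjustment below steals Rx space for Tx if necessary.
 */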
3105 * If current Tx allocation is less than the min Tx FIFO size,
3106 * and the min Tx FIFO size is less than the current Rx FIFO
3107 * allocation, take space away from current Rx allocation
3109 if ((tx_space < min_tx_space) &&
3110 ((min_tx_space - tx_space) < pba)) {
3111 pba -= min_tx_space - tx_space;
3114 * if short on Rx space, Rx wins and must trump tx
3115 * adjustment or use Early Receive if available
3117 if ((pba < min_rx_space) &&
3118 (!(adapter->flags & FLAG_HAS_ERT)))
3119 /* ERT enabled in e1000_configure_rx */
3128 * flow control settings
3130 * The high water mark must be low enough to fit one full frame
3131 * (or the size used for early receive) above it in the Rx FIFO.
3132 * Set it to the lower of:
3133 * - 90% of the Rx FIFO size, and
3134 * - the full Rx FIFO size minus the early receive size (for parts
3135 * with ERT support assuming ERT set to E1000_ERT_2048), or
3136 * - the full Rx FIFO size minus one full frame
3138 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
3139 fc->pause_time = 0xFFFF;
3141 fc->pause_time = E1000_FC_PAUSE_TIME;
3143 fc->current_mode = fc->requested_mode;
3145 switch (hw->mac.type) {
3147 if ((adapter->flags & FLAG_HAS_ERT) &&
3148 (adapter->netdev->mtu > ETH_DATA_LEN))
3149 hwm = min(((pba << 10) * 9 / 10),
3150 ((pba << 10) - (E1000_ERT_2048 << 3)));
3152 hwm = min(((pba << 10) * 9 / 10),
3153 ((pba << 10) - adapter->max_frame_size));
3155 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
3156 fc->low_water = fc->high_water - 8;
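/*
 * Example with a 20 KB Rx packet buffer and a standard 1518-byte frame:
 * 90% of the FIFO is 18432 bytes and FIFO-minus-one-frame is 18962, so
 * the high water mark becomes 18432 (already a multiple of 8) and the
 * low water mark sits 8 bytes below it.
 */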
3160 * Workaround PCH LOM adapter hangs with certain network
3161 * loads. If hangs persist, try disabling Tx flow control.
3163 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3164 fc->high_water = 0x3500;
3165 fc->low_water = 0x1500;
3167 fc->high_water = 0x5000;
3168 fc->low_water = 0x3000;
3170 fc->refresh_time = 0x1000;
3173 fc->high_water = 0x05C20;
3174 fc->low_water = 0x05048;
3175 fc->pause_time = 0x0650;
3176 fc->refresh_time = 0x0400;
3180 /* Allow time for pending master requests to run */
3181 mac->ops.reset_hw(hw);
3184 * For parts with AMT enabled, let the firmware know
3185 * that the network interface is in control
3187 if (adapter->flags & FLAG_HAS_AMT)
3188 e1000_get_hw_control(adapter);
3192 if (mac->ops.init_hw(hw))
3193 e_err("Hardware Error\n");
3195 e1000_update_mng_vlan(adapter);
3197 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
3198 ew32(VET, ETH_P_8021Q);
3200 e1000e_reset_adaptive(hw);
3201 e1000_get_phy_info(hw);
3203 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
3204 !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
3207 * speed up time to link by disabling smart power down, ignore
3208 * the return value of this function because there is nothing
3209 * different we would do if it failed
3211 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
3212 phy_data &= ~IGP02E1000_PM_SPD;
3213 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
3217 int e1000e_up(struct e1000_adapter *adapter)
3219 struct e1000_hw *hw = &adapter->hw;
3221 /* DMA latency requirement to workaround early-receive/jumbo issue */
3222 if (adapter->flags & FLAG_HAS_ERT)
3223 adapter->netdev->pm_qos_req =
3224 pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
3225 PM_QOS_DEFAULT_VALUE);
3227 /* hardware has been reset, we need to reload some things */
3228 e1000_configure(adapter);
3230 clear_bit(__E1000_DOWN, &adapter->state);
3232 napi_enable(&adapter->napi);
3233 if (adapter->msix_entries)
3234 e1000_configure_msix(adapter);
3235 e1000_irq_enable(adapter);
3237 netif_wake_queue(adapter->netdev);
3239 /* fire a link change interrupt to start the watchdog */
3240 if (adapter->msix_entries)
3241 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3243 ew32(ICS, E1000_ICS_LSC);
3248 void e1000e_down(struct e1000_adapter *adapter)
3250 struct net_device *netdev = adapter->netdev;
3251 struct e1000_hw *hw = &adapter->hw;
3255 * signal that we're down so the interrupt handler does not
3256 * reschedule our watchdog timer
3258 set_bit(__E1000_DOWN, &adapter->state);
3260 /* disable receives in the hardware */
3262 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3263 /* flush and sleep below */
3265 netif_stop_queue(netdev);
3267 /* disable transmits in the hardware */
3269 tctl &= ~E1000_TCTL_EN;
3271 /* flush both disables and wait for them to finish */
3275 napi_disable(&adapter->napi);
3276 e1000_irq_disable(adapter);
3278 del_timer_sync(&adapter->watchdog_timer);
3279 del_timer_sync(&adapter->phy_info_timer);
3281 netif_carrier_off(netdev);
3282 adapter->link_speed = 0;
3283 adapter->link_duplex = 0;
3285 if (!pci_channel_offline(adapter->pdev))
3286 e1000e_reset(adapter);
3287 e1000_clean_tx_ring(adapter);
3288 e1000_clean_rx_ring(adapter);
3290 if (adapter->flags & FLAG_HAS_ERT) {
3291 pm_qos_remove_request(
3292 adapter->netdev->pm_qos_req);
3293 adapter->netdev->pm_qos_req = NULL;
3297 * TODO: for power management, we could drop the link and
3298 * pci_disable_device here.
3302 void e1000e_reinit_locked(struct e1000_adapter *adapter)
3305 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
3307 e1000e_down(adapter);
3309 clear_bit(__E1000_RESETTING, &adapter->state);
3313 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
3314 * @adapter: board private structure to initialize
3316 * e1000_sw_init initializes the Adapter private data structure.
3317 * Fields are initialized based on PCI device information and
3318 * OS network device settings (MTU size).
3320 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
3322 struct net_device *netdev = adapter->netdev;
3324 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
3325 adapter->rx_ps_bsize0 = 128;
3326 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3327 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3329 e1000e_set_interrupt_capability(adapter);
3331 if (e1000_alloc_queues(adapter))
3334 /* Explicitly disable IRQ since the NIC can be in any state. */
3335 e1000_irq_disable(adapter);
3337 set_bit(__E1000_DOWN, &adapter->state);
3342 * e1000_intr_msi_test - Interrupt Handler
3343 * @irq: interrupt number
3344 * @data: pointer to a network interface device structure
3346 static irqreturn_t e1000_intr_msi_test(int irq, void *data)
3348 struct net_device *netdev = data;
3349 struct e1000_adapter *adapter = netdev_priv(netdev);
3350 struct e1000_hw *hw = &adapter->hw;
3351 u32 icr = er32(ICR);
3353 e_dbg("icr is %08X\n", icr);
3354 if (icr & E1000_ICR_RXSEQ) {
3355 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
3363 * e1000_test_msi_interrupt - Returns 0 for successful test
3364 * @adapter: board private struct
3366 * code flow taken from tg3.c
3368 static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
3370 struct net_device *netdev = adapter->netdev;
3371 struct e1000_hw *hw = &adapter->hw;
3374 /* poll_enable hasn't been called yet, so don't need disable */
3375 /* clear any pending events */
3378 /* free the real vector and request a test handler */
3379 e1000_free_irq(adapter);
3380 e1000e_reset_interrupt_capability(adapter);
3382 /* Assume that the test fails; if it succeeds, the test
3383 * MSI irq handler will unset this flag */
3384 adapter->flags |= FLAG_MSI_TEST_FAILED;
3386 err = pci_enable_msi(adapter->pdev);
3388 goto msi_test_failed;
3390 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
3391 netdev->name, netdev);
3393 pci_disable_msi(adapter->pdev);
3394 goto msi_test_failed;
3399 e1000_irq_enable(adapter);
3401 /* fire an unusual interrupt on the test handler */
3402 ew32(ICS, E1000_ICS_RXSEQ);
3406 e1000_irq_disable(adapter);
3410 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
3411 adapter->int_mode = E1000E_INT_MODE_LEGACY;
3413 e_info("MSI interrupt test failed!\n");
3416 free_irq(adapter->pdev->irq, netdev);
3417 pci_disable_msi(adapter->pdev);
3420 goto msi_test_failed;
3422 /* okay so the test worked, restore settings */
3423 e_dbg("MSI interrupt test succeeded!\n");
3425 e1000e_set_interrupt_capability(adapter);
3426 e1000_request_irq(adapter);
3431 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
3432 * @adapter: board private struct
3434 * code flow taken from tg3.c, called with e1000 interrupts disabled.
3436 static int e1000_test_msi(struct e1000_adapter *adapter)
3441 if (!(adapter->flags & FLAG_MSI_ENABLED))
3444 /* disable SERR in case the MSI write causes a master abort */
3445 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3446 if (pci_cmd & PCI_COMMAND_SERR)
3447 pci_write_config_word(adapter->pdev, PCI_COMMAND,
3448 pci_cmd & ~PCI_COMMAND_SERR);
3450 err = e1000_test_msi_interrupt(adapter);
3452 /* re-enable SERR */
3453 if (pci_cmd & PCI_COMMAND_SERR) {
3454 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3455 pci_cmd |= PCI_COMMAND_SERR;
3456 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
3463 /* EIO means MSI test failed */
3467 /* back to INTx mode */
3468 e_warn("MSI interrupt test failed, using legacy interrupt.\n");
3470 e1000_free_irq(adapter);
3472 err = e1000_request_irq(adapter);
3478 * e1000_open - Called when a network interface is made active
3479 * @netdev: network interface device structure
3481 * Returns 0 on success, negative value on failure
3483 * The open entry point is called when a network interface is made
3484 * active by the system (IFF_UP). At this point all resources needed
3485 * for transmit and receive operations are allocated, the interrupt
3486 * handler is registered with the OS, the watchdog timer is started,
3487 * and the stack is notified that the interface is ready.
3489 static int e1000_open(struct net_device *netdev)
3491 struct e1000_adapter *adapter = netdev_priv(netdev);
3492 struct e1000_hw *hw = &adapter->hw;
3493 struct pci_dev *pdev = adapter->pdev;
3496 /* disallow open during test */
3497 if (test_bit(__E1000_TESTING, &adapter->state))
3500 pm_runtime_get_sync(&pdev->dev);
3502 netif_carrier_off(netdev);
3504 /* allocate transmit descriptors */
3505 err = e1000e_setup_tx_resources(adapter);
3509 /* allocate receive descriptors */
3510 err = e1000e_setup_rx_resources(adapter);
3515 * If AMT is enabled, let the firmware know that the network
3516 * interface is now open and reset the part to a known state.
3518 if (adapter->flags & FLAG_HAS_AMT) {
3519 e1000_get_hw_control(adapter);
3520 e1000e_reset(adapter);
3523 e1000e_power_up_phy(adapter);
3525 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
3526 if ((adapter->hw.mng_cookie.status &
3527 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
3528 e1000_update_mng_vlan(adapter);
3531 * before we allocate an interrupt, we must be ready to handle it.
3532 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
3533 * as soon as we call pci_request_irq, so we have to set up our
3534 * clean_rx handler before we do so.
3536 e1000_configure(adapter);
3538 err = e1000_request_irq(adapter);
3543 * Work around PCIe errata with MSI interrupts causing some chipsets to
3544 * ignore e1000e MSI messages, which means we need to test our MSI
3547 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
3548 err = e1000_test_msi(adapter);
3550 e_err("Interrupt allocation failed\n");
3555 /* From here on the code is the same as e1000e_up() */
3556 clear_bit(__E1000_DOWN, &adapter->state);
3558 napi_enable(&adapter->napi);
3560 e1000_irq_enable(adapter);
3562 netif_start_queue(netdev);
3564 adapter->idle_check = true;
3565 pm_runtime_put(&pdev->dev);
3567 /* fire a link status change interrupt to start the watchdog */
3568 if (adapter->msix_entries)
3569 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3571 ew32(ICS, E1000_ICS_LSC);
3576 e1000_release_hw_control(adapter);
3577 e1000_power_down_phy(adapter);
3578 e1000e_free_rx_resources(adapter);
3580 e1000e_free_tx_resources(adapter);
3582 e1000e_reset(adapter);
3583 pm_runtime_put_sync(&pdev->dev);
3589 * e1000_close - Disables a network interface
3590 * @netdev: network interface device structure
3592 * Returns 0, this is not allowed to fail
3594 * The close entry point is called when an interface is de-activated
3595 * by the OS. The hardware is still under the driver's control, but
3596 * needs to be disabled. A global MAC reset is issued to stop the
3597 * hardware, and all transmit and receive resources are freed.
3599 static int e1000_close(struct net_device *netdev)
3601 struct e1000_adapter *adapter = netdev_priv(netdev);
3602 struct pci_dev *pdev = adapter->pdev;
3604 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
3606 pm_runtime_get_sync(&pdev->dev);
3608 if (!test_bit(__E1000_DOWN, &adapter->state)) {
3609 e1000e_down(adapter);
3610 e1000_free_irq(adapter);
3612 e1000_power_down_phy(adapter);
3614 e1000e_free_tx_resources(adapter);
3615 e1000e_free_rx_resources(adapter);
3618 * kill manageability vlan ID if supported, but not if a vlan with
3619 * the same ID is registered on the host OS (let 8021q kill it)
3621 if ((adapter->hw.mng_cookie.status &
3622 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
3624 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
3625 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
3628 * If AMT is enabled, let the firmware know that the network
3629 * interface is now closed
3631 if (adapter->flags & FLAG_HAS_AMT)
3632 e1000_release_hw_control(adapter);
3634 pm_runtime_put_sync(&pdev->dev);
3639 * e1000_set_mac - Change the Ethernet Address of the NIC
3640 * @netdev: network interface device structure
3641 * @p: pointer to an address structure
3643 * Returns 0 on success, negative on failure
3645 static int e1000_set_mac(struct net_device *netdev, void *p)
3647 struct e1000_adapter *adapter = netdev_priv(netdev);
3648 struct sockaddr *addr = p;
3650 if (!is_valid_ether_addr(addr->sa_data))
3651 return -EADDRNOTAVAIL;
3653 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3654 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
3656 e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
3658 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
3659 /* activate the work around */
3660 e1000e_set_laa_state_82571(&adapter->hw, 1);
3663 * Hold a copy of the LAA in RAR[14]. This is done so that
3664 * between the time RAR[0] gets clobbered and the time it
3665 * gets fixed (in e1000_watchdog), the actual LAA is in one
3666 * of the RARs and no incoming packets directed to this port
3667 * are dropped. Eventually the LAA will be in RAR[0] and
3670 e1000e_rar_set(&adapter->hw,
3671 adapter->hw.mac.addr,
3672 adapter->hw.mac.rar_entry_count - 1);
3679 * e1000e_update_phy_task - work thread to update phy
3680 * @work: pointer to our work struct
3682 * this worker thread exists because we must acquire a
3683 * semaphore to read the phy, which may cause us to msleep while
3684 * waiting for it, and we can't msleep in a timer.
3686 static void e1000e_update_phy_task(struct work_struct *work)
3688 struct e1000_adapter *adapter = container_of(work,
3689 struct e1000_adapter, update_phy_task);
3690 e1000_get_phy_info(&adapter->hw);
3694 * Need to wait a few seconds after link up to get diagnostic information from the phy
3697 static void e1000_update_phy_info(unsigned long data)
3699 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
3700 schedule_work(&adapter->update_phy_task);
3704 * e1000e_update_phy_stats - Update the PHY statistics counters
3705 * @adapter: board private structure
3707 static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
3709 struct e1000_hw *hw = &adapter->hw;
3713 ret_val = hw->phy.ops.acquire(hw);
3719 #define HV_PHY_STATS_PAGE 778
3721 * A page set is expensive so check if already on desired page.
3722 * If not, set to the page with the PHY status registers.
3724 ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
3728 if (phy_data != (HV_PHY_STATS_PAGE << IGP_PAGE_SHIFT)) {
3729 ret_val = e1000e_write_phy_reg_mdic(hw,
3730 IGP01E1000_PHY_PAGE_SELECT,
3731 (HV_PHY_STATS_PAGE <<
3737 /* Read/clear the upper 16-bit registers and read/accumulate lower */
3739 /* Single Collision Count */
3740 e1000e_read_phy_reg_mdic(hw, HV_SCC_UPPER & MAX_PHY_REG_ADDRESS,
3742 ret_val = e1000e_read_phy_reg_mdic(hw,
3743 HV_SCC_LOWER & MAX_PHY_REG_ADDRESS,
3746 adapter->stats.scc += phy_data;
3748 /* Excessive Collision Count */
3749 e1000e_read_phy_reg_mdic(hw, HV_ECOL_UPPER & MAX_PHY_REG_ADDRESS,
3751 ret_val = e1000e_read_phy_reg_mdic(hw,
3752 HV_ECOL_LOWER & MAX_PHY_REG_ADDRESS,
3755 adapter->stats.ecol += phy_data;
3757 /* Multiple Collision Count */
3758 e1000e_read_phy_reg_mdic(hw, HV_MCC_UPPER & MAX_PHY_REG_ADDRESS,
3760 ret_val = e1000e_read_phy_reg_mdic(hw,
3761 HV_MCC_LOWER & MAX_PHY_REG_ADDRESS,
3764 adapter->stats.mcc += phy_data;
3766 /* Late Collision Count */
3767 e1000e_read_phy_reg_mdic(hw, HV_LATECOL_UPPER & MAX_PHY_REG_ADDRESS,
3769 ret_val = e1000e_read_phy_reg_mdic(hw,
3771 MAX_PHY_REG_ADDRESS,
3774 adapter->stats.latecol += phy_data;
3776 /* Collision Count - also used for adaptive IFS */
3777 e1000e_read_phy_reg_mdic(hw, HV_COLC_UPPER & MAX_PHY_REG_ADDRESS,
3779 ret_val = e1000e_read_phy_reg_mdic(hw,
3780 HV_COLC_LOWER & MAX_PHY_REG_ADDRESS,
3783 hw->mac.collision_delta = phy_data;
3786 e1000e_read_phy_reg_mdic(hw, HV_DC_UPPER & MAX_PHY_REG_ADDRESS,
3788 ret_val = e1000e_read_phy_reg_mdic(hw,
3789 HV_DC_LOWER & MAX_PHY_REG_ADDRESS,
3792 adapter->stats.dc += phy_data;
3794 /* Transmit with no CRS */
3795 e1000e_read_phy_reg_mdic(hw, HV_TNCRS_UPPER & MAX_PHY_REG_ADDRESS,
3797 ret_val = e1000e_read_phy_reg_mdic(hw,
3798 HV_TNCRS_LOWER & MAX_PHY_REG_ADDRESS,
3801 adapter->stats.tncrs += phy_data;
3804 hw->phy.ops.release(hw);
3808 * e1000e_update_stats - Update the board statistics counters
3809 * @adapter: board private structure
3811 void e1000e_update_stats(struct e1000_adapter *adapter)
3813 struct net_device *netdev = adapter->netdev;
3814 struct e1000_hw *hw = &adapter->hw;
3815 struct pci_dev *pdev = adapter->pdev;
3818 * Prevent stats update while adapter is being reset, or if the pci
3819 * connection is down.
3821 if (adapter->link_speed == 0)
3823 if (pci_channel_offline(pdev))
3826 adapter->stats.crcerrs += er32(CRCERRS);
3827 adapter->stats.gprc += er32(GPRC);
3828 adapter->stats.gorc += er32(GORCL);
3829 er32(GORCH); /* Clear gorc */
3830 adapter->stats.bprc += er32(BPRC);
3831 adapter->stats.mprc += er32(MPRC);
3832 adapter->stats.roc += er32(ROC);
3834 adapter->stats.mpc += er32(MPC);
3836 /* Half-duplex statistics */
3837 if (adapter->link_duplex == HALF_DUPLEX) {
3838 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
3839 e1000e_update_phy_stats(adapter);
3841 adapter->stats.scc += er32(SCC);
3842 adapter->stats.ecol += er32(ECOL);
3843 adapter->stats.mcc += er32(MCC);
3844 adapter->stats.latecol += er32(LATECOL);
3845 adapter->stats.dc += er32(DC);
3847 hw->mac.collision_delta = er32(COLC);
3849 if ((hw->mac.type != e1000_82574) &&
3850 (hw->mac.type != e1000_82583))
3851 adapter->stats.tncrs += er32(TNCRS);
3853 adapter->stats.colc += hw->mac.collision_delta;
3856 adapter->stats.xonrxc += er32(XONRXC);
3857 adapter->stats.xontxc += er32(XONTXC);
3858 adapter->stats.xoffrxc += er32(XOFFRXC);
3859 adapter->stats.xofftxc += er32(XOFFTXC);
3860 adapter->stats.gptc += er32(GPTC);
3861 adapter->stats.gotc += er32(GOTCL);
3862 er32(GOTCH); /* Clear gotc */
3863 adapter->stats.rnbc += er32(RNBC);
3864 adapter->stats.ruc += er32(RUC);
3866 adapter->stats.mptc += er32(MPTC);
3867 adapter->stats.bptc += er32(BPTC);
3869 /* used for adaptive IFS */
3871 hw->mac.tx_packet_delta = er32(TPT);
3872 adapter->stats.tpt += hw->mac.tx_packet_delta;
3874 adapter->stats.algnerrc += er32(ALGNERRC);
3875 adapter->stats.rxerrc += er32(RXERRC);
3876 adapter->stats.cexterr += er32(CEXTERR);
3877 adapter->stats.tsctc += er32(TSCTC);
3878 adapter->stats.tsctfc += er32(TSCTFC);
3880 /* Fill out the OS statistics structure */
3881 netdev->stats.multicast = adapter->stats.mprc;
3882 netdev->stats.collisions = adapter->stats.colc;
3887 * RLEC on some newer hardware can be incorrect so build
3888 * our own version based on RUC and ROC
3890 netdev->stats.rx_errors = adapter->stats.rxerrc +
3891 adapter->stats.crcerrs + adapter->stats.algnerrc +
3892 adapter->stats.ruc + adapter->stats.roc +
3893 adapter->stats.cexterr;
3894 netdev->stats.rx_length_errors = adapter->stats.ruc +
3896 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3897 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3898 netdev->stats.rx_missed_errors = adapter->stats.mpc;
3901 netdev->stats.tx_errors = adapter->stats.ecol +
3902 adapter->stats.latecol;
3903 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3904 netdev->stats.tx_window_errors = adapter->stats.latecol;
3905 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3907 /* Tx Dropped needs to be maintained elsewhere */
3909 /* Management Stats */
3910 adapter->stats.mgptc += er32(MGTPTC);
3911 adapter->stats.mgprc += er32(MGTPRC);
3912 adapter->stats.mgpdc += er32(MGTPDC);
3916 * e1000_phy_read_status - Update the PHY register status snapshot
3917 * @adapter: board private structure
3919 static void e1000_phy_read_status(struct e1000_adapter *adapter)
3921 struct e1000_hw *hw = &adapter->hw;
3922 struct e1000_phy_regs *phy = &adapter->phy_regs;
3925 if ((er32(STATUS) & E1000_STATUS_LU) &&
3926 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
3927 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
3928 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
3929 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
3930 ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
3931 ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
3932 ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
3933 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
3934 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
3936 e_warn("Error reading PHY register\n");
3939 * Do not read PHY registers if link is not up
3940 * Set values to typical power-on defaults
3942 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
3943 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
3944 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
3946 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
3947 ADVERTISE_ALL | ADVERTISE_CSMA);
3949 phy->expansion = EXPANSION_ENABLENPAGE;
3950 phy->ctrl1000 = ADVERTISE_1000FULL;
3952 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
3956 static void e1000_print_link_info(struct e1000_adapter *adapter)
3958 struct e1000_hw *hw = &adapter->hw;
3959 u32 ctrl = er32(CTRL);
3961 /* Link status message must follow this format for user tools */
3962 printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
3963 "Flow Control: %s\n",
3964 adapter->netdev->name,
3965 adapter->link_speed,
3966 (adapter->link_duplex == FULL_DUPLEX) ?
3967 "Full Duplex" : "Half Duplex",
3968 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
3970 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3971 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
3974 static bool e1000e_has_link(struct e1000_adapter *adapter)
3976 struct e1000_hw *hw = &adapter->hw;
3977 bool link_active = false;
3981 * get_link_status is set on LSC (link status) interrupt or
3982 * Rx sequence error interrupt. get_link_status will stay
3983 * false until the check_for_link establishes link
3984 * for copper adapters ONLY
3986 switch (hw->phy.media_type) {
3987 case e1000_media_type_copper:
3988 if (hw->mac.get_link_status) {
3989 ret_val = hw->mac.ops.check_for_link(hw);
3990 link_active = !hw->mac.get_link_status;
3995 case e1000_media_type_fiber:
3996 ret_val = hw->mac.ops.check_for_link(hw);
3997 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
3999 case e1000_media_type_internal_serdes:
4000 ret_val = hw->mac.ops.check_for_link(hw);
4001 link_active = adapter->hw.mac.serdes_has_link;
4004 case e1000_media_type_unknown:
4008 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
4009 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
4010 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
4011 e_info("Gigabit has been disabled, downgrading speed\n");
4017 static void e1000e_enable_receives(struct e1000_adapter *adapter)
4019 /* make sure the receive unit is started */
4020 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
4021 (adapter->flags & FLAG_RX_RESTART_NOW)) {
4022 struct e1000_hw *hw = &adapter->hw;
4023 u32 rctl = er32(RCTL);
4024 ew32(RCTL, rctl | E1000_RCTL_EN);
4025 adapter->flags &= ~FLAG_RX_RESTART_NOW;
4030 * e1000_watchdog - Timer Call-back
4031 * @data: pointer to adapter cast into an unsigned long
4033 static void e1000_watchdog(unsigned long data)
4035 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
4037 /* Do the rest outside of interrupt context */
4038 schedule_work(&adapter->watchdog_task);
4040 /* TODO: make this use queue_delayed_work() */
4043 static void e1000_watchdog_task(struct work_struct *work)
4045 struct e1000_adapter *adapter = container_of(work,
4046 struct e1000_adapter, watchdog_task);
4047 struct net_device *netdev = adapter->netdev;
4048 struct e1000_mac_info *mac = &adapter->hw.mac;
4049 struct e1000_phy_info *phy = &adapter->hw.phy;
4050 struct e1000_ring *tx_ring = adapter->tx_ring;
4051 struct e1000_hw *hw = &adapter->hw;
4055 link = e1000e_has_link(adapter);
4056 if ((netif_carrier_ok(netdev)) && link) {
4057 /* Cancel scheduled suspend requests. */
4058 pm_runtime_resume(netdev->dev.parent);
4060 e1000e_enable_receives(adapter);
4064 if ((e1000e_enable_tx_pkt_filtering(hw)) &&
4065 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
4066 e1000_update_mng_vlan(adapter);
4069 if (!netif_carrier_ok(netdev)) {
4072 /* Cancel scheduled suspend requests. */
4073 pm_runtime_resume(netdev->dev.parent);
4075 /* update snapshot of PHY registers on LSC */
4076 e1000_phy_read_status(adapter);
4077 mac->ops.get_link_up_info(&adapter->hw,
4078 &adapter->link_speed,
4079 &adapter->link_duplex);
4080 e1000_print_link_info(adapter);
4082 * On supported PHYs, check for duplex mismatch only
4083 * if link has autonegotiated at 10/100 half
4085 if ((hw->phy.type == e1000_phy_igp_3 ||
4086 hw->phy.type == e1000_phy_bm) &&
4087 (hw->mac.autoneg == true) &&
4088 (adapter->link_speed == SPEED_10 ||
4089 adapter->link_speed == SPEED_100) &&
4090 (adapter->link_duplex == HALF_DUPLEX)) {
4093 e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
4095 if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
4096 e_info("Autonegotiated half duplex but"
4097 " link partner cannot autoneg. "
4098 " Try forcing full duplex if "
4099 "link gets many collisions.\n");
4102 /* adjust timeout factor according to speed/duplex */
4103 adapter->tx_timeout_factor = 1;
4104 switch (adapter->link_speed) {
4107 adapter->tx_timeout_factor = 16;
4111 adapter->tx_timeout_factor = 10;
4116 * workaround: re-program speed mode bit after
4119 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
4122 tarc0 = er32(TARC(0));
4123 tarc0 &= ~SPEED_MODE_BIT;
4124 ew32(TARC(0), tarc0);
4128 * disable TSO for pcie and 10/100 speeds, to avoid
4129 * some hardware issues
4131 if (!(adapter->flags & FLAG_TSO_FORCE)) {
4132 switch (adapter->link_speed) {
4135 e_info("10/100 speed: disabling TSO\n");
4136 netdev->features &= ~NETIF_F_TSO;
4137 netdev->features &= ~NETIF_F_TSO6;
4140 netdev->features |= NETIF_F_TSO;
4141 netdev->features |= NETIF_F_TSO6;
4150 * enable transmits in the hardware, need to do this
4151 * after setting TARC(0)
4154 tctl |= E1000_TCTL_EN;
4158 * Perform any post-link-up configuration before
4159 * reporting link up.
4161 if (phy->ops.cfg_on_link_up)
4162 phy->ops.cfg_on_link_up(hw);
4164 netif_carrier_on(netdev);
4166 if (!test_bit(__E1000_DOWN, &adapter->state))
4167 mod_timer(&adapter->phy_info_timer,
4168 round_jiffies(jiffies + 2 * HZ));
4171 if (netif_carrier_ok(netdev)) {
4172 adapter->link_speed = 0;
4173 adapter->link_duplex = 0;
4174 /* Link status message must follow this format */
4175 printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
4176 adapter->netdev->name);
4177 netif_carrier_off(netdev);
4178 if (!test_bit(__E1000_DOWN, &adapter->state))
4179 mod_timer(&adapter->phy_info_timer,
4180 round_jiffies(jiffies + 2 * HZ));
4182 if (adapter->flags & FLAG_RX_NEEDS_RESTART)
4183 schedule_work(&adapter->reset_task);
4185 pm_schedule_suspend(netdev->dev.parent,
4191 e1000e_update_stats(adapter);
4193 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
4194 adapter->tpt_old = adapter->stats.tpt;
4195 mac->collision_delta = adapter->stats.colc - adapter->colc_old;
4196 adapter->colc_old = adapter->stats.colc;
4198 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
4199 adapter->gorc_old = adapter->stats.gorc;
4200 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
4201 adapter->gotc_old = adapter->stats.gotc;
4203 e1000e_update_adaptive(&adapter->hw);
4205 if (!netif_carrier_ok(netdev)) {
4206 tx_pending = (e1000_desc_unused(tx_ring) + 1 <
4210 * We've lost link, so the controller stops DMA,
4211 * but we've got queued Tx work that's never going
4212 * to get done, so reset controller to flush Tx.
4213 * (Do the reset outside of interrupt context).
4215 adapter->tx_timeout_count++;
4216 schedule_work(&adapter->reset_task);
4217 /* return immediately since reset is imminent */
4222 /* Simple mode for Interrupt Throttle Rate (ITR) */
4223 if (adapter->itr_setting == 4) {
4225 * Symmetric Tx/Rx gets a reduced ITR=2000;
4226 * Total asymmetrical Tx or Rx gets ITR=8000;
4227 * everyone else is between 2000-8000.
4229 u32 goc = (adapter->gotc + adapter->gorc) / 10000;
4230 u32 dif = (adapter->gotc > adapter->gorc ?
4231 adapter->gotc - adapter->gorc :
4232 adapter->gorc - adapter->gotc) / 10000;
4233 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
4235 ew32(ITR, 1000000000 / (itr * 256));
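/*
 * With this simple policy, perfectly symmetric Tx/Rx traffic gives
 * dif == 0 and therefore 2000 interrupts/sec, purely one-directional
 * traffic gives dif == goc and therefore 8000, and mixed loads scale
 * linearly in between before being converted to 256 ns units by the
 * write above.
 */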
4238 /* Cause software interrupt to ensure Rx ring is cleaned */
4239 if (adapter->msix_entries)
4240 ew32(ICS, adapter->rx_ring->ims_val);
4242 ew32(ICS, E1000_ICS_RXDMT0);
4244 /* Force detection of hung controller every watchdog period */
4245 adapter->detect_tx_hung = 1;
4248 * With 82571 controllers, LAA may be overwritten due to controller
4249 * reset from the other port. Set the appropriate LAA in RAR[0]
4251 if (e1000e_get_laa_state_82571(hw))
4252 e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
4254 /* Reset the timer */
4255 if (!test_bit(__E1000_DOWN, &adapter->state))
4256 mod_timer(&adapter->watchdog_timer,
4257 round_jiffies(jiffies + 2 * HZ));
4260 #define E1000_TX_FLAGS_CSUM 0x00000001
4261 #define E1000_TX_FLAGS_VLAN 0x00000002
4262 #define E1000_TX_FLAGS_TSO 0x00000004
4263 #define E1000_TX_FLAGS_IPV4 0x00000008
4264 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
4265 #define E1000_TX_FLAGS_VLAN_SHIFT 16
4267 static int e1000_tso(struct e1000_adapter *adapter,
4268 struct sk_buff *skb)
4270 struct e1000_ring *tx_ring = adapter->tx_ring;
4271 struct e1000_context_desc *context_desc;
4272 struct e1000_buffer *buffer_info;
4275 u16 ipcse = 0, tucse, mss;
4276 u8 ipcss, ipcso, tucss, tucso, hdr_len;
4279 if (!skb_is_gso(skb))
4282 if (skb_header_cloned(skb)) {
4283 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
4288 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4289 mss = skb_shinfo(skb)->gso_size;
4290 if (skb->protocol == htons(ETH_P_IP)) {
4291 struct iphdr *iph = ip_hdr(skb);
4294 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
4296 cmd_length = E1000_TXD_CMD_IP;
4297 ipcse = skb_transport_offset(skb) - 1;
4298 } else if (skb_is_gso_v6(skb)) {
4299 ipv6_hdr(skb)->payload_len = 0;
4300 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4301 &ipv6_hdr(skb)->daddr,
4305 ipcss = skb_network_offset(skb);
4306 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
4307 tucss = skb_transport_offset(skb);
4308 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
4311 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
4312 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
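/*
 * For a common TCP/IPv4 TSO frame with no TCP options (14-byte Ethernet,
 * 20-byte IP and 20-byte TCP headers) hdr_len is 54, so the PAYLEN part
 * of cmd_length is skb->len - 54 and the MSS captured above (typically
 * 1460 for a 1500-byte MTU) tells the hardware how much payload to put
 * in each carved-out segment.
 */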
4314 i = tx_ring->next_to_use;
4315 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
4316 buffer_info = &tx_ring->buffer_info[i];
4318 context_desc->lower_setup.ip_fields.ipcss = ipcss;
4319 context_desc->lower_setup.ip_fields.ipcso = ipcso;
4320 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
4321 context_desc->upper_setup.tcp_fields.tucss = tucss;
4322 context_desc->upper_setup.tcp_fields.tucso = tucso;
4323 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
4324 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
4325 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
4326 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
4328 buffer_info->time_stamp = jiffies;
4329 buffer_info->next_to_watch = i;
4332 if (i == tx_ring->count)
4334 tx_ring->next_to_use = i;
4339 static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
4341 struct e1000_ring *tx_ring = adapter->tx_ring;
4342 struct e1000_context_desc *context_desc;
4343 struct e1000_buffer *buffer_info;
4346 u32 cmd_len = E1000_TXD_CMD_DEXT;
4349 if (skb->ip_summed != CHECKSUM_PARTIAL)
4352 if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
4353 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
4355 protocol = skb->protocol;
4358 case cpu_to_be16(ETH_P_IP):
4359 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
4360 cmd_len |= E1000_TXD_CMD_TCP;
4362 case cpu_to_be16(ETH_P_IPV6):
4363 /* XXX not handling all IPV6 headers */
4364 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
4365 cmd_len |= E1000_TXD_CMD_TCP;
4368 if (unlikely(net_ratelimit()))
4369 e_warn("checksum_partial proto=%x!\n",
4370 be16_to_cpu(protocol));
4374 css = skb_transport_offset(skb);
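/*
 * For plain checksum offload only the TCP/UDP fields of the context
 * descriptor matter: tucss is where the hardware starts checksumming
 * and tucso (css + skb->csum_offset) is where it stores the result.
 */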
4376 i = tx_ring->next_to_use;
4377 buffer_info = &tx_ring->buffer_info[i];
4378 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
4380 context_desc->lower_setup.ip_config = 0;
4381 context_desc->upper_setup.tcp_fields.tucss = css;
4382 context_desc->upper_setup.tcp_fields.tucso =
4383 css + skb->csum_offset;
4384 context_desc->upper_setup.tcp_fields.tucse = 0;
4385 context_desc->tcp_seg_setup.data = 0;
4386 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
4388 buffer_info->time_stamp = jiffies;
4389 buffer_info->next_to_watch = i;
4392 if (i == tx_ring->count)
4394 tx_ring->next_to_use = i;
4399 #define E1000_MAX_PER_TXD 8192
4400 #define E1000_MAX_TXD_PWR 12
4402 static int e1000_tx_map(struct e1000_adapter *adapter,
4403 struct sk_buff *skb, unsigned int first,
4404 unsigned int max_per_txd, unsigned int nr_frags,
4407 struct e1000_ring *tx_ring = adapter->tx_ring;
4408 struct pci_dev *pdev = adapter->pdev;
4409 struct e1000_buffer *buffer_info;
4410 unsigned int len = skb_headlen(skb);
4411 unsigned int offset = 0, size, count = 0, i;
4412 unsigned int f, bytecount, segs;
4414 i = tx_ring->next_to_use;
4417 buffer_info = &tx_ring->buffer_info[i];
4418 size = min(len, max_per_txd);
4420 buffer_info->length = size;
4421 buffer_info->time_stamp = jiffies;
4422 buffer_info->next_to_watch = i;
4423 buffer_info->dma = dma_map_single(&pdev->dev,
4425 size, DMA_TO_DEVICE);
4426 buffer_info->mapped_as_page = false;
4427 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
4436 if (i == tx_ring->count)
4441 for (f = 0; f < nr_frags; f++) {
4442 struct skb_frag_struct *frag;
4444 frag = &skb_shinfo(skb)->frags[f];
4446 offset = frag->page_offset;
4450 if (i == tx_ring->count)
4453 buffer_info = &tx_ring->buffer_info[i];
4454 size = min(len, max_per_txd);
4456 buffer_info->length = size;
4457 buffer_info->time_stamp = jiffies;
4458 buffer_info->next_to_watch = i;
4459 buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
4462 buffer_info->mapped_as_page = true;
4463 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
4472 segs = skb_shinfo(skb)->gso_segs ?: 1;
4473 /* account for the header copy carried by each additional TSO segment */
4474 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
4476 tx_ring->buffer_info[i].skb = skb;
4477 tx_ring->buffer_info[i].segs = segs;
4478 tx_ring->buffer_info[i].bytecount = bytecount;
4479 tx_ring->buffer_info[first].next_to_watch = i;
4484 dev_err(&pdev->dev, "TX DMA map failed\n");
4485 buffer_info->dma = 0;
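/*
 * Unwind on DMA error: walk backwards from the failing descriptor
 * and release every buffer already mapped for this skb.
 */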
4491 i += tx_ring->count;
4493 buffer_info = &tx_ring->buffer_info[i];
4494 e1000_put_txbuf(adapter, buffer_info);
4500 static void e1000_tx_queue(struct e1000_adapter *adapter,
4501 int tx_flags, int count)
4503 struct e1000_ring *tx_ring = adapter->tx_ring;
4504 struct e1000_tx_desc *tx_desc = NULL;
4505 struct e1000_buffer *buffer_info;
4506 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
4509 if (tx_flags & E1000_TX_FLAGS_TSO) {
4510 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
4512 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
4514 if (tx_flags & E1000_TX_FLAGS_IPV4)
4515 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
4518 if (tx_flags & E1000_TX_FLAGS_CSUM) {
4519 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
4520 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
4523 if (tx_flags & E1000_TX_FLAGS_VLAN) {
4524 txd_lower |= E1000_TXD_CMD_VLE;
4525 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
4528 i = tx_ring->next_to_use;
4531 buffer_info = &tx_ring->buffer_info[i];
4532 tx_desc = E1000_TX_DESC(*tx_ring, i);
4533 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4534 tx_desc->lower.data =
4535 cpu_to_le32(txd_lower | buffer_info->length);
4536 tx_desc->upper.data = cpu_to_le32(txd_upper);
4539 if (i == tx_ring->count)
4543 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
4546 * Force memory writes to complete before letting h/w
4547 * know there are new descriptors to fetch. (Only
4548 * applicable for weak-ordered memory model archs,
4553 tx_ring->next_to_use = i;
4554 writel(i, adapter->hw.hw_addr + tx_ring->tail);
4556 * We need this if more than one processor can write to our tail
4557 * at a time; it synchronizes IO on IA64/Altix systems.
4562 #define MINIMUM_DHCP_PACKET_SIZE 282
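/*
 * e1000_transfer_dhcp_info - hand outgoing DHCP frames to the
 * management firmware.  Only frames that carry the management VLAN
 * cookie (or are untagged) and are IPv4/UDP to port 67 are passed
 * along via e1000e_mng_write_dhcp_info().
 */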
4563 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
4564 struct sk_buff *skb)
4566 struct e1000_hw *hw = &adapter->hw;
4569 if (vlan_tx_tag_present(skb)) {
4570 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
4571 (adapter->hw.mng_cookie.status &
4572 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
4576 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
4579 if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
4583 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
4586 if (ip->protocol != IPPROTO_UDP)
4589 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
4590 if (ntohs(udp->dest) != 67)
4593 offset = (u8 *)udp + 8 - skb->data;
4594 length = skb->len - offset;
4595 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
4601 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
4603 struct e1000_adapter *adapter = netdev_priv(netdev);
4605 netif_stop_queue(netdev);
4607 * Herbert's original patch had:
4608 * smp_mb__after_netif_stop_queue();
4609 * but since that doesn't exist yet, just open code it.
4614 * We need to check again in a case another CPU has just
4615 * made room available.
4617 if (e1000_desc_unused(adapter->tx_ring) < size)
4621 netif_start_queue(netdev);
4622 ++adapter->restart_queue;
4626 static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
4628 struct e1000_adapter *adapter = netdev_priv(netdev);
4630 if (e1000_desc_unused(adapter->tx_ring) >= size)
4632 return __e1000_maybe_stop_tx(netdev, size);
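/*
 * TXD_USE_COUNT() gives a conservative estimate of how many data
 * descriptors a buffer of S bytes needs when each descriptor holds
 * at most 2^X bytes; it rounds up and may over-count by one.
 */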
4635 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
4636 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
4637 struct net_device *netdev)
4639 struct e1000_adapter *adapter = netdev_priv(netdev);
4640 struct e1000_ring *tx_ring = adapter->tx_ring;
4642 unsigned int max_per_txd = E1000_MAX_PER_TXD;
4643 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
4644 unsigned int tx_flags = 0;
4645 unsigned int len = skb_headlen(skb);
4646 unsigned int nr_frags;
4652 if (test_bit(__E1000_DOWN, &adapter->state)) {
4653 dev_kfree_skb_any(skb);
4654 return NETDEV_TX_OK;
4657 if (skb->len <= 0) {
4658 dev_kfree_skb_any(skb);
4659 return NETDEV_TX_OK;
4662 mss = skb_shinfo(skb)->gso_size;
4664 * The controller does a simple calculation to
4665 * make sure there is enough room in the FIFO before
4666 * initiating the DMA for each buffer. The calc is:
4667 * 4 = ceil(buffer len/mss). To make sure we don't
4668 * overrun the FIFO, adjust the max buffer len if mss
4673 max_per_txd = min(mss << 2, max_per_txd);
4674 max_txd_pwr = fls(max_per_txd) - 1;
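/*
 * Example: with a typical mss of 1448 bytes, max_per_txd becomes
 * min(5792, 8192) = 5792 and max_txd_pwr = fls(5792) - 1 = 12,
 * so TXD_USE_COUNT() counts descriptors in 4 KB chunks.
 */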
4677 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
4678 * points to just header, pull a few bytes of payload from
4679 * frags into skb->data
4681 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4683 * we do this workaround for ES2LAN, but it is unnecessary,
4684 * avoiding it could save a lot of cycles
4686 if (skb->data_len && (hdr_len == len)) {
4687 unsigned int pull_size;
4689 pull_size = min((unsigned int)4, skb->data_len);
4690 if (!__pskb_pull_tail(skb, pull_size)) {
4691 e_err("__pskb_pull_tail failed.\n");
4692 dev_kfree_skb_any(skb);
4693 return NETDEV_TX_OK;
4695 len = skb_headlen(skb);
4699 /* reserve a descriptor for the offload context */
4700 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
4704 count += TXD_USE_COUNT(len, max_txd_pwr);
4706 nr_frags = skb_shinfo(skb)->nr_frags;
4707 for (f = 0; f < nr_frags; f++)
4708 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
4711 if (adapter->hw.mac.tx_pkt_filtering)
4712 e1000_transfer_dhcp_info(adapter, skb);
4715 * need: count + 2 desc gap to keep tail from touching
4716 * head, otherwise try next time
4718 if (e1000_maybe_stop_tx(netdev, count + 2))
4719 return NETDEV_TX_BUSY;
4721 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
4722 tx_flags |= E1000_TX_FLAGS_VLAN;
4723 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
4726 first = tx_ring->next_to_use;
4728 tso = e1000_tso(adapter, skb);
4730 dev_kfree_skb_any(skb);
4731 return NETDEV_TX_OK;
4735 tx_flags |= E1000_TX_FLAGS_TSO;
4736 else if (e1000_tx_csum(adapter, skb))
4737 tx_flags |= E1000_TX_FLAGS_CSUM;
4740 * Old method was to assume IPv4 packet by default if TSO was enabled.
4741 * 82571 hardware supports TSO capabilities for IPv6 as well...
4742 * so we can no longer assume IPv4; check the protocol explicitly.
4744 if (skb->protocol == htons(ETH_P_IP))
4745 tx_flags |= E1000_TX_FLAGS_IPV4;
4747 /* if count is 0 then a DMA mapping error has occurred */
4748 count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
4750 e1000_tx_queue(adapter, tx_flags, count);
4751 /* Make sure there is space in the ring for the next send. */
4752 e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
4755 dev_kfree_skb_any(skb);
4756 tx_ring->buffer_info[first].time_stamp = 0;
4757 tx_ring->next_to_use = first;
4760 return NETDEV_TX_OK;
4764 * e1000_tx_timeout - Respond to a Tx Hang
4765 * @netdev: network interface device structure
4767 static void e1000_tx_timeout(struct net_device *netdev)
4769 struct e1000_adapter *adapter = netdev_priv(netdev);
4771 /* Do the reset outside of interrupt context */
4772 adapter->tx_timeout_count++;
4773 schedule_work(&adapter->reset_task);
4776 static void e1000_reset_task(struct work_struct *work)
4778 struct e1000_adapter *adapter;
4779 adapter = container_of(work, struct e1000_adapter, reset_task);
4781 e1000e_dump(adapter);
4782 e_err("Reset adapter\n");
4783 e1000e_reinit_locked(adapter);
4787 * e1000_get_stats - Get System Network Statistics
4788 * @netdev: network interface device structure
4790 * Returns the address of the device statistics structure.
4791 * The statistics are actually updated from the timer callback.
4793 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
4795 /* only return the current stats */
4796 return &netdev->stats;
4800 * e1000_change_mtu - Change the Maximum Transfer Unit
4801 * @netdev: network interface device structure
4802 * @new_mtu: new value for maximum frame size
4804 * Returns 0 on success, negative on failure
4806 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
4808 struct e1000_adapter *adapter = netdev_priv(netdev);
4809 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
4811 /* Jumbo frame support */
4812 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
4813 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
4814 e_err("Jumbo Frames not supported.\n");
4818 /* Supported frame sizes */
4819 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
4820 (max_frame > adapter->max_hw_frame_size)) {
4821 e_err("Unsupported MTU setting\n");
4825 /* 82573 Errata 17 */
4826 if (((adapter->hw.mac.type == e1000_82573) ||
4827 (adapter->hw.mac.type == e1000_82574)) &&
4828 (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
4829 adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
4830 e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
4833 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
4835 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
4836 adapter->max_frame_size = max_frame;
4837 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
4838 netdev->mtu = new_mtu;
4839 if (netif_running(netdev))
4840 e1000e_down(adapter);
4843 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
4844 * means we reserve 2 more, this pushes us to allocate from the next
4846 * i.e. RXBUFFER_2048 --> size-4096 slab
4847 * However with the new *_jumbo_rx* routines, jumbo receives will use
4851 if (max_frame <= 2048)
4852 adapter->rx_buffer_len = 2048;
4854 adapter->rx_buffer_len = 4096;
4856 /* adjust allocation if LPE protects us, and we aren't using SBP */
4857 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
4858 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
4859 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
4862 if (netif_running(netdev))
4865 e1000e_reset(adapter);
4867 clear_bit(__E1000_RESETTING, &adapter->state);
4872 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4875 struct e1000_adapter *adapter = netdev_priv(netdev);
4876 struct mii_ioctl_data *data = if_mii(ifr);
4878 if (adapter->hw.phy.media_type != e1000_media_type_copper)
4883 data->phy_id = adapter->hw.phy.addr;
4886 e1000_phy_read_status(adapter);
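/*
 * Serve standard MII register reads (BMCR, BMSR, PHY ID, ANAR, LPA,
 * and friends) from the snapshot cached in adapter->phy_regs rather
 * than touching the MDIO bus for every SIOCGMIIREG request.
 */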
4888 switch (data->reg_num & 0x1F) {
4890 data->val_out = adapter->phy_regs.bmcr;
4893 data->val_out = adapter->phy_regs.bmsr;
4896 data->val_out = (adapter->hw.phy.id >> 16);
4899 data->val_out = (adapter->hw.phy.id & 0xFFFF);
4902 data->val_out = adapter->phy_regs.advertise;
4905 data->val_out = adapter->phy_regs.lpa;
4908 data->val_out = adapter->phy_regs.expansion;
4911 data->val_out = adapter->phy_regs.ctrl1000;
4914 data->val_out = adapter->phy_regs.stat1000;
4917 data->val_out = adapter->phy_regs.estatus;
4930 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4936 return e1000_mii_ioctl(netdev, ifr, cmd);
4942 static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
4944 struct e1000_hw *hw = &adapter->hw;
4949 /* copy MAC RARs to PHY RARs */
4950 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
4952 /* copy MAC MTA to PHY MTA */
4953 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
4954 mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
4955 e1e_wphy(hw, BM_MTA(i), (u16)(mac_reg & 0xFFFF));
4956 e1e_wphy(hw, BM_MTA(i) + 1, (u16)((mac_reg >> 16) & 0xFFFF));
4959 /* configure PHY Rx Control register */
4960 e1e_rphy(&adapter->hw, BM_RCTL, &phy_reg);
4961 mac_reg = er32(RCTL);
4962 if (mac_reg & E1000_RCTL_UPE)
4963 phy_reg |= BM_RCTL_UPE;
4964 if (mac_reg & E1000_RCTL_MPE)
4965 phy_reg |= BM_RCTL_MPE;
4966 phy_reg &= ~(BM_RCTL_MO_MASK);
4967 if (mac_reg & E1000_RCTL_MO_3)
4968 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
4969 << BM_RCTL_MO_SHIFT);
4970 if (mac_reg & E1000_RCTL_BAM)
4971 phy_reg |= BM_RCTL_BAM;
4972 if (mac_reg & E1000_RCTL_PMCF)
4973 phy_reg |= BM_RCTL_PMCF;
4974 mac_reg = er32(CTRL);
4975 if (mac_reg & E1000_CTRL_RFCE)
4976 phy_reg |= BM_RCTL_RFCE;
4977 e1e_wphy(&adapter->hw, BM_RCTL, phy_reg);
4979 /* enable PHY wakeup in MAC register */
4981 ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
4983 /* configure and enable PHY wakeup in PHY registers */
4984 e1e_wphy(&adapter->hw, BM_WUFC, wufc);
4985 e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
4987 /* activate PHY wakeup */
4988 retval = hw->phy.ops.acquire(hw);
4990 e_err("Could not acquire PHY\n");
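/*
 * The wakeup registers live on BM PHY page 769 (BM_WUC_ENABLE_PAGE);
 * select that page and set the enable/host-wakeup bits through raw
 * MDIC accesses while the PHY is held.
 */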
4993 e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4994 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
4995 retval = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg);
4997 e_err("Could not read PHY page 769\n");
5000 phy_reg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
5001 retval = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
5003 e_err("Could not set PHY Host Wakeup bit\n");
5005 hw->phy.ops.release(hw);
5010 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
5013 struct net_device *netdev = pci_get_drvdata(pdev);
5014 struct e1000_adapter *adapter = netdev_priv(netdev);
5015 struct e1000_hw *hw = &adapter->hw;
5016 u32 ctrl, ctrl_ext, rctl, status;
5017 /* Runtime suspend should only enable wakeup for link changes */
5018 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
5021 netif_device_detach(netdev);
5023 if (netif_running(netdev)) {
5024 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
5025 e1000e_down(adapter);
5026 e1000_free_irq(adapter);
5028 e1000e_reset_interrupt_capability(adapter);
5030 retval = pci_save_state(pdev);
5034 status = er32(STATUS);
5035 if (status & E1000_STATUS_LU)
5036 wufc &= ~E1000_WUFC_LNKC;
5039 e1000_setup_rctl(adapter);
5040 e1000_set_multi(netdev);
5042 /* turn on all-multi mode if wake on multicast is enabled */
5043 if (wufc & E1000_WUFC_MC) {
5045 rctl |= E1000_RCTL_MPE;
5050 /* advertise wake from D3Cold */
5051 #define E1000_CTRL_ADVD3WUC 0x00100000
5052 /* phy power management enable */
5053 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5054 ctrl |= E1000_CTRL_ADVD3WUC;
5055 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
5056 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
5059 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
5060 adapter->hw.phy.media_type ==
5061 e1000_media_type_internal_serdes) {
5062 /* keep the laser running in D3 */
5063 ctrl_ext = er32(CTRL_EXT);
5064 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
5065 ew32(CTRL_EXT, ctrl_ext);
5068 if (adapter->flags & FLAG_IS_ICH)
5069 e1000e_disable_gig_wol_ich8lan(&adapter->hw);
5071 /* Allow time for pending master requests to run */
5072 e1000e_disable_pcie_master(&adapter->hw);
5074 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
5075 /* enable wakeup by the PHY */
5076 retval = e1000_init_phy_wakeup(adapter, wufc);
5080 /* enable wakeup by the MAC */
5082 ew32(WUC, E1000_WUC_PME_EN);
5089 *enable_wake = !!wufc;
5091 /* make sure adapter isn't asleep if manageability is enabled */
5092 if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
5093 (hw->mac.ops.check_mng_mode(hw)))
5094 *enable_wake = true;
5096 if (adapter->hw.phy.type == e1000_phy_igp_3)
5097 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
5100 * Release control of h/w to f/w. If f/w is AMT enabled, this
5101 * would have already happened in close and is redundant.
5103 e1000_release_hw_control(adapter);
5105 pci_disable_device(pdev);
5110 static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
5112 if (sleep && wake) {
5113 pci_prepare_to_sleep(pdev);
5117 pci_wake_from_d3(pdev, wake);
5118 pci_set_power_state(pdev, PCI_D3hot);
5121 static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
5124 struct net_device *netdev = pci_get_drvdata(pdev);
5125 struct e1000_adapter *adapter = netdev_priv(netdev);
5128 * The pci-e switch on some quad port adapters will report a
5129 * correctable error when the MAC transitions from D0 to D3. To
5130 * prevent this we need to mask off the correctable errors on the
5131 * downstream port of the pci-e switch.
5133 if (adapter->flags & FLAG_IS_QUAD_PORT) {
5134 struct pci_dev *us_dev = pdev->bus->self;
5135 int pos = pci_find_capability(us_dev, PCI_CAP_ID_EXP);
5138 pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
5139 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
5140 (devctl & ~PCI_EXP_DEVCTL_CERE));
5142 e1000_power_off(pdev, sleep, wake);
5144 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
5146 e1000_power_off(pdev, sleep, wake);
5150 #ifdef CONFIG_PCIEASPM
5151 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5153 pci_disable_link_state(pdev, state);
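/*
 * With CONFIG_PCIEASPM the kernel's ASPM core owns the link state and
 * pci_disable_link_state() is sufficient; the fallback variant below
 * clears the ASPM bits in LNKCTL of the device and its upstream
 * bridge by hand.
 */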
5156 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5162 * Both device and parent should have the same ASPM setting.
5163 * Disable ASPM in downstream component first and then upstream.
5165 pos = pci_pcie_cap(pdev);
5166 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
5168 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
5170 if (!pdev->bus->self)
5173 pos = pci_pcie_cap(pdev->bus->self);
5174 pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
5176 pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
5179 void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5181 dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
5182 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
5183 (state & PCIE_LINK_STATE_L1) ? "L1" : "");
5185 __e1000e_disable_aspm(pdev, state);
5188 #ifdef CONFIG_PM_OPS
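/*
 * The adapter is considered ready for power-management transitions
 * once its Tx ring buffers exist, i.e. after open() has set up the
 * rings.
 */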
5189 static bool e1000e_pm_ready(struct e1000_adapter *adapter)
5191 return !!adapter->tx_ring->buffer_info;
5194 static int __e1000_resume(struct pci_dev *pdev)
5196 struct net_device *netdev = pci_get_drvdata(pdev);
5197 struct e1000_adapter *adapter = netdev_priv(netdev);
5198 struct e1000_hw *hw = &adapter->hw;
5201 pci_set_power_state(pdev, PCI_D0);
5202 pci_restore_state(pdev);
5203 pci_save_state(pdev);
5204 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
5205 e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
5207 e1000e_set_interrupt_capability(adapter);
5208 if (netif_running(netdev)) {
5209 err = e1000_request_irq(adapter);
5214 e1000e_power_up_phy(adapter);
5216 /* report the system wakeup cause from S3/S4 */
5217 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
5220 e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
5222 e_info("PHY Wakeup cause - %s\n",
5223 phy_data & E1000_WUS_EX ? "Unicast Packet" :
5224 phy_data & E1000_WUS_MC ? "Multicast Packet" :
5225 phy_data & E1000_WUS_BC ? "Broadcast Packet" :
5226 phy_data & E1000_WUS_MAG ? "Magic Packet" :
5227 phy_data & E1000_WUS_LNKC ? "Link Status Change" :
5228 "other");
5230 e1e_wphy(&adapter->hw, BM_WUS, ~0);
5232 u32 wus = er32(WUS);
5234 e_info("MAC Wakeup cause - %s\n",
5235 wus & E1000_WUS_EX ? "Unicast Packet" :
5236 wus & E1000_WUS_MC ? "Multicast Packet" :
5237 wus & E1000_WUS_BC ? "Broadcast Packet" :
5238 wus & E1000_WUS_MAG ? "Magic Packet" :
5239 wus & E1000_WUS_LNKC ? "Link Status Change" :
5245 e1000e_reset(adapter);
5247 e1000_init_manageability_pt(adapter);
5249 if (netif_running(netdev))
5252 netif_device_attach(netdev);
5255 * If the controller has AMT, do not set DRV_LOAD until the interface
5256 * is up. For all other cases, let the f/w know that the h/w is now
5257 * under the control of the driver.
5259 if (!(adapter->flags & FLAG_HAS_AMT))
5260 e1000_get_hw_control(adapter);
5265 #ifdef CONFIG_PM_SLEEP
5266 static int e1000_suspend(struct device *dev)
5268 struct pci_dev *pdev = to_pci_dev(dev);
5272 retval = __e1000_shutdown(pdev, &wake, false);
5274 e1000_complete_shutdown(pdev, true, wake);
5279 static int e1000_resume(struct device *dev)
5281 struct pci_dev *pdev = to_pci_dev(dev);
5282 struct net_device *netdev = pci_get_drvdata(pdev);
5283 struct e1000_adapter *adapter = netdev_priv(netdev);
5285 if (e1000e_pm_ready(adapter))
5286 adapter->idle_check = true;
5288 return __e1000_resume(pdev);
5290 #endif /* CONFIG_PM_SLEEP */
5292 #ifdef CONFIG_PM_RUNTIME
5293 static int e1000_runtime_suspend(struct device *dev)
5295 struct pci_dev *pdev = to_pci_dev(dev);
5296 struct net_device *netdev = pci_get_drvdata(pdev);
5297 struct e1000_adapter *adapter = netdev_priv(netdev);
5299 if (e1000e_pm_ready(adapter)) {
5302 __e1000_shutdown(pdev, &wake, true);
5308 static int e1000_idle(struct device *dev)
5310 struct pci_dev *pdev = to_pci_dev(dev);
5311 struct net_device *netdev = pci_get_drvdata(pdev);
5312 struct e1000_adapter *adapter = netdev_priv(netdev);
5314 if (!e1000e_pm_ready(adapter))
5317 if (adapter->idle_check) {
5318 adapter->idle_check = false;
5319 if (!e1000e_has_link(adapter))
5320 pm_schedule_suspend(dev, MSEC_PER_SEC);
5326 static int e1000_runtime_resume(struct device *dev)
5328 struct pci_dev *pdev = to_pci_dev(dev);
5329 struct net_device *netdev = pci_get_drvdata(pdev);
5330 struct e1000_adapter *adapter = netdev_priv(netdev);
5332 if (!e1000e_pm_ready(adapter))
5335 adapter->idle_check = !dev->power.runtime_auto;
5336 return __e1000_resume(pdev);
5338 #endif /* CONFIG_PM_RUNTIME */
5339 #endif /* CONFIG_PM_OPS */
5341 static void e1000_shutdown(struct pci_dev *pdev)
5345 __e1000_shutdown(pdev, &wake, false);
5347 if (system_state == SYSTEM_POWER_OFF)
5348 e1000_complete_shutdown(pdev, false, wake);
5351 #ifdef CONFIG_NET_POLL_CONTROLLER
5353 * Polling 'interrupt' - used by things like netconsole to send skbs
5354 * without having to re-enable interrupts. It's not called while
5355 * the interrupt routine is executing.
5357 static void e1000_netpoll(struct net_device *netdev)
5359 struct e1000_adapter *adapter = netdev_priv(netdev);
5361 disable_irq(adapter->pdev->irq);
5362 e1000_intr(adapter->pdev->irq, netdev);
5364 enable_irq(adapter->pdev->irq);
5369 * e1000_io_error_detected - called when PCI error is detected
5370 * @pdev: Pointer to PCI device
5371 * @state: The current pci connection state
5373 * This function is called after a PCI bus error affecting
5374 * this device has been detected.
5376 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5377 pci_channel_state_t state)
5379 struct net_device *netdev = pci_get_drvdata(pdev);
5380 struct e1000_adapter *adapter = netdev_priv(netdev);
5382 netif_device_detach(netdev);
5384 if (state == pci_channel_io_perm_failure)
5385 return PCI_ERS_RESULT_DISCONNECT;
5387 if (netif_running(netdev))
5388 e1000e_down(adapter);
5389 pci_disable_device(pdev);
5391 /* Request a slot reset. */
5392 return PCI_ERS_RESULT_NEED_RESET;
5396 * e1000_io_slot_reset - called after the pci bus has been reset.
5397 * @pdev: Pointer to PCI device
5399 * Restart the card from scratch, as if from a cold-boot. Implementation
5400 * resembles the first-half of the e1000_resume routine.
5402 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5404 struct net_device *netdev = pci_get_drvdata(pdev);
5405 struct e1000_adapter *adapter = netdev_priv(netdev);
5406 struct e1000_hw *hw = &adapter->hw;
5408 pci_ers_result_t result;
5410 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
5411 e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
5412 err = pci_enable_device_mem(pdev);
5415 "Cannot re-enable PCI device after reset.\n");
5416 result = PCI_ERS_RESULT_DISCONNECT;
5418 pci_set_master(pdev);
5419 pdev->state_saved = true;
5420 pci_restore_state(pdev);
5422 pci_enable_wake(pdev, PCI_D3hot, 0);
5423 pci_enable_wake(pdev, PCI_D3cold, 0);
5425 e1000e_reset(adapter);
5427 result = PCI_ERS_RESULT_RECOVERED;
5430 pci_cleanup_aer_uncorrect_error_status(pdev);
5436 * e1000_io_resume - called when traffic can start flowing again.
5437 * @pdev: Pointer to PCI device
5439 * This callback is called when the error recovery driver tells us that
5440 * it's OK to resume normal operation. Implementation resembles the
5441 * second-half of the e1000_resume routine.
5443 static void e1000_io_resume(struct pci_dev *pdev)
5445 struct net_device *netdev = pci_get_drvdata(pdev);
5446 struct e1000_adapter *adapter = netdev_priv(netdev);
5448 e1000_init_manageability_pt(adapter);
5450 if (netif_running(netdev)) {
5451 if (e1000e_up(adapter)) {
5453 "can't bring device back up after reset\n");
5458 netif_device_attach(netdev);
5461 * If the controller has AMT, do not set DRV_LOAD until the interface
5462 * is up. For all other cases, let the f/w know that the h/w is now
5463 * under the control of the driver.
5465 if (!(adapter->flags & FLAG_HAS_AMT))
5466 e1000_get_hw_control(adapter);
5470 static void e1000_print_device_info(struct e1000_adapter *adapter)
5472 struct e1000_hw *hw = &adapter->hw;
5473 struct net_device *netdev = adapter->netdev;
5476 /* print bus type/speed/width info */
5477 e_info("(PCI Express:2.5GB/s:%s) %pM\n",
5479 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
5483 e_info("Intel(R) PRO/%s Network Connection\n",
5484 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
5485 e1000e_read_pba_num(hw, &pba_num);
5486 e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
5487 hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff));
5490 static void e1000_eeprom_checks(struct e1000_adapter *adapter)
5492 struct e1000_hw *hw = &adapter->hw;
5496 if (hw->mac.type != e1000_82573)
5499 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
5500 if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) {
5501 /* Deep Smart Power Down (DSPD) */
5502 dev_warn(&adapter->pdev->dev,
5503 "Warning: detected DSPD enabled in EEPROM\n");
5507 static const struct net_device_ops e1000e_netdev_ops = {
5508 .ndo_open = e1000_open,
5509 .ndo_stop = e1000_close,
5510 .ndo_start_xmit = e1000_xmit_frame,
5511 .ndo_get_stats = e1000_get_stats,
5512 .ndo_set_multicast_list = e1000_set_multi,
5513 .ndo_set_mac_address = e1000_set_mac,
5514 .ndo_change_mtu = e1000_change_mtu,
5515 .ndo_do_ioctl = e1000_ioctl,
5516 .ndo_tx_timeout = e1000_tx_timeout,
5517 .ndo_validate_addr = eth_validate_addr,
5519 .ndo_vlan_rx_register = e1000_vlan_rx_register,
5520 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
5521 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
5522 #ifdef CONFIG_NET_POLL_CONTROLLER
5523 .ndo_poll_controller = e1000_netpoll,
5528 * e1000_probe - Device Initialization Routine
5529 * @pdev: PCI device information struct
5530 * @ent: entry in e1000_pci_tbl
5532 * Returns 0 on success, negative on failure
5534 * e1000_probe initializes an adapter identified by a pci_dev structure.
5535 * The OS initialization, configuring of the adapter private structure,
5536 * and a hardware reset occur.
5538 static int __devinit e1000_probe(struct pci_dev *pdev,
5539 const struct pci_device_id *ent)
5541 struct net_device *netdev;
5542 struct e1000_adapter *adapter;
5543 struct e1000_hw *hw;
5544 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
5545 resource_size_t mmio_start, mmio_len;
5546 resource_size_t flash_start, flash_len;
5548 static int cards_found;
5549 int i, err, pci_using_dac;
5550 u16 eeprom_data = 0;
5551 u16 eeprom_apme_mask = E1000_EEPROM_APME;
5553 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
5554 e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
5556 err = pci_enable_device_mem(pdev);
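/*
 * Prefer 64-bit DMA; if either the streaming or the coherent mask
 * cannot be set, fall back to 32-bit addressing before giving up.
 */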
5561 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
5563 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
5567 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
5569 err = dma_set_coherent_mask(&pdev->dev,
5572 dev_err(&pdev->dev, "No usable DMA "
5573 "configuration, aborting\n");
5579 err = pci_request_selected_regions_exclusive(pdev,
5580 pci_select_bars(pdev, IORESOURCE_MEM),
5581 e1000e_driver_name);
5585 /* AER (Advanced Error Reporting) hooks */
5586 pci_enable_pcie_error_reporting(pdev);
5588 pci_set_master(pdev);
5589 /* PCI config space info */
5590 err = pci_save_state(pdev);
5592 goto err_alloc_etherdev;
5595 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
5597 goto err_alloc_etherdev;
5599 SET_NETDEV_DEV(netdev, &pdev->dev);
5601 netdev->irq = pdev->irq;
5603 pci_set_drvdata(pdev, netdev);
5604 adapter = netdev_priv(netdev);
5606 adapter->netdev = netdev;
5607 adapter->pdev = pdev;
5609 adapter->pba = ei->pba;
5610 adapter->flags = ei->flags;
5611 adapter->flags2 = ei->flags2;
5612 adapter->hw.adapter = adapter;
5613 adapter->hw.mac.type = ei->mac;
5614 adapter->max_hw_frame_size = ei->max_hw_frame_size;
5615 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
5617 mmio_start = pci_resource_start(pdev, 0);
5618 mmio_len = pci_resource_len(pdev, 0);
5621 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
5622 if (!adapter->hw.hw_addr)
5625 if ((adapter->flags & FLAG_HAS_FLASH) &&
5626 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
5627 flash_start = pci_resource_start(pdev, 1);
5628 flash_len = pci_resource_len(pdev, 1);
5629 adapter->hw.flash_address = ioremap(flash_start, flash_len);
5630 if (!adapter->hw.flash_address)
5634 /* construct the net_device struct */
5635 netdev->netdev_ops = &e1000e_netdev_ops;
5636 e1000e_set_ethtool_ops(netdev);
5637 netdev->watchdog_timeo = 5 * HZ;
5638 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
5639 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
5641 netdev->mem_start = mmio_start;
5642 netdev->mem_end = mmio_start + mmio_len;
5644 adapter->bd_number = cards_found++;
5646 e1000e_check_options(adapter);
5648 /* setup adapter struct */
5649 err = e1000_sw_init(adapter);
5655 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
5656 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
5657 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
5659 err = ei->get_variants(adapter);
5663 if ((adapter->flags & FLAG_IS_ICH) &&
5664 (adapter->flags & FLAG_READ_ONLY_NVM))
5665 e1000e_write_protect_nvm_ich8lan(&adapter->hw);
5667 hw->mac.ops.get_bus_info(&adapter->hw);
5669 adapter->hw.phy.autoneg_wait_to_complete = 0;
5671 /* Copper options */
5672 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
5673 adapter->hw.phy.mdix = AUTO_ALL_MODES;
5674 adapter->hw.phy.disable_polarity_correction = 0;
5675 adapter->hw.phy.ms_type = e1000_ms_hw_default;
5678 if (e1000_check_reset_block(&adapter->hw))
5679 e_info("PHY reset is blocked due to SOL/IDER session.\n");
5681 netdev->features = NETIF_F_SG |
5683 NETIF_F_HW_VLAN_TX |
5686 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
5687 netdev->features |= NETIF_F_HW_VLAN_FILTER;
5689 netdev->features |= NETIF_F_TSO;
5690 netdev->features |= NETIF_F_TSO6;
5692 netdev->vlan_features |= NETIF_F_TSO;
5693 netdev->vlan_features |= NETIF_F_TSO6;
5694 netdev->vlan_features |= NETIF_F_HW_CSUM;
5695 netdev->vlan_features |= NETIF_F_SG;
5698 netdev->features |= NETIF_F_HIGHDMA;
5700 if (e1000e_enable_mng_pass_thru(&adapter->hw))
5701 adapter->flags |= FLAG_MNG_PT_ENABLED;
5704 * before reading the NVM, reset the controller to
5705 * put the device in a known good starting state
5707 adapter->hw.mac.ops.reset_hw(&adapter->hw);
5710 * systems with ASPM and others may see the checksum fail on the first
5711 * attempt. Let's give it a few tries
5714 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
5717 e_err("The NVM Checksum Is Not Valid\n");
5723 e1000_eeprom_checks(adapter);
5725 /* copy the MAC address */
5726 if (e1000e_read_mac_addr(&adapter->hw))
5727 e_err("NVM Read Error while reading MAC address\n");
5729 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
5730 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
5732 if (!is_valid_ether_addr(netdev->perm_addr)) {
5733 e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
5738 init_timer(&adapter->watchdog_timer);
5739 adapter->watchdog_timer.function = &e1000_watchdog;
5740 adapter->watchdog_timer.data = (unsigned long) adapter;
5742 init_timer(&adapter->phy_info_timer);
5743 adapter->phy_info_timer.function = &e1000_update_phy_info;
5744 adapter->phy_info_timer.data = (unsigned long) adapter;
5746 INIT_WORK(&adapter->reset_task, e1000_reset_task);
5747 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
5748 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
5749 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
5750 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
5752 /* Initialize link parameters. User can change them with ethtool */
5753 adapter->hw.mac.autoneg = 1;
5754 adapter->fc_autoneg = 1;
5755 adapter->hw.fc.requested_mode = e1000_fc_default;
5756 adapter->hw.fc.current_mode = e1000_fc_default;
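/* 0x2f advertises 10/100 half and full duplex plus 1000 Mb/s full duplex */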
5757 adapter->hw.phy.autoneg_advertised = 0x2f;
5759 /* ring size defaults */
5760 adapter->rx_ring->count = 256;
5761 adapter->tx_ring->count = 256;
5764 * Initial Wake on LAN setting - If APM wake is enabled in
5765 * the EEPROM, enable the ACPI Magic Packet filter
5767 if (adapter->flags & FLAG_APME_IN_WUC) {
5768 /* APME bit in EEPROM is mapped to WUC.APME */
5769 eeprom_data = er32(WUC);
5770 eeprom_apme_mask = E1000_WUC_APME;
5771 if (eeprom_data & E1000_WUC_PHY_WAKE)
5772 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
5773 } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
5774 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
5775 (adapter->hw.bus.func == 1))
5776 e1000_read_nvm(&adapter->hw,
5777 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
5779 e1000_read_nvm(&adapter->hw,
5780 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
5783 /* fetch WoL from EEPROM */
5784 if (eeprom_data & eeprom_apme_mask)
5785 adapter->eeprom_wol |= E1000_WUFC_MAG;
5788 * now that we have the eeprom settings, apply the special cases
5789 * where the eeprom may be wrong or the board simply won't support
5790 * wake on lan on a particular port
5792 if (!(adapter->flags & FLAG_HAS_WOL))
5793 adapter->eeprom_wol = 0;
5795 /* initialize the wol settings based on the eeprom settings */
5796 adapter->wol = adapter->eeprom_wol;
5797 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
5799 /* save off EEPROM version number */
5800 e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
5802 /* reset the hardware with the new settings */
5803 e1000e_reset(adapter);
5806 * If the controller has AMT, do not set DRV_LOAD until the interface
5807 * is up. For all other cases, let the f/w know that the h/w is now
5808 * under the control of the driver.
5810 if (!(adapter->flags & FLAG_HAS_AMT))
5811 e1000_get_hw_control(adapter);
5813 strcpy(netdev->name, "eth%d");
5814 err = register_netdev(netdev);
5818 /* carrier off reporting is important to ethtool even BEFORE open */
5819 netif_carrier_off(netdev);
5821 e1000_print_device_info(adapter);
5823 if (pci_dev_run_wake(pdev)) {
5824 pm_runtime_set_active(&pdev->dev);
5825 pm_runtime_enable(&pdev->dev);
5827 pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC);
5832 if (!(adapter->flags & FLAG_HAS_AMT))
5833 e1000_release_hw_control(adapter);
5835 if (!e1000_check_reset_block(&adapter->hw))
5836 e1000_phy_hw_reset(&adapter->hw);
5839 kfree(adapter->tx_ring);
5840 kfree(adapter->rx_ring);
5842 if (adapter->hw.flash_address)
5843 iounmap(adapter->hw.flash_address);
5844 e1000e_reset_interrupt_capability(adapter);
5846 iounmap(adapter->hw.hw_addr);
5848 free_netdev(netdev);
5850 pci_release_selected_regions(pdev,
5851 pci_select_bars(pdev, IORESOURCE_MEM));
5854 pci_disable_device(pdev);
5859 * e1000_remove - Device Removal Routine
5860 * @pdev: PCI device information struct
5862 * e1000_remove is called by the PCI subsystem to alert the driver
5863 * that it should release a PCI device. This could be caused by a
5864 * Hot-Plug event, or because the driver is going to be removed from
5867 static void __devexit e1000_remove(struct pci_dev *pdev)
5869 struct net_device *netdev = pci_get_drvdata(pdev);
5870 struct e1000_adapter *adapter = netdev_priv(netdev);
5871 bool down = test_bit(__E1000_DOWN, &adapter->state);
5873 pm_runtime_get_sync(&pdev->dev);
5876 * flush_scheduled_work() may reschedule our watchdog task, so
5877 * explicitly disable watchdog tasks from being rescheduled
5880 set_bit(__E1000_DOWN, &adapter->state);
5881 del_timer_sync(&adapter->watchdog_timer);
5882 del_timer_sync(&adapter->phy_info_timer);
5884 cancel_work_sync(&adapter->reset_task);
5885 cancel_work_sync(&adapter->watchdog_task);
5886 cancel_work_sync(&adapter->downshift_task);
5887 cancel_work_sync(&adapter->update_phy_task);
5888 cancel_work_sync(&adapter->print_hang_task);
5889 flush_scheduled_work();
5891 if (!(netdev->flags & IFF_UP))
5892 e1000_power_down_phy(adapter);
5894 /* Don't lie to e1000_close() down the road. */
5896 clear_bit(__E1000_DOWN, &adapter->state);
5897 unregister_netdev(netdev);
5899 if (pci_dev_run_wake(pdev)) {
5900 pm_runtime_disable(&pdev->dev);
5901 pm_runtime_set_suspended(&pdev->dev);
5903 pm_runtime_put_noidle(&pdev->dev);
5906 * Release control of h/w to f/w. If f/w is AMT enabled, this
5907 * would have already happened in close and is redundant.
5909 e1000_release_hw_control(adapter);
5911 e1000e_reset_interrupt_capability(adapter);
5912 kfree(adapter->tx_ring);
5913 kfree(adapter->rx_ring);
5915 iounmap(adapter->hw.hw_addr);
5916 if (adapter->hw.flash_address)
5917 iounmap(adapter->hw.flash_address);
5918 pci_release_selected_regions(pdev,
5919 pci_select_bars(pdev, IORESOURCE_MEM));
5921 free_netdev(netdev);
5924 pci_disable_pcie_error_reporting(pdev);
5926 pci_disable_device(pdev);
5929 /* PCI Error Recovery (ERS) */
5930 static struct pci_error_handlers e1000_err_handler = {
5931 .error_detected = e1000_io_error_detected,
5932 .slot_reset = e1000_io_slot_reset,
5933 .resume = e1000_io_resume,
5936 static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
5937 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
5938 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
5939 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
5940 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
5941 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
5942 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
5943 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
5944 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
5945 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
5947 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
5948 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
5949 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
5950 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
5952 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
5953 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
5954 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
5956 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
5957 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
5958 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
5960 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
5961 board_80003es2lan },
5962 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
5963 board_80003es2lan },
5964 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
5965 board_80003es2lan },
5966 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
5967 board_80003es2lan },
5969 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
5970 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
5971 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
5972 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
5973 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
5974 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
5975 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
5976 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
5978 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
5979 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
5980 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
5981 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
5982 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
5983 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
5984 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
5985 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
5986 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
5988 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
5989 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
5990 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
5992 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
5993 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
5994 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
5996 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
5997 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
5998 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
5999 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
6001 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
6002 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
6004 { } /* terminate list */
6006 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
6008 #ifdef CONFIG_PM_OPS
6009 static const struct dev_pm_ops e1000_pm_ops = {
6010 SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
6011 SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
6012 e1000_runtime_resume, e1000_idle)
6016 /* PCI Device API Driver */
6017 static struct pci_driver e1000_driver = {
6018 .name = e1000e_driver_name,
6019 .id_table = e1000_pci_tbl,
6020 .probe = e1000_probe,
6021 .remove = __devexit_p(e1000_remove),
6022 #ifdef CONFIG_PM_OPS
6023 .driver.pm = &e1000_pm_ops,
6025 .shutdown = e1000_shutdown,
6026 .err_handler = &e1000_err_handler
6030 * e1000_init_module - Driver Registration Routine
6032 * e1000_init_module is the first routine called when the driver is
6033 * loaded. All it does is register with the PCI subsystem.
6035 static int __init e1000_init_module(void)
6038 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
6039 e1000e_driver_version);
6040 pr_info("Copyright (c) 1999 - 2010 Intel Corporation.\n");
6041 ret = pci_register_driver(&e1000_driver);
6045 module_init(e1000_init_module);
6048 * e1000_exit_module - Driver Exit Cleanup Routine
6050 * e1000_exit_module is called just before the driver is removed
6053 static void __exit e1000_exit_module(void)
6055 pci_unregister_driver(&e1000_driver);
6057 module_exit(e1000_exit_module);
6060 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
6061 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
6062 MODULE_LICENSE("GPL");
6063 MODULE_VERSION(DRV_VERSION);