/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos_params.h>
#include <linux/pm_runtime.h>
#include <linux/aer.h>

#include "e1000.h"

#define DRV_VERSION "1.0.2-k2"
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_82574]		= &e1000_82574_info,
	[board_82583]		= &e1000_82583_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
	[board_ich10lan]	= &e1000_ich10_info,
	[board_pchlan]		= &e1000_pch_info,
};

struct e1000_reg_info {
	u32 ofs;
	char *name;
};

#define E1000_RDFH	0x02410	/* Rx Data FIFO Head - RW */
#define E1000_RDFT	0x02418	/* Rx Data FIFO Tail - RW */
#define E1000_RDFHS	0x02420	/* Rx Data FIFO Head Saved - RW */
#define E1000_RDFTS	0x02428	/* Rx Data FIFO Tail Saved - RW */
#define E1000_RDFPC	0x02430	/* Rx Data FIFO Packet Count - RW */

#define E1000_TDFH	0x03410	/* Tx Data FIFO Head - RW */
#define E1000_TDFT	0x03418	/* Tx Data FIFO Tail - RW */
#define E1000_TDFHS	0x03420	/* Tx Data FIFO Head Saved - RW */
#define E1000_TDFTS	0x03428	/* Tx Data FIFO Tail Saved - RW */
#define E1000_TDFPC	0x03430	/* Tx Data FIFO Packet Count - RW */

static const struct e1000_reg_info e1000_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* Rx Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN, "RDLEN"},
	{E1000_RDH, "RDH"},
	{E1000_RDT, "RDT"},
	{E1000_RDTR, "RDTR"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_ERT, "ERT"},
	{E1000_RDBAL, "RDBAL"},
	{E1000_RDBAH, "RDBAH"},
	{E1000_RDFH, "RDFH"},
	{E1000_RDFT, "RDFT"},
	{E1000_RDFHS, "RDFHS"},
	{E1000_RDFTS, "RDFTS"},
	{E1000_RDFPC, "RDFPC"},

	/* Tx Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL, "TDBAL"},
	{E1000_TDBAH, "TDBAH"},
	{E1000_TDLEN, "TDLEN"},
	{E1000_TDH, "TDH"},
	{E1000_TDT, "TDT"},
	{E1000_TIDV, "TIDV"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TADV, "TADV"},
	{E1000_TARC(0), "TARC"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFTS, "TDFTS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * e1000_regdump - register printout routine
 */
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_RXDCTL(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TXDCTL(n));
		break;
	case E1000_TARC(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TARC(n));
		break;
	default:
		printk(KERN_INFO "%-15s %08x\n",
		       reginfo->name, __er32(hw, reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
	printk(KERN_INFO "%-15s ", rname);
	for (n = 0; n < 2; n++)
		printk(KERN_CONT "%08x ", regs[n]);
	printk(KERN_CONT "\n");
}

/*
 * e1000e_dump - Print registers, tx-ring and rx-ring
 */
static void e1000e_dump(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_reg_info *reginfo;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct e1000_buffer *buffer_info;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	union e1000_rx_desc_packet_split *rx_desc_ps;
	struct e1000_rx_desc *rx_desc;
	struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		printk(KERN_INFO "Device Name     state            "
		       "trans_start      last_rx\n");
		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
		       netdev->name, netdev->state, netdev->trans_start,
		       netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	printk(KERN_INFO " Register Name   Value\n");
	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
	     reginfo->name; reginfo++) {
		e1000_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
	       " leng ntw timestamp\n");
	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
	printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
	       0, tx_ring->next_to_use, tx_ring->next_to_clean,
	       (u64)buffer_info->dma,
	       buffer_info->length,
	       buffer_info->next_to_watch,
	       (u64)buffer_info->time_stamp);

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS    | Status |  CMD   |  CSO  |    Length   |
	 *   +--------------------------------------------------------------+
	 *   63       48 47       36 35    32 31    24 23   16 15           0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
	 */
	printk(KERN_INFO "Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen]"
	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
	       "<-- Legacy format\n");
	printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
	       "<-- Ext Context format\n");
	printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen]"
	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
	       "<-- Ext Data format\n");
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];
		u0 = (struct my_u0 *)tx_desc;
		printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX "
		       "%04X  %3X %016llX %p",
		       (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
			((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
		       le64_to_cpu(u0->a), le64_to_cpu(u0->b),
		       (u64)buffer_info->dma, buffer_info->length,
		       buffer_info->next_to_watch,
		       (u64)buffer_info->time_stamp,
		       buffer_info->skb);
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			printk(KERN_CONT " NTC/U\n");
		else if (i == tx_ring->next_to_use)
			printk(KERN_CONT " NTU\n");
		else if (i == tx_ring->next_to_clean)
			printk(KERN_CONT " NTC\n");
		else
			printk(KERN_CONT "\n");

		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, phys_to_virt(buffer_info->dma),
				       buffer_info->length, true);
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC]\n");
	printk(KERN_INFO " %5d %5X %5X\n", 0,
	       rx_ring->next_to_use, rx_ring->next_to_clean);

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
	switch (adapter->rx_ps_pages) {
	case 1:
	case 2:
	case 3:
		/* [Extended] Packet Split Receive Descriptor Format
		 *
		 *    +-----------------------------------------------------+
		 *  0 |                Buffer Address 0 [63:0]              |
		 *    +-----------------------------------------------------+
		 *  8 |                Buffer Address 1 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 16 |                Buffer Address 2 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 24 |                Buffer Address 3 [63:0]              |
		 *    +-----------------------------------------------------+
		 */
		printk(KERN_INFO "R  [desc]      [buffer 0 63:0 ] "
		       "[buffer 1 63:0 ] "
		       "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] "
		       "[bi->skb] <-- Ext Pkt Split format\n");
		/* [Extended] Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31     13 12    8 7    4 3        0
		 *   +------------------------------------------------------+
		 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
		 *   | Checksum | Ident  |         | Queue |      |  Type   |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		printk(KERN_INFO "RWB[desc]      [ck ipid mrqhsh] "
		       "[vl   l0 ee  es] "
		       "[ l3  l2  l1 hs] [reserved      ] ---------------- "
		       "[bi->skb] <-- Ext Rx Write-Back format\n");
		for (i = 0; i < rx_ring->count; i++) {
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc_ps;
			staterr =
			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);
			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				printk(KERN_INFO "RWB[0x%03X]     %016llX "
				       "%016llX %016llX %016llX "
				       "---------------- %p", i,
				       le64_to_cpu(u1->a),
				       le64_to_cpu(u1->b),
				       le64_to_cpu(u1->c),
				       le64_to_cpu(u1->d),
				       buffer_info->skb);
			} else {
				printk(KERN_INFO "R  [0x%03X]     %016llX "
				       "%016llX %016llX %016llX %016llX %p", i,
				       le64_to_cpu(u1->a),
				       le64_to_cpu(u1->b),
				       le64_to_cpu(u1->c),
				       le64_to_cpu(u1->d),
				       (u64)buffer_info->dma,
				       buffer_info->skb);

				if (netif_msg_pktdata(adapter))
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS, 16, 1,
						phys_to_virt(buffer_info->dma),
						adapter->rx_ps_bsize0, true);
			}

			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");
		}
		break;
	default:
	case 0:
		/* Legacy Receive Descriptor Format
		 *
		 * +-----------------------------------------------------+
		 * |                Buffer Address [63:0]                |
		 * +-----------------------------------------------------+
		 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
		 * +-----------------------------------------------------+
		 * 63       48 47    40 39      32 31         16 15      0
		 */
		printk(KERN_INFO "Rl[desc]     [address 63:0  ] "
		       "[vl er S cks ln] [bi->dma       ] [bi->skb] "
		       "<-- Legacy format\n");
		for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
			rx_desc = E1000_RX_DESC(*rx_ring, i);
			buffer_info = &rx_ring->buffer_info[i];
			u0 = (struct my_u0 *)rx_desc;
			printk(KERN_INFO "Rl[0x%03X]    %016llX %016llX "
			       "%016llX %p",
			       i, le64_to_cpu(u0->a), le64_to_cpu(u0->b),
			       (u64)buffer_info->dma, buffer_info->skb);
			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

			if (netif_msg_pktdata(adapter))
				print_hex_dump(KERN_INFO, "",
					       DUMP_PREFIX_ADDRESS, 16, 1,
					       phys_to_virt(buffer_info->dma),
					       adapter->rx_buffer_len, true);
		}
	}

exit:
	return;
}

/**
 * e1000_desc_unused - calculate if we have unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
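
/*
 * Worked example for e1000_desc_unused(): with count = 256,
 * next_to_clean = 10 and next_to_use = 250, 256 + 10 - 250 - 1 = 15
 * descriptors can still be handed to hardware.  The "- 1" keeps one
 * slot permanently empty so that a completely full ring never looks
 * identical to a completely empty one.
 */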

/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev, struct sk_buff *skb,
			      u8 status, __le16 vlan)
{
	skb->protocol = eth_type_trans(skb, netdev);

	if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
		vlan_gro_receive(&adapter->napi, adapter->vlgrp,
				 le16_to_cpu(vlan), skb);
	else
		napi_gro_receive(&adapter->napi, skb);
}

/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @sk_buff: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;
	/* TCP/UDP checksum error bit is set */
	if (errors & E1000_RXD_ERR_TCPE) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status & E1000_RXD_STAT_TCPCS) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/*
		 * IP fragment with UDP payload
		 * Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		__sum16 sum = (__force __sum16)htons(csum);
		skb->csum = csum_unfold(~sum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	adapter->hw_csum_good++;
}
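
/*
 * Note on the CHECKSUM_COMPLETE path above: the hardware reports the
 * one's complement of the 16-bit sum over the payload, so complementing
 * it again via csum_unfold(~sum) recovers the raw sum the stack expects
 * in skb->csum, letting it validate the checksum of the reassembled
 * fragments itself.
 */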

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
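
/*
 * Note on the tail update above: next_to_use points at the next slot the
 * driver will fill, so the value written to the tail register is backed
 * up by one entry (with wrap-around), i.e. the last descriptor actually
 * handed to hardware.
 */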

/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j+1] =
				    ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(GFP_ATOMIC);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = dma_map_page(&pdev->dev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(&pdev->dev,
						      ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"RX DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/*
			 * Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j+1] =
			    cpu_to_le64(ps_page->dma);
		}

		skb = netdev_alloc_skb_ip_align(netdev,
						adapter->rx_ps_bsize0);
		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_ps_bsize0,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		if (i-- == 0)
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		/*
		 * Hardware increments by 16 bytes, but packet split
		 * descriptors are 32 bytes...so we increment tail
		 * twice as much.
		 */
		writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
	}
}
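
/*
 * Worked example for the "i << 1" above: packet split descriptors are
 * 32 bytes while the tail register counts 16-byte units, so descriptor
 * index 100 is written as tail value 200.
 */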

/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16 /* for skb_reserve */;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma)
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							PAGE_SIZE,
							DMA_FROM_DEVICE);

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}

/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev,
				 buffer_info->dma,
				 adapter->rx_buffer_len,
				 DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/*
		 * !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (status & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		/* adjust length to remove Ethernet CRC */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			length -= 4;

		total_rx_bytes += length;
		total_rx_packets++;

		/*
		 * code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb_ip_align(netdev, length);
			if (new_skb) {
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
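
/*
 * The copybreak cutoff used above is a module parameter (256 bytes by
 * default): frames shorter than it are copied into a right-sized skb so
 * the full-length receive buffer can be recycled immediately, which
 * helps small-packet workloads at the cost of one short memcpy.
 */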

static void e1000_put_txbuf(struct e1000_adapter *adapter,
			    struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}
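
/*
 * Note: buffers holding skb frags are mapped with dma_map_page() at
 * transmit time, hence the mapped_as_page flag above; pairing a mapping
 * with the wrong dma_unmap_*() variant is an API violation that
 * CONFIG_DMA_API_DEBUG will flag.
 */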

static void e1000_print_hw_hang(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     print_hang_task);
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status, phy_1000t_status, phy_ext_status;
	u16 pci_status;

	e1e_rphy(hw, PHY_STATUS, &phy_status);
	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);

	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);

	/* detected Hardware unit hang */
	e_err("Detected Hardware Unit Hang:\n"
	      "  TDH                  <%x>\n"
	      "  TDT                  <%x>\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  jiffies              <%lx>\n"
	      "  next_to_watch.status <%x>\n"
	      "MAC Status             <%x>\n"
	      "PHY Status             <%x>\n"
	      "PHY 1000BASE-T Status  <%x>\n"
	      "PHY Extended Status    <%x>\n"
	      "PCI Status             <%x>\n",
	      readl(adapter->hw.hw_addr + tx_ring->head),
	      readl(adapter->hw.hw_addr + tx_ring->tail),
	      tx_ring->next_to_use,
	      tx_ring->next_to_clean,
	      tx_ring->buffer_info[eop].time_stamp,
	      eop,
	      jiffies,
	      eop_desc->upper.fields.status,
	      er32(STATUS),
	      phy_status,
	      phy_1000t_status,
	      phy_ext_status,
	      pci_status);
}

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		for (; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				struct sk_buff *skb = buffer_info->skb;
				unsigned int segs, bytecount;
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_tx_packets += segs;
				total_tx_bytes += bytecount;
			}

			e1000_put_txbuf(adapter, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		if (i == tx_ring->next_to_use)
			break;
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	if (count && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/*
		 * Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = 0;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
			       + (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
			schedule_work(&adapter->print_hang_task);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	netdev->stats.tx_bytes += total_tx_bytes;
	netdev->stats.tx_packets += total_tx_packets;
	return (count < tx_ring->count);
}
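
/*
 * Worked example for the byte accounting in e1000_clean_tx_irq(): a TSO
 * skb with gso_segs = 4, skb_headlen() = 66 (assuming the linear area
 * holds only the headers) and skb->len = 6000 is counted as
 * (4 - 1) * 66 + 6000 = 6198 bytes, i.e. the payload plus one replicated
 * header for each extra segment put on the wire.
 */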

/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				  int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 adapter->rx_ps_bsize0,
				 DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		/* see !EOP comment in other rx routine */
		if (!(staterr & E1000_RXD_STAT_EOP))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			e_dbg("Packet Split buffers didn't pick up the full "
			      "packet\n");
			dev_kfree_skb_irq(skb);
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			e_dbg("Last part of the packet spanning multiple "
			      "descriptors\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
		/*
		 * this looks ugly, but it seems compiler issues make it
		 * more efficient than reusing j
		 */
		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

		/*
		 * page alloc/put takes too long and effects small packet
		 * throughput, so unsplit small packets and save the alloc/put
		 * only valid in softirq (napi) context to call kmap_*
		 */
		if (l1 && (l1 <= copybreak) &&
		    ((length + l1) <= adapter->rx_ps_bsize0)) {
			u8 *vaddr;

			ps_page = &buffer_info->ps_pages[0];

			/*
			 * there is no documentation about how to call
			 * kmap_atomic, so we can't hold the mapping
			 * very long
			 */
			dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
						PAGE_SIZE, DMA_FROM_DEVICE);
			vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
			memcpy(skb_tail_pointer(skb), vaddr, l1);
			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
			dma_sync_single_for_device(&pdev->dev, ps_page->dma,
						   PAGE_SIZE, DMA_FROM_DEVICE);

			/* remove the CRC */
			if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
				l1 -= 4;

			skb_put(skb, l1);
			goto copydone;
		} /* if */
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		/* strip the ethernet crc, problem is we're using pages now so
		 * this whole operation can get a little cpu intensive
		 */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			pskb_trim(skb, skb->len - 4);

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);

		if (rx_desc->wb.upper.header_status &
		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
}

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		++i;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
			/* recycle both page and skb */
			buffer_info->skb = skb;
			/* an error means any chain goes out the window
			 * too */
			if (rx_ring->rx_skb_top)
				dev_kfree_skb(rx_ring->rx_skb_top);
			rx_ring->rx_skb_top = NULL;
			goto next_desc;
		}

#define rxtop rx_ring->rx_skb_top
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page,
							KM_SKB_DATA_SOFTIRQ);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr,
						      KM_SKB_DATA_SOFTIRQ);
					/* re-use the page, so don't erase
					 * buffer_info->page */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page,
							   0, length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				dma_unmap_page(&pdev->dev, buffer_info->dma,
					       PAGE_SIZE,
					       DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				break;
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	adapter->flags2 &= ~FLAG2_IS_DISCARDING;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

static void e1000e_downshift_workaround(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
					struct e1000_adapter, downshift_task);

	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}

/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/*
	 * read ICR disables interrupts using IAM
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}

/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, icr = er32(ICR);

	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
		return IRQ_NONE;  /* Not our interrupt */

	/*
	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/*
	 * Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked.  No need for the
	 * IMC write
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}

static irqreturn_t e1000_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (!(icr & E1000_ICR_INT_ASSERTED)) {
		if (!test_bit(__E1000_DOWN, &adapter->state))
			ew32(IMS, E1000_IMS_OTHER);
		return IRQ_NONE;
	}

	if (icr & adapter->eiac_mask)
		ew32(ICS, (icr & adapter->eiac_mask));

	if (icr & E1000_ICR_OTHER) {
		if (!(icr & E1000_ICR_LSC))
			goto no_link_interrupt;
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

no_link_interrupt:
	if (!test_bit(__E1000_DOWN, &adapter->state))
		ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);

	return IRQ_HANDLED;
}

static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	if (!e1000_clean_tx_irq(adapter))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(ICS, tx_ring->ims_val);

	return IRQ_HANDLED;
}

static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (adapter->rx_ring->set_itr) {
		writel(1000000000 / (adapter->rx_ring->itr_val * 256),
		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
		adapter->rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

/**
 * e1000_configure_msix - Configure MSI-X hardware
 *
 * e1000_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void e1000_configure_msix(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int vector = 0;
	u32 ctrl_ext, ivar = 0;

	adapter->eiac_mask = 0;

	/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
	if (hw->mac.type == e1000_82574) {
		u32 rfctl = er32(RFCTL);
		rfctl |= E1000_RFCTL_ACK_DIS;
		ew32(RFCTL, rfctl);
	}

#define E1000_IVAR_INT_ALLOC_VALID	0x8
	/* Configure Rx vector */
	rx_ring->ims_val = E1000_IMS_RXQ0;
	adapter->eiac_mask |= rx_ring->ims_val;
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       hw->hw_addr + rx_ring->itr_register);
	else
		writel(1, hw->hw_addr + rx_ring->itr_register);
	ivar = E1000_IVAR_INT_ALLOC_VALID | vector;

	/* Configure Tx vector */
	tx_ring->ims_val = E1000_IMS_TXQ0;
	vector++;
	if (tx_ring->itr_val)
		writel(1000000000 / (tx_ring->itr_val * 256),
		       hw->hw_addr + tx_ring->itr_register);
	else
		writel(1, hw->hw_addr + tx_ring->itr_register);
	adapter->eiac_mask |= tx_ring->ims_val;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);

	/* set vector for Other Causes, e.g. link changes */
	vector++;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       hw->hw_addr + E1000_EITR_82574(vector));
	else
		writel(1, hw->hw_addr + E1000_EITR_82574(vector));

	/* Cause Tx interrupts on every write back */
	ivar |= (1 << 31);

	ew32(IVAR, ivar);

	/* enable MSI-X PBA support */
	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;

	/* Auto-Mask Other interrupts upon ICR read */
#define E1000_EIAC_MASK_82574   0x01F00000
	ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
	ctrl_ext |= E1000_CTRL_EXT_EIAME;
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();
}
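
/*
 * IVAR layout sketch for the 82574 as programmed above: bits 0-7 hold
 * the Rx queue vector, bits 8-15 the Tx queue vector, and bits 16-23 the
 * "other causes" vector, each tagged with INT_ALLOC_VALID (0x8).  With
 * vectors 0, 1 and 2 this yields ivar = 0x0A0908 before bit 31 is set to
 * request a Tx interrupt on every write back.
 */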

void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~FLAG_MSI_ENABLED;
	}
}

/**
 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
{
	int err;
	int numvecs, i;

	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		if (adapter->flags & FLAG_HAS_MSIX) {
			numvecs = 3; /* RxQ0, TxQ0 and other */
			adapter->msix_entries = kcalloc(numvecs,
						      sizeof(struct msix_entry),
						      GFP_KERNEL);
			if (adapter->msix_entries) {
				for (i = 0; i < numvecs; i++)
					adapter->msix_entries[i].entry = i;

				err = pci_enable_msix(adapter->pdev,
						      adapter->msix_entries,
						      numvecs);
				if (err == 0)
					return;
			}
			/* MSI-X failed, so fall through and try MSI */
			e_err("Failed to initialize MSI-X interrupts.  "
			      "Falling back to MSI interrupts.\n");
			e1000e_reset_interrupt_capability(adapter);
		}
		adapter->int_mode = E1000E_INT_MODE_MSI;
		/* Fall through */
	case E1000E_INT_MODE_MSI:
		if (!pci_enable_msi(adapter->pdev)) {
			adapter->flags |= FLAG_MSI_ENABLED;
		} else {
			adapter->int_mode = E1000E_INT_MODE_LEGACY;
			e_err("Failed to initialize MSI interrupts.  Falling "
			      "back to legacy interrupts.\n");
		}
		/* Fall through */
	case E1000E_INT_MODE_LEGACY:
		/* Don't do anything; this is the system default */
		break;
	}
}

/**
 * e1000_request_msix - Initialize MSI-X interrupts
 *
 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int e1000_request_msix(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;

	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
	else
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_intr_msix_rx, 0, adapter->rx_ring->name,
			  netdev);
	if (err)
		goto out;
	adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
	adapter->rx_ring->itr_val = adapter->itr;
	vector++;

	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
	else
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_intr_msix_tx, 0, adapter->tx_ring->name,
			  netdev);
	if (err)
		goto out;
	adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
	adapter->tx_ring->itr_val = adapter->itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	e1000_configure_msix(adapter);
	return 0;
out:
	return err;
}

/**
 * e1000_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->msix_entries) {
		err = e1000_request_msix(adapter);
		if (!err)
			return err;
		/* fall back to MSI */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_MSI;
		e1000e_set_interrupt_capability(adapter);
	}
	if (adapter->flags & FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
				  netdev->name, netdev);
		if (!err)
			return err;

		/* fall back to legacy interrupt */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_LEGACY;
	}

	err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
			  netdev->name, netdev);
	if (err)
		e_err("Unable to allocate interrupt, Error: %d\n", err);

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msix_entries) {
		int vector = 0;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		/* Other Causes interrupt vector */
		free_irq(adapter->msix_entries[vector].vector, netdev);
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	if (adapter->msix_entries)
		ew32(EIAC_82574, 0);
	e1e_flush();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
	} else {
		ew32(IMS, IMS_ENABLE_MASK);
	}
	e1e_flush();
}

/**
 * e1000_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 **/
static void e1000_get_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware know the driver has taken over */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}
}

/**
 * e1000_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT versions of the f/w
 * (only with 82573) this means that the network i/f is closed.
 **/
static void e1000_release_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware take over control of h/w */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
	}
}

/**
 * e1000_alloc_ring_dma - allocate memory for a ring structure
 **/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
				struct e1000_ring *ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
					GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

/**
 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int err = -ENOMEM, size;

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, tx_ring);
	if (err)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;
err:
	vfree(tx_ring->buffer_info);
	e_err("Unable to allocate memory for the transmit descriptor ring\n");
	return err;
}

/**
 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	int i, size, desc_len, err = -ENOMEM;

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
						sizeof(struct e1000_ps_page),
						GFP_KERNEL);
		if (!buffer_info->ps_pages)
			goto err_pages;
	}

	desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, rx_ring);
	if (err)
		goto err_pages;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->rx_skb_top = NULL;

	return 0;

err_pages:
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		kfree(buffer_info->ps_pages);
	}
err:
	vfree(rx_ring->buffer_info);
	e_err("Unable to allocate memory for the receive descriptor ring\n");
	return err;
}

/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/
static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_put_txbuf(adapter, buffer_info);
	}

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * e1000e_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void e1000e_free_tx_resources(struct e1000_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	e1000_clean_tx_ring(adapter);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);
	tx_ring->desc = NULL;
}

/**
 * e1000e_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void e1000e_free_rx_resources(struct e1000_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	int i;

	e1000_clean_rx_ring(adapter);

	for (i = 0; i < rx_ring->count; i++) {
		kfree(rx_ring->buffer_info[i].ps_pages);
	}

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
	rx_ring->desc = NULL;
}
2215 * e1000_update_itr - update the dynamic ITR value based on statistics
2216 * @adapter: pointer to adapter
2217 * @itr_setting: current adapter->itr
2218 * @packets: the number of packets during this measurement interval
2219 * @bytes: the number of bytes during this measurement interval
2221 * Stores a new ITR value based on packets and byte
2222 * counts during the last interrupt. The advantage of per interrupt
2223 * computation is faster updates and more accurate ITR for the current
2224 * traffic pattern. Constants in this function were computed
2225 * based on theoretical maximum wire speed and thresholds were set based
2226 * on testing data as well as attempting to minimize response time
2227 * while increasing bulk throughput. This functionality is controlled
2228 * by the InterruptThrottleRate module parameter.
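*
* A worked example of the low_latency case below (for illustration only):
* an interval of 8 packets / 12000 bytes has bytes > 10000 and packets < 10,
* so the rate steps down to bulk_latency, while 40 packets / 20000 bytes
* (packets > 35 with a small average size) steps back up toward
* lowest_latency.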
2230 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2231 u16 itr_setting, int packets,
2234 unsigned int retval = itr_setting;
2237 goto update_itr_done;
2239 switch (itr_setting) {
2240 case lowest_latency:
2241 /* handle TSO and jumbo frames */
2242 if (bytes/packets > 8000)
2243 retval = bulk_latency;
2244 else if ((packets < 5) && (bytes > 512)) {
2245 retval = low_latency;
2248 case low_latency: /* 50 usec aka 20000 ints/s */
2249 if (bytes > 10000) {
2250 /* this if handles the TSO accounting */
2251 if (bytes/packets > 8000) {
2252 retval = bulk_latency;
2253 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
2254 retval = bulk_latency;
2255 } else if (packets > 35) {
2256 retval = lowest_latency;
2258 } else if (bytes/packets > 2000) {
2259 retval = bulk_latency;
2260 } else if (packets <= 2 && bytes < 512) {
2261 retval = lowest_latency;
2264 case bulk_latency: /* 250 usec aka 4000 ints/s */
2265 if (bytes > 25000) {
2267 retval = low_latency;
2269 } else if (bytes < 6000) {
2270 retval = low_latency;
2279 static void e1000_set_itr(struct e1000_adapter *adapter)
2281 struct e1000_hw *hw = &adapter->hw;
2283 u32 new_itr = adapter->itr;
2285 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2286 if (adapter->link_speed != SPEED_1000) {
2292 adapter->tx_itr = e1000_update_itr(adapter,
2294 adapter->total_tx_packets,
2295 adapter->total_tx_bytes);
2296 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2297 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2298 adapter->tx_itr = low_latency;
2300 adapter->rx_itr = e1000_update_itr(adapter,
2302 adapter->total_rx_packets,
2303 adapter->total_rx_bytes);
2304 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2305 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2306 adapter->rx_itr = low_latency;
2308 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2310 switch (current_itr) {
2311 /* counts and packets in update_itr are dependent on these numbers */
2312 case lowest_latency:
2316 new_itr = 20000; /* aka hwitr = ~200 */
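/*
 * Cross-check on the "hwitr = ~200" note: new_itr is in interrupts/sec
 * and the ITR register programmed below counts the inter-interrupt gap
 * in 256 ns units, so 20000 ints/s maps to 10^9 / (20000 * 256) ~= 195,
 * i.e. roughly 50 us between interrupts.
 */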
2326 if (new_itr != adapter->itr) {
2328 * this attempts to bias the interrupt rate towards Bulk
2329 * by adding intermediate steps when the interrupt rate is increasing
2332 new_itr = new_itr > adapter->itr ?
2333 min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
2335 adapter->itr = new_itr;
2336 adapter->rx_ring->itr_val = new_itr;
2337 if (adapter->msix_entries)
2338 adapter->rx_ring->set_itr = 1;
2340 ew32(ITR, 1000000000 / (new_itr * 256));
2345 * e1000_alloc_queues - Allocate memory for all rings
2346 * @adapter: board private structure to initialize
2348 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
2350 adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
2351 if (!adapter->tx_ring)
2354 adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
2355 if (!adapter->rx_ring)
2360 e_err("Unable to allocate memory for queues\n");
2361 kfree(adapter->rx_ring);
2362 kfree(adapter->tx_ring);
2367 * e1000_clean - NAPI Rx polling callback
2368 * @napi: struct associated with this polling callback
2369 * @budget: number of packets the driver is allowed to process this poll
2371 static int e1000_clean(struct napi_struct *napi, int budget)
2373 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
2374 struct e1000_hw *hw = &adapter->hw;
2375 struct net_device *poll_dev = adapter->netdev;
2376 int tx_cleaned = 1, work_done = 0;
2378 adapter = netdev_priv(poll_dev);
2380 if (adapter->msix_entries &&
2381 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2384 tx_cleaned = e1000_clean_tx_irq(adapter);
2387 adapter->clean_rx(adapter, &work_done, budget);
2392 /* If budget not fully consumed, exit the polling mode */
2393 if (work_done < budget) {
2394 if (adapter->itr_setting & 3)
2395 e1000_set_itr(adapter);
2396 napi_complete(napi);
2397 if (!test_bit(__E1000_DOWN, &adapter->state)) {
2398 if (adapter->msix_entries)
2399 ew32(IMS, adapter->rx_ring->ims_val);
2401 e1000_irq_enable(adapter);
2408 static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2410 struct e1000_adapter *adapter = netdev_priv(netdev);
2411 struct e1000_hw *hw = &adapter->hw;
2414 /* don't update vlan cookie if already programmed */
2415 if ((adapter->hw.mng_cookie.status &
2416 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2417 (vid == adapter->mng_vlan_id))
2420 /* add VID to filter table */
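/*
 * The VLAN filter table is 4096 bits held in 128 32-bit VFTA registers:
 * bits 11:5 of the VID pick the register and bits 4:0 pick the bit.
 * For example, vid 100 sets bit 4 of VFTA[3] (100 >> 5 == 3,
 * 100 & 0x1F == 4).
 */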
2421 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2422 index = (vid >> 5) & 0x7F;
2423 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2424 vfta |= (1 << (vid & 0x1F));
2425 hw->mac.ops.write_vfta(hw, index, vfta);
2429 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2431 struct e1000_adapter *adapter = netdev_priv(netdev);
2432 struct e1000_hw *hw = &adapter->hw;
2435 if (!test_bit(__E1000_DOWN, &adapter->state))
2436 e1000_irq_disable(adapter);
2437 vlan_group_set_device(adapter->vlgrp, vid, NULL);
2439 if (!test_bit(__E1000_DOWN, &adapter->state))
2440 e1000_irq_enable(adapter);
2442 if ((adapter->hw.mng_cookie.status &
2443 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2444 (vid == adapter->mng_vlan_id)) {
2445 /* release control to f/w */
2446 e1000_release_hw_control(adapter);
2450 /* remove VID from filter table */
2451 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2452 index = (vid >> 5) & 0x7F;
2453 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2454 vfta &= ~(1 << (vid & 0x1F));
2455 hw->mac.ops.write_vfta(hw, index, vfta);
2459 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2461 struct net_device *netdev = adapter->netdev;
2462 u16 vid = adapter->hw.mng_cookie.vlan_id;
2463 u16 old_vid = adapter->mng_vlan_id;
2465 if (!adapter->vlgrp)
2468 if (!vlan_group_get_device(adapter->vlgrp, vid)) {
2469 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2470 if (adapter->hw.mng_cookie.status &
2471 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2472 e1000_vlan_rx_add_vid(netdev, vid);
2473 adapter->mng_vlan_id = vid;
2476 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
2478 !vlan_group_get_device(adapter->vlgrp, old_vid))
2479 e1000_vlan_rx_kill_vid(netdev, old_vid);
2481 adapter->mng_vlan_id = vid;
2486 static void e1000_vlan_rx_register(struct net_device *netdev,
2487 struct vlan_group *grp)
2489 struct e1000_adapter *adapter = netdev_priv(netdev);
2490 struct e1000_hw *hw = &adapter->hw;
2493 if (!test_bit(__E1000_DOWN, &adapter->state))
2494 e1000_irq_disable(adapter);
2495 adapter->vlgrp = grp;
2498 /* enable VLAN tag insert/strip */
2500 ctrl |= E1000_CTRL_VME;
2503 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2504 /* enable VLAN receive filtering */
2506 rctl &= ~E1000_RCTL_CFIEN;
2508 e1000_update_mng_vlan(adapter);
2511 /* disable VLAN tag insert/strip */
2513 ctrl &= ~E1000_CTRL_VME;
2516 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2517 if (adapter->mng_vlan_id !=
2518 (u16)E1000_MNG_VLAN_NONE) {
2519 e1000_vlan_rx_kill_vid(netdev,
2520 adapter->mng_vlan_id);
2521 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2526 if (!test_bit(__E1000_DOWN, &adapter->state))
2527 e1000_irq_enable(adapter);
2530 static void e1000_restore_vlan(struct e1000_adapter *adapter)
2534 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2536 if (!adapter->vlgrp)
2539 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2540 if (!vlan_group_get_device(adapter->vlgrp, vid))
2542 e1000_vlan_rx_add_vid(adapter->netdev, vid);
2546 static void e1000_init_manageability(struct e1000_adapter *adapter)
2548 struct e1000_hw *hw = &adapter->hw;
2551 if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2557 * enable receiving management packets to the host. this will probably
2558 * generate destination unreachable messages from the host OS, but
2559 * the packets will be handled on SMBUS
2561 manc |= E1000_MANC_EN_MNG2HOST;
2562 manc2h = er32(MANC2H);
2563 #define E1000_MNG2HOST_PORT_623 (1 << 5)
2564 #define E1000_MNG2HOST_PORT_664 (1 << 6)
2565 manc2h |= E1000_MNG2HOST_PORT_623;
2566 manc2h |= E1000_MNG2HOST_PORT_664;
2567 ew32(MANC2H, manc2h);
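/*
 * Bits 5 and 6 of MANC2H correspond to UDP ports 623 and 664, the
 * standard RMCP/ASF management ports, so packets to those ports are
 * forwarded to the host in addition to the manageability firmware.
 */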
2572 * e1000_configure_tx - Configure Transmit Unit after Reset
2573 * @adapter: board private structure
2575 * Configure the Tx unit of the MAC after a reset.
2577 static void e1000_configure_tx(struct e1000_adapter *adapter)
2579 struct e1000_hw *hw = &adapter->hw;
2580 struct e1000_ring *tx_ring = adapter->tx_ring;
2582 u32 tdlen, tctl, tipg, tarc;
2585 /* Setup the HW Tx Head and Tail descriptor pointers */
2586 tdba = tx_ring->dma;
2587 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
2588 ew32(TDBAL, (tdba & DMA_BIT_MASK(32)));
2589 ew32(TDBAH, (tdba >> 32));
2593 tx_ring->head = E1000_TDH;
2594 tx_ring->tail = E1000_TDT;
2596 /* Set the default values for the Tx Inter Packet Gap timer */
2597 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */
2598 ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */
2599 ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */
2601 if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
2602 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */
2604 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
2605 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
2608 /* Set the Tx Interrupt Delay register */
2609 ew32(TIDV, adapter->tx_int_delay);
2610 /* Tx irq moderation */
2611 ew32(TADV, adapter->tx_abs_int_delay);
2613 /* Program the Transmit Control Register */
2615 tctl &= ~E1000_TCTL_CT;
2616 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2617 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2619 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
2620 tarc = er32(TARC(0));
2622 * set the speed mode bit, we'll clear it if we're not at
2623 * gigabit link later
2625 #define SPEED_MODE_BIT (1 << 21)
2626 tarc |= SPEED_MODE_BIT;
2627 ew32(TARC(0), tarc);
2630 /* errata: program both queues to unweighted RR */
2631 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
2632 tarc = er32(TARC(0));
2634 ew32(TARC(0), tarc);
2635 tarc = er32(TARC(1));
2637 ew32(TARC(1), tarc);
2640 /* Setup Transmit Descriptor Settings for eop descriptor */
2641 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
2643 /* only set IDE if we are delaying interrupts using the timers */
2644 if (adapter->tx_int_delay)
2645 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2647 /* enable Report Status bit */
2648 adapter->txd_cmd |= E1000_TXD_CMD_RS;
2652 e1000e_config_collision_dist(hw);
2656 * e1000_setup_rctl - configure the receive control registers
2657 * @adapter: board private structure
2659 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
2660 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
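/*
 * Worked example with 4 KiB pages: PAGE_USE_COUNT(9000) =
 * (9000 >> 12) + (808 ? 1 : 0) = 2 + 1 = 3 pages for a 9000-byte MTU,
 * while an exact multiple such as 8192 needs only 8192 >> 12 = 2.
 */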
2661 static void e1000_setup_rctl(struct e1000_adapter *adapter)
2663 struct e1000_hw *hw = &adapter->hw;
2668 /* Program MC offset vector base */
2670 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2671 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
2672 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
2673 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2675 /* Do not store bad packets */
2676 rctl &= ~E1000_RCTL_SBP;
2678 /* Enable Long Packet receive */
2679 if (adapter->netdev->mtu <= ETH_DATA_LEN)
2680 rctl &= ~E1000_RCTL_LPE;
2682 rctl |= E1000_RCTL_LPE;
2684 /* Some systems expect that the CRC is included in SMBUS traffic. The
2685 * hardware strips the CRC before sending to both SMBUS (BMC) and to
2686 * host memory when this is enabled
2688 if (adapter->flags2 & FLAG2_CRC_STRIPPING)
2689 rctl |= E1000_RCTL_SECRC;
2691 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
2692 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
2695 e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
2697 phy_data |= (1 << 2);
2698 e1e_wphy(hw, PHY_REG(770, 26), phy_data);
2700 e1e_rphy(hw, 22, &phy_data);
2702 phy_data |= (1 << 14);
2703 e1e_wphy(hw, 0x10, 0x2823);
2704 e1e_wphy(hw, 0x11, 0x0003);
2705 e1e_wphy(hw, 22, phy_data);
2708 /* Setup buffer sizes */
2709 rctl &= ~E1000_RCTL_SZ_4096;
2710 rctl |= E1000_RCTL_BSEX;
2711 switch (adapter->rx_buffer_len) {
2714 rctl |= E1000_RCTL_SZ_2048;
2715 rctl &= ~E1000_RCTL_BSEX;
2718 rctl |= E1000_RCTL_SZ_4096;
2721 rctl |= E1000_RCTL_SZ_8192;
2724 rctl |= E1000_RCTL_SZ_16384;
2729 * 82571 and greater support packet-split where the protocol
2730 * header is placed in skb->data and the packet data is
2731 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2732 * In the case of a non-split, skb->data is linearly filled,
2733 * followed by the page buffers. Therefore, skb->data is
2734 * sized to hold the largest protocol header.
2736 * allocations using alloc_page take too long for regular MTU
2737 * so only enable packet split for jumbo frames
2739 * Using pages when the page size is greater than 16k wastes
2740 * a lot of memory, since we allocate 3 pages at all times
2743 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
2744 if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) &&
2745 (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
2746 adapter->rx_ps_pages = pages;
2748 adapter->rx_ps_pages = 0;
2750 if (adapter->rx_ps_pages) {
2751 /* Configure extra packet-split registers */
2752 rfctl = er32(RFCTL);
2753 rfctl |= E1000_RFCTL_EXTEN;
2755 * disable packet split support for IPv6 extension headers,
2756 * because some malformed IPv6 headers can hang the Rx
2758 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
2759 E1000_RFCTL_NEW_IPV6_EXT_DIS);
2763 /* Enable Packet split descriptors */
2764 rctl |= E1000_RCTL_DTYP_PS;
2766 psrctl |= adapter->rx_ps_bsize0 >>
2767 E1000_PSRCTL_BSIZE0_SHIFT;
2769 switch (adapter->rx_ps_pages) {
2771 psrctl |= PAGE_SIZE <<
2772 E1000_PSRCTL_BSIZE3_SHIFT;
2774 psrctl |= PAGE_SIZE <<
2775 E1000_PSRCTL_BSIZE2_SHIFT;
2777 psrctl |= PAGE_SIZE >>
2778 E1000_PSRCTL_BSIZE1_SHIFT;
2782 ew32(PSRCTL, psrctl);
2786 /* just started the receive unit, no need to restart */
2787 adapter->flags &= ~FLAG_RX_RESTART_NOW;
2791 * e1000_configure_rx - Configure Receive Unit after Reset
2792 * @adapter: board private structure
2794 * Configure the Rx unit of the MAC after a reset.
2796 static void e1000_configure_rx(struct e1000_adapter *adapter)
2798 struct e1000_hw *hw = &adapter->hw;
2799 struct e1000_ring *rx_ring = adapter->rx_ring;
2801 u32 rdlen, rctl, rxcsum, ctrl_ext;
2803 if (adapter->rx_ps_pages) {
2804 /* this is a 32 byte descriptor */
2805 rdlen = rx_ring->count *
2806 sizeof(union e1000_rx_desc_packet_split);
2807 adapter->clean_rx = e1000_clean_rx_irq_ps;
2808 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
2809 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
2810 rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
2811 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
2812 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
2814 rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
2815 adapter->clean_rx = e1000_clean_rx_irq;
2816 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
2819 /* disable receives while setting up the descriptors */
2821 ew32(RCTL, rctl & ~E1000_RCTL_EN);
2825 /* set the Receive Delay Timer Register */
2826 ew32(RDTR, adapter->rx_int_delay);
2828 /* irq moderation */
2829 ew32(RADV, adapter->rx_abs_int_delay);
2830 if (adapter->itr_setting != 0)
2831 ew32(ITR, 1000000000 / (adapter->itr * 256));
2833 ctrl_ext = er32(CTRL_EXT);
2834 /* Auto-Mask interrupts upon ICR access */
2835 ctrl_ext |= E1000_CTRL_EXT_IAME;
2836 ew32(IAM, 0xffffffff);
2837 ew32(CTRL_EXT, ctrl_ext);
2841 * Setup the HW Rx Head and Tail Descriptor Pointers and
2842 * the Base and Length of the Rx Descriptor Ring
2844 rdba = rx_ring->dma;
2845 ew32(RDBAL, (rdba & DMA_BIT_MASK(32)));
2846 ew32(RDBAH, (rdba >> 32));
2850 rx_ring->head = E1000_RDH;
2851 rx_ring->tail = E1000_RDT;
2853 /* Enable Receive Checksum Offload for TCP and UDP */
2854 rxcsum = er32(RXCSUM);
2855 if (adapter->flags & FLAG_RX_CSUM_ENABLED) {
2856 rxcsum |= E1000_RXCSUM_TUOFL;
2859 * IPv4 payload checksum for UDP fragments must be
2860 * used in conjunction with packet-split.
2862 if (adapter->rx_ps_pages)
2863 rxcsum |= E1000_RXCSUM_IPPCSE;
2865 rxcsum &= ~E1000_RXCSUM_TUOFL;
2866 /* no need to clear IPPCSE as it defaults to 0 */
2868 ew32(RXCSUM, rxcsum);
2871 * Enable early receives on supported devices; this only takes effect
2872 * when the packet size is equal to or larger than the specified value
2873 * (in 8-byte units), e.g. when using jumbo frames with E1000_ERT_2048
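*
* For reference (register value assumed from the hardware headers, not
* visible here): E1000_ERT_2048 is 0x100, and with the 8-byte granularity
* that is 256 * 8 = 2048 bytes received before early receive kicks in.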
2875 if (adapter->flags & FLAG_HAS_ERT) {
2876 if (adapter->netdev->mtu > ETH_DATA_LEN) {
2877 u32 rxdctl = er32(RXDCTL(0));
2878 ew32(RXDCTL(0), rxdctl | 0x3);
2879 ew32(ERT, E1000_ERT_2048 | (1 << 13));
2881 * With jumbo frames and early-receive enabled,
2882 * excessive C-state transition latencies result in
2883 * dropped transactions.
2885 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
2886 adapter->netdev->name, 55);
2888 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
2889 adapter->netdev->name,
2890 PM_QOS_DEFAULT_VALUE);
2894 /* Enable Receives */
2899 * e1000_update_mc_addr_list - Update Multicast addresses
2900 * @hw: pointer to the HW structure
2901 * @mc_addr_list: array of multicast addresses to program
2902 * @mc_addr_count: number of multicast addresses to program
2904 * Updates the Multicast Table Array.
2905 * The caller must have a packed mc_addr_list of multicast addresses.
2907 static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
2910 hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
2914 * e1000_set_multi - Multicast and Promiscuous mode set
2915 * @netdev: network interface device structure
2917 * The set_multi entry point is called whenever the multicast address
2918 * list or the network interface flags are updated. This routine is
2919 * responsible for configuring the hardware for proper multicast,
2920 * promiscuous mode, and all-multi behavior.
2922 static void e1000_set_multi(struct net_device *netdev)
2924 struct e1000_adapter *adapter = netdev_priv(netdev);
2925 struct e1000_hw *hw = &adapter->hw;
2926 struct netdev_hw_addr *ha;
2931 /* Check for Promiscuous and All Multicast modes */
2935 if (netdev->flags & IFF_PROMISC) {
2936 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2937 rctl &= ~E1000_RCTL_VFE;
2939 if (netdev->flags & IFF_ALLMULTI) {
2940 rctl |= E1000_RCTL_MPE;
2941 rctl &= ~E1000_RCTL_UPE;
2943 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
2945 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
2946 rctl |= E1000_RCTL_VFE;
2951 if (!netdev_mc_empty(netdev)) {
2952 mta_list = kmalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
2956 /* prepare a packed array of only addresses. */
2958 netdev_for_each_mc_addr(ha, netdev)
2959 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
2961 e1000_update_mc_addr_list(hw, mta_list, i);
2965 * if we're called from probe, we might not have
2966 * anything to do here, so clear out the list
2968 e1000_update_mc_addr_list(hw, NULL, 0);
2973 * e1000_configure - configure the hardware for Rx and Tx
2974 * @adapter: private board structure
2976 static void e1000_configure(struct e1000_adapter *adapter)
2978 e1000_set_multi(adapter->netdev);
2980 e1000_restore_vlan(adapter);
2981 e1000_init_manageability(adapter);
2983 e1000_configure_tx(adapter);
2984 e1000_setup_rctl(adapter);
2985 e1000_configure_rx(adapter);
2986 adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring));
2990 * e1000e_power_up_phy - restore link in case the phy was powered down
2991 * @adapter: address of board private structure
2993 * The phy may be powered down to save power and turn off link when the
2994 * driver is unloaded and wake on lan is not enabled (among others)
2995 * *** this routine MUST be followed by a call to e1000e_reset ***
2997 void e1000e_power_up_phy(struct e1000_adapter *adapter)
2999 if (adapter->hw.phy.ops.power_up)
3000 adapter->hw.phy.ops.power_up(&adapter->hw);
3002 adapter->hw.mac.ops.setup_link(&adapter->hw);
3006 * e1000_power_down_phy - Power down the PHY
3008 * Power down the PHY so no link is implied when interface is down.
3009 * The PHY cannot be powered down if management or WoL is active.
3011 static void e1000_power_down_phy(struct e1000_adapter *adapter)
3013 /* WoL is enabled */
3017 if (adapter->hw.phy.ops.power_down)
3018 adapter->hw.phy.ops.power_down(&adapter->hw);
3022 * e1000e_reset - bring the hardware into a known good state
3024 * This function boots the hardware and enables some settings that
3025 * require a configuration cycle of the hardware - those cannot be
3026 * set/changed during runtime. After reset the device needs to be
3027 * properly configured for Rx, Tx etc.
3029 void e1000e_reset(struct e1000_adapter *adapter)
3031 struct e1000_mac_info *mac = &adapter->hw.mac;
3032 struct e1000_fc_info *fc = &adapter->hw.fc;
3033 struct e1000_hw *hw = &adapter->hw;
3034 u32 tx_space, min_tx_space, min_rx_space;
3035 u32 pba = adapter->pba;
3038 /* reset Packet Buffer Allocation to default */
3041 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
3043 * To maintain wire speed transmits, the Tx FIFO should be
3044 * large enough to accommodate two full transmit packets,
3045 * rounded up to the next 1KB and expressed in KB. Likewise,
3046 * the Rx FIFO should be large enough to accommodate at least
3047 * one full receive packet, similarly rounded up and expressed in KB.
3051 /* upper 16 bits has Tx packet buffer allocation size in KB */
3052 tx_space = pba >> 16;
3053 /* lower 16 bits has Rx packet buffer allocation size in KB */
3056 * the Tx FIFO also stores 16 bytes of information about the Tx packet,
3057 * but do not count the Ethernet FCS because the hardware appends it
3059 min_tx_space = (adapter->max_frame_size +
3060 sizeof(struct e1000_tx_desc) -
3062 min_tx_space = ALIGN(min_tx_space, 1024);
3063 min_tx_space >>= 10;
3064 /* software strips receive CRC, so leave room for it */
3065 min_rx_space = adapter->max_frame_size;
3066 min_rx_space = ALIGN(min_rx_space, 1024);
3067 min_rx_space >>= 10;
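/*
 * Worked example (9000-byte MTU, so max_frame_size = 9018, and the
 * two-packet Tx sizing described above): min_tx_space =
 * (9018 + 16 - 4) * 2 = 18060, rounded up to 18432 and expressed in KB
 * as 18; min_rx_space = 9018 rounds up to 9216, i.e. 9 KB.
 */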
3070 * If current Tx allocation is less than the min Tx FIFO size,
3071 * and the min Tx FIFO size is less than the current Rx FIFO
3072 * allocation, take space away from current Rx allocation
3074 if ((tx_space < min_tx_space) &&
3075 ((min_tx_space - tx_space) < pba)) {
3076 pba -= min_tx_space - tx_space;
3079 * if short on Rx space, Rx wins and must trump tx
3080 * adjustment or use Early Receive if available
3082 if ((pba < min_rx_space) &&
3083 (!(adapter->flags & FLAG_HAS_ERT)))
3084 /* ERT enabled in e1000_configure_rx */
3093 * flow control settings
3095 * The high water mark must be low enough to fit one full frame
3096 * (or the size used for early receive) above it in the Rx FIFO.
3097 * Set it to the lower of:
3098 * - 90% of the Rx FIFO size, and
3099 * - the full Rx FIFO size minus the early receive size (for parts
3100 * with ERT support assuming ERT set to E1000_ERT_2048), or
3101 * - the full Rx FIFO size minus one full frame
3103 if (hw->mac.type == e1000_pchlan) {
3105 * Workaround PCH LOM adapter hangs with certain network
3106 * loads. If hangs persist, try disabling Tx flow control.
3108 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3109 fc->high_water = 0x3500;
3110 fc->low_water = 0x1500;
3112 fc->high_water = 0x5000;
3113 fc->low_water = 0x3000;
3116 if ((adapter->flags & FLAG_HAS_ERT) &&
3117 (adapter->netdev->mtu > ETH_DATA_LEN))
3118 hwm = min(((pba << 10) * 9 / 10),
3119 ((pba << 10) - (E1000_ERT_2048 << 3)));
3121 hwm = min(((pba << 10) * 9 / 10),
3122 ((pba << 10) - adapter->max_frame_size));
3124 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
3125 fc->low_water = fc->high_water - 8;
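/*
 * Illustration with an assumed 32 KB Rx allocation, 1522-byte frames
 * and no ERT: hwm = min(32768 * 9 / 10, 32768 - 1522) = min(29491,
 * 31246) = 29491; masking to 8-byte granularity gives a high water
 * mark of 29488, with the low water mark one 8-byte step below at 29480.
 */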
3128 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
3129 fc->pause_time = 0xFFFF;
3131 fc->pause_time = E1000_FC_PAUSE_TIME;
3133 fc->current_mode = fc->requested_mode;
3135 /* Allow time for pending master requests to run */
3136 mac->ops.reset_hw(hw);
3139 * For parts with AMT enabled, let the firmware know
3140 * that the network interface is in control
3142 if (adapter->flags & FLAG_HAS_AMT)
3143 e1000_get_hw_control(adapter);
3146 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)
3147 e1e_wphy(&adapter->hw, BM_WUC, 0);
3149 if (mac->ops.init_hw(hw))
3150 e_err("Hardware Error\n");
3152 /* additional part of the flow-control workaround above */
3153 if (hw->mac.type == e1000_pchlan)
3154 ew32(FCRTV_PCH, 0x1000);
3156 e1000_update_mng_vlan(adapter);
3158 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
3159 ew32(VET, ETH_P_8021Q);
3161 e1000e_reset_adaptive(hw);
3162 e1000_get_phy_info(hw);
3164 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
3165 !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
3168 * speed up time to link by disabling smart power down, ignore
3169 * the return value of this function because there is nothing
3170 * different we would do if it failed
3172 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
3173 phy_data &= ~IGP02E1000_PM_SPD;
3174 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
3178 int e1000e_up(struct e1000_adapter *adapter)
3180 struct e1000_hw *hw = &adapter->hw;
3182 /* DMA latency requirement to workaround early-receive/jumbo issue */
3183 if (adapter->flags & FLAG_HAS_ERT)
3184 pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
3185 adapter->netdev->name,
3186 PM_QOS_DEFAULT_VALUE);
3188 /* hardware has been reset, we need to reload some things */
3189 e1000_configure(adapter);
3191 clear_bit(__E1000_DOWN, &adapter->state);
3193 napi_enable(&adapter->napi);
3194 if (adapter->msix_entries)
3195 e1000_configure_msix(adapter);
3196 e1000_irq_enable(adapter);
3198 netif_wake_queue(adapter->netdev);
3200 /* fire a link change interrupt to start the watchdog */
3201 ew32(ICS, E1000_ICS_LSC);
3205 void e1000e_down(struct e1000_adapter *adapter)
3207 struct net_device *netdev = adapter->netdev;
3208 struct e1000_hw *hw = &adapter->hw;
3212 * signal that we're down so the interrupt handler does not
3213 * reschedule our watchdog timer
3215 set_bit(__E1000_DOWN, &adapter->state);
3217 /* disable receives in the hardware */
3219 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3220 /* flush and sleep below */
3222 netif_stop_queue(netdev);
3224 /* disable transmits in the hardware */
3226 tctl &= ~E1000_TCTL_EN;
3228 /* flush both disables and wait for them to finish */
3232 napi_disable(&adapter->napi);
3233 e1000_irq_disable(adapter);
3235 del_timer_sync(&adapter->watchdog_timer);
3236 del_timer_sync(&adapter->phy_info_timer);
3238 netif_carrier_off(netdev);
3239 adapter->link_speed = 0;
3240 adapter->link_duplex = 0;
3242 if (!pci_channel_offline(adapter->pdev))
3243 e1000e_reset(adapter);
3244 e1000_clean_tx_ring(adapter);
3245 e1000_clean_rx_ring(adapter);
3247 if (adapter->flags & FLAG_HAS_ERT)
3248 pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
3249 adapter->netdev->name);
3252 * TODO: for power management, we could drop the link and
3253 * pci_disable_device here.
3257 void e1000e_reinit_locked(struct e1000_adapter *adapter)
3260 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
3262 e1000e_down(adapter);
3264 clear_bit(__E1000_RESETTING, &adapter->state);
3268 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
3269 * @adapter: board private structure to initialize
3271 * e1000_sw_init initializes the Adapter private data structure.
3272 * Fields are initialized based on PCI device information and
3273 * OS network device settings (MTU size).
3275 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
3277 struct net_device *netdev = adapter->netdev;
3279 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
3280 adapter->rx_ps_bsize0 = 128;
3281 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3282 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3284 e1000e_set_interrupt_capability(adapter);
3286 if (e1000_alloc_queues(adapter))
3289 /* Explicitly disable IRQ since the NIC can be in any state. */
3290 e1000_irq_disable(adapter);
3292 set_bit(__E1000_DOWN, &adapter->state);
3297 * e1000_intr_msi_test - Interrupt Handler
3298 * @irq: interrupt number
3299 * @data: pointer to a network interface device structure
3301 static irqreturn_t e1000_intr_msi_test(int irq, void *data)
3303 struct net_device *netdev = data;
3304 struct e1000_adapter *adapter = netdev_priv(netdev);
3305 struct e1000_hw *hw = &adapter->hw;
3306 u32 icr = er32(ICR);
3308 e_dbg("icr is %08X\n", icr);
3309 if (icr & E1000_ICR_RXSEQ) {
3310 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
3318 * e1000_test_msi_interrupt - Returns 0 for successful test
3319 * @adapter: board private struct
3321 * code flow taken from tg3.c
3323 static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
3325 struct net_device *netdev = adapter->netdev;
3326 struct e1000_hw *hw = &adapter->hw;
3329 /* poll_enable hasn't been called yet, so don't need disable */
3330 /* clear any pending events */
3333 /* free the real vector and request a test handler */
3334 e1000_free_irq(adapter);
3335 e1000e_reset_interrupt_capability(adapter);
3337 /* Assume that the test fails; if it succeeds, the test
3338 * MSI irq handler will clear this flag */
3339 adapter->flags |= FLAG_MSI_TEST_FAILED;
3341 err = pci_enable_msi(adapter->pdev);
3343 goto msi_test_failed;
3345 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
3346 netdev->name, netdev);
3348 pci_disable_msi(adapter->pdev);
3349 goto msi_test_failed;
3354 e1000_irq_enable(adapter);
3356 /* fire an unusual interrupt on the test handler */
3357 ew32(ICS, E1000_ICS_RXSEQ);
3361 e1000_irq_disable(adapter);
3365 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
3366 adapter->int_mode = E1000E_INT_MODE_LEGACY;
3368 e_info("MSI interrupt test failed!\n");
3371 free_irq(adapter->pdev->irq, netdev);
3372 pci_disable_msi(adapter->pdev);
3375 goto msi_test_failed;
3377 /* okay so the test worked, restore settings */
3378 e_dbg("MSI interrupt test succeeded!\n");
3380 e1000e_set_interrupt_capability(adapter);
3381 e1000_request_irq(adapter);
3386 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
3387 * @adapter: board private struct
3389 * code flow taken from tg3.c, called with e1000 interrupts disabled.
3391 static int e1000_test_msi(struct e1000_adapter *adapter)
3396 if (!(adapter->flags & FLAG_MSI_ENABLED))
3399 /* disable SERR in case the MSI write causes a master abort */
3400 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3401 pci_write_config_word(adapter->pdev, PCI_COMMAND,
3402 pci_cmd & ~PCI_COMMAND_SERR);
3404 err = e1000_test_msi_interrupt(adapter);
3406 /* restore previous setting of command word */
3407 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
3413 /* EIO means MSI test failed */
3417 /* back to INTx mode */
3418 e_warn("MSI interrupt test failed, using legacy interrupt.\n");
3420 e1000_free_irq(adapter);
3422 err = e1000_request_irq(adapter);
3428 * e1000_open - Called when a network interface is made active
3429 * @netdev: network interface device structure
3431 * Returns 0 on success, negative value on failure
3433 * The open entry point is called when a network interface is made
3434 * active by the system (IFF_UP). At this point all resources needed
3435 * for transmit and receive operations are allocated, the interrupt
3436 * handler is registered with the OS, the watchdog timer is started,
3437 * and the stack is notified that the interface is ready.
3439 static int e1000_open(struct net_device *netdev)
3441 struct e1000_adapter *adapter = netdev_priv(netdev);
3442 struct e1000_hw *hw = &adapter->hw;
3443 struct pci_dev *pdev = adapter->pdev;
3446 /* disallow open during test */
3447 if (test_bit(__E1000_TESTING, &adapter->state))
3450 pm_runtime_get_sync(&pdev->dev);
3452 netif_carrier_off(netdev);
3454 /* allocate transmit descriptors */
3455 err = e1000e_setup_tx_resources(adapter);
3459 /* allocate receive descriptors */
3460 err = e1000e_setup_rx_resources(adapter);
3464 e1000e_power_up_phy(adapter);
3466 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
3467 if ((adapter->hw.mng_cookie.status &
3468 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
3469 e1000_update_mng_vlan(adapter);
3472 * If AMT is enabled, let the firmware know that the network
3473 * interface is now open
3475 if (adapter->flags & FLAG_HAS_AMT)
3476 e1000_get_hw_control(adapter);
3479 * before we allocate an interrupt, we must be ready to handle it.
3480 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
3481 * as soon as we call pci_request_irq, so we have to setup our
3482 * clean_rx handler before we do so.
3484 e1000_configure(adapter);
3486 err = e1000_request_irq(adapter);
3491 * Work around PCIe errata with MSI interrupts causing some chipsets to
3492 ignore e1000e MSI messages, which means we need to test our MSI interrupt now
3495 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
3496 err = e1000_test_msi(adapter);
3498 e_err("Interrupt allocation failed\n");
3503 /* From here on the code is the same as e1000e_up() */
3504 clear_bit(__E1000_DOWN, &adapter->state);
3506 napi_enable(&adapter->napi);
3508 e1000_irq_enable(adapter);
3510 netif_start_queue(netdev);
3512 adapter->idle_check = true;
3513 pm_runtime_put(&pdev->dev);
3515 /* fire a link status change interrupt to start the watchdog */
3516 ew32(ICS, E1000_ICS_LSC);
3521 e1000_release_hw_control(adapter);
3522 e1000_power_down_phy(adapter);
3523 e1000e_free_rx_resources(adapter);
3525 e1000e_free_tx_resources(adapter);
3527 e1000e_reset(adapter);
3528 pm_runtime_put_sync(&pdev->dev);
3534 * e1000_close - Disables a network interface
3535 * @netdev: network interface device structure
3537 * Returns 0, this is not allowed to fail
3539 * The close entry point is called when an interface is de-activated
3540 * by the OS. The hardware is still under the drivers control, but
3541 * needs to be disabled. A global MAC reset is issued to stop the
3542 * hardware, and all transmit and receive resources are freed.
3544 static int e1000_close(struct net_device *netdev)
3546 struct e1000_adapter *adapter = netdev_priv(netdev);
3547 struct pci_dev *pdev = adapter->pdev;
3549 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
3551 pm_runtime_get_sync(&pdev->dev);
3553 if (!test_bit(__E1000_DOWN, &adapter->state)) {
3554 e1000e_down(adapter);
3555 e1000_free_irq(adapter);
3557 e1000_power_down_phy(adapter);
3559 e1000e_free_tx_resources(adapter);
3560 e1000e_free_rx_resources(adapter);
3563 * kill manageability vlan ID if supported, but not if a vlan with
3564 * the same ID is registered on the host OS (let 8021q kill it)
3566 if ((adapter->hw.mng_cookie.status &
3567 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
3569 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
3570 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
3573 * If AMT is enabled, let the firmware know that the network
3574 * interface is now closed
3576 if (adapter->flags & FLAG_HAS_AMT)
3577 e1000_release_hw_control(adapter);
3579 pm_runtime_put_sync(&pdev->dev);
3584 * e1000_set_mac - Change the Ethernet Address of the NIC
3585 * @netdev: network interface device structure
3586 * @p: pointer to an address structure
3588 * Returns 0 on success, negative on failure
3590 static int e1000_set_mac(struct net_device *netdev, void *p)
3592 struct e1000_adapter *adapter = netdev_priv(netdev);
3593 struct sockaddr *addr = p;
3595 if (!is_valid_ether_addr(addr->sa_data))
3596 return -EADDRNOTAVAIL;
3598 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3599 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
3601 e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
3603 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
3604 /* activate the work around */
3605 e1000e_set_laa_state_82571(&adapter->hw, 1);
3608 * Hold a copy of the LAA in RAR[14]. This is done so that
3609 * between the time RAR[0] gets clobbered and the time it
3610 * gets fixed (in e1000_watchdog), the actual LAA is in one
3611 * of the RARs and no incoming packets directed to this port
3612 are dropped. Eventually the LAA will be in RAR[0] and RAR[14].
3615 e1000e_rar_set(&adapter->hw,
3616 adapter->hw.mac.addr,
3617 adapter->hw.mac.rar_entry_count - 1);
3624 * e1000e_update_phy_task - work thread to update phy
3625 * @work: pointer to our work struct
3627 * this worker thread exists because reading the phy requires
3628 * acquiring a semaphore, which may sleep while waiting, and
3629 * sleeping is not allowed in timer context.
3631 static void e1000e_update_phy_task(struct work_struct *work)
3633 struct e1000_adapter *adapter = container_of(work,
3634 struct e1000_adapter, update_phy_task);
3635 e1000_get_phy_info(&adapter->hw);
3639 * Need to wait a few seconds after link up to get diagnostic information from the phy
3642 static void e1000_update_phy_info(unsigned long data)
3644 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
3645 schedule_work(&adapter->update_phy_task);
3649 * e1000e_update_stats - Update the board statistics counters
3650 * @adapter: board private structure
3652 void e1000e_update_stats(struct e1000_adapter *adapter)
3654 struct net_device *netdev = adapter->netdev;
3655 struct e1000_hw *hw = &adapter->hw;
3656 struct pci_dev *pdev = adapter->pdev;
3660 * Prevent stats update while adapter is being reset, or if the pci
3661 * connection is down.
3663 if (adapter->link_speed == 0)
3665 if (pci_channel_offline(pdev))
3668 adapter->stats.crcerrs += er32(CRCERRS);
3669 adapter->stats.gprc += er32(GPRC);
3670 adapter->stats.gorc += er32(GORCL);
3671 er32(GORCH); /* Clear gorc */
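/*
 * GORCL/GORCH form a 64-bit read-to-clear pair; the high half is read
 * only so the hardware clears the counter, as the low 32 bits are
 * sufficient per update interval.
 */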
3672 adapter->stats.bprc += er32(BPRC);
3673 adapter->stats.mprc += er32(MPRC);
3674 adapter->stats.roc += er32(ROC);
3676 adapter->stats.mpc += er32(MPC);
3677 if ((hw->phy.type == e1000_phy_82578) ||
3678 (hw->phy.type == e1000_phy_82577)) {
3679 e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
3680 if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data))
3681 adapter->stats.scc += phy_data;
3683 e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
3684 if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data))
3685 adapter->stats.ecol += phy_data;
3687 e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
3688 if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data))
3689 adapter->stats.mcc += phy_data;
3691 e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
3692 if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data))
3693 adapter->stats.latecol += phy_data;
3695 e1e_rphy(hw, HV_DC_UPPER, &phy_data);
3696 if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data))
3697 adapter->stats.dc += phy_data;
3699 adapter->stats.scc += er32(SCC);
3700 adapter->stats.ecol += er32(ECOL);
3701 adapter->stats.mcc += er32(MCC);
3702 adapter->stats.latecol += er32(LATECOL);
3703 adapter->stats.dc += er32(DC);
3705 adapter->stats.xonrxc += er32(XONRXC);
3706 adapter->stats.xontxc += er32(XONTXC);
3707 adapter->stats.xoffrxc += er32(XOFFRXC);
3708 adapter->stats.xofftxc += er32(XOFFTXC);
3709 adapter->stats.gptc += er32(GPTC);
3710 adapter->stats.gotc += er32(GOTCL);
3711 er32(GOTCH); /* Clear gotc */
3712 adapter->stats.rnbc += er32(RNBC);
3713 adapter->stats.ruc += er32(RUC);
3715 adapter->stats.mptc += er32(MPTC);
3716 adapter->stats.bptc += er32(BPTC);
3718 /* used for adaptive IFS */
3720 hw->mac.tx_packet_delta = er32(TPT);
3721 adapter->stats.tpt += hw->mac.tx_packet_delta;
3722 if ((hw->phy.type == e1000_phy_82578) ||
3723 (hw->phy.type == e1000_phy_82577)) {
3724 e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
3725 if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data))
3726 hw->mac.collision_delta = phy_data;
3728 hw->mac.collision_delta = er32(COLC);
3730 adapter->stats.colc += hw->mac.collision_delta;
3732 adapter->stats.algnerrc += er32(ALGNERRC);
3733 adapter->stats.rxerrc += er32(RXERRC);
3734 if ((hw->phy.type == e1000_phy_82578) ||
3735 (hw->phy.type == e1000_phy_82577)) {
3736 e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
3737 if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data))
3738 adapter->stats.tncrs += phy_data;
3740 if ((hw->mac.type != e1000_82574) &&
3741 (hw->mac.type != e1000_82583))
3742 adapter->stats.tncrs += er32(TNCRS);
3744 adapter->stats.cexterr += er32(CEXTERR);
3745 adapter->stats.tsctc += er32(TSCTC);
3746 adapter->stats.tsctfc += er32(TSCTFC);
3748 /* Fill out the OS statistics structure */
3749 netdev->stats.multicast = adapter->stats.mprc;
3750 netdev->stats.collisions = adapter->stats.colc;
3755 * RLEC on some newer hardware can be incorrect so build
3756 * our own version based on RUC and ROC
3758 netdev->stats.rx_errors = adapter->stats.rxerrc +
3759 adapter->stats.crcerrs + adapter->stats.algnerrc +
3760 adapter->stats.ruc + adapter->stats.roc +
3761 adapter->stats.cexterr;
3762 netdev->stats.rx_length_errors = adapter->stats.ruc +
3764 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3765 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3766 netdev->stats.rx_missed_errors = adapter->stats.mpc;
3769 netdev->stats.tx_errors = adapter->stats.ecol +
3770 adapter->stats.latecol;
3771 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3772 netdev->stats.tx_window_errors = adapter->stats.latecol;
3773 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3775 /* Tx Dropped needs to be maintained elsewhere */
3777 /* Management Stats */
3778 adapter->stats.mgptc += er32(MGTPTC);
3779 adapter->stats.mgprc += er32(MGTPRC);
3780 adapter->stats.mgpdc += er32(MGTPDC);
3784 * e1000_phy_read_status - Update the PHY register status snapshot
3785 * @adapter: board private structure
3787 static void e1000_phy_read_status(struct e1000_adapter *adapter)
3789 struct e1000_hw *hw = &adapter->hw;
3790 struct e1000_phy_regs *phy = &adapter->phy_regs;
3793 if ((er32(STATUS) & E1000_STATUS_LU) &&
3794 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
3795 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
3796 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
3797 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
3798 ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
3799 ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
3800 ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
3801 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
3802 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
3804 e_warn("Error reading PHY register\n");
3807 * Do not read PHY registers if link is not up
3808 * Set values to typical power-on defaults
3810 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
3811 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
3812 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
3814 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
3815 ADVERTISE_ALL | ADVERTISE_CSMA);
3817 phy->expansion = EXPANSION_ENABLENPAGE;
3818 phy->ctrl1000 = ADVERTISE_1000FULL;
3820 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
3824 static void e1000_print_link_info(struct e1000_adapter *adapter)
3826 struct e1000_hw *hw = &adapter->hw;
3827 u32 ctrl = er32(CTRL);
3829 /* Link status message must follow this format for user tools */
3830 printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
3831 "Flow Control: %s\n",
3832 adapter->netdev->name,
3833 adapter->link_speed,
3834 (adapter->link_duplex == FULL_DUPLEX) ?
3835 "Full Duplex" : "Half Duplex",
3836 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
3838 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3839 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
3842 bool e1000e_has_link(struct e1000_adapter *adapter)
3844 struct e1000_hw *hw = &adapter->hw;
3845 bool link_active = false;
3849 * get_link_status is set on LSC (link status) interrupt or
3850 * Rx sequence error interrupt. get_link_status will stay
3851 * false until the check_for_link establishes link
3852 * for copper adapters ONLY
3854 switch (hw->phy.media_type) {
3855 case e1000_media_type_copper:
3856 if (hw->mac.get_link_status) {
3857 ret_val = hw->mac.ops.check_for_link(hw);
3858 link_active = !hw->mac.get_link_status;
3863 case e1000_media_type_fiber:
3864 ret_val = hw->mac.ops.check_for_link(hw);
3865 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
3867 case e1000_media_type_internal_serdes:
3868 ret_val = hw->mac.ops.check_for_link(hw);
3869 link_active = adapter->hw.mac.serdes_has_link;
3872 case e1000_media_type_unknown:
3876 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
3877 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
3878 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
3879 e_info("Gigabit has been disabled, downgrading speed\n");
3885 static void e1000e_enable_receives(struct e1000_adapter *adapter)
3887 /* make sure the receive unit is started */
3888 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
3889 (adapter->flags & FLAG_RX_RESTART_NOW)) {
3890 struct e1000_hw *hw = &adapter->hw;
3891 u32 rctl = er32(RCTL);
3892 ew32(RCTL, rctl | E1000_RCTL_EN);
3893 adapter->flags &= ~FLAG_RX_RESTART_NOW;
3898 * e1000_watchdog - Timer Call-back
3899 * @data: pointer to adapter cast into an unsigned long
3901 static void e1000_watchdog(unsigned long data)
3903 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
3905 /* Do the rest outside of interrupt context */
3906 schedule_work(&adapter->watchdog_task);
3908 /* TODO: make this use queue_delayed_work() */
3911 static void e1000_watchdog_task(struct work_struct *work)
3913 struct e1000_adapter *adapter = container_of(work,
3914 struct e1000_adapter, watchdog_task);
3915 struct net_device *netdev = adapter->netdev;
3916 struct e1000_mac_info *mac = &adapter->hw.mac;
3917 struct e1000_phy_info *phy = &adapter->hw.phy;
3918 struct e1000_ring *tx_ring = adapter->tx_ring;
3919 struct e1000_hw *hw = &adapter->hw;
3923 link = e1000e_has_link(adapter);
3924 if ((netif_carrier_ok(netdev)) && link) {
3925 /* Cancel scheduled suspend requests. */
3926 pm_runtime_resume(netdev->dev.parent);
3928 e1000e_enable_receives(adapter);
3932 if ((e1000e_enable_tx_pkt_filtering(hw)) &&
3933 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
3934 e1000_update_mng_vlan(adapter);
3937 if (!netif_carrier_ok(netdev)) {
3940 /* Cancel scheduled suspend requests. */
3941 pm_runtime_resume(netdev->dev.parent);
3943 /* update snapshot of PHY registers on LSC */
3944 e1000_phy_read_status(adapter);
3945 mac->ops.get_link_up_info(&adapter->hw,
3946 &adapter->link_speed,
3947 &adapter->link_duplex);
3948 e1000_print_link_info(adapter);
3950 * On supported PHYs, check for duplex mismatch only
3951 * if link has autonegotiated at 10/100 half
3953 if ((hw->phy.type == e1000_phy_igp_3 ||
3954 hw->phy.type == e1000_phy_bm) &&
3955 (hw->mac.autoneg) &&
3956 (adapter->link_speed == SPEED_10 ||
3957 adapter->link_speed == SPEED_100) &&
3958 (adapter->link_duplex == HALF_DUPLEX)) {
3961 e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
3963 if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
3964 e_info("Autonegotiated half duplex but"
3965 " link partner cannot autoneg."
3966 " Try forcing full duplex if"
3967 " link gets many collisions.\n");
3970 /* adjust timeout factor according to speed/duplex */
3971 adapter->tx_timeout_factor = 1;
3972 switch (adapter->link_speed) {
3975 adapter->tx_timeout_factor = 16;
3979 adapter->tx_timeout_factor = 10;
3984 * workaround: re-program speed mode bit after a link-up event
3987 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
3990 tarc0 = er32(TARC(0));
3991 tarc0 &= ~SPEED_MODE_BIT;
3992 ew32(TARC(0), tarc0);
3996 * disable TSO for pcie and 10/100 speeds, to avoid
3997 * some hardware issues
3999 if (!(adapter->flags & FLAG_TSO_FORCE)) {
4000 switch (adapter->link_speed) {
4003 e_info("10/100 speed: disabling TSO\n");
4004 netdev->features &= ~NETIF_F_TSO;
4005 netdev->features &= ~NETIF_F_TSO6;
4008 netdev->features |= NETIF_F_TSO;
4009 netdev->features |= NETIF_F_TSO6;
4018 * enable transmits in the hardware, need to do this
4019 * after setting TARC(0)
4022 tctl |= E1000_TCTL_EN;
4026 * Perform any post-link-up configuration before
4027 * reporting link up.
4029 if (phy->ops.cfg_on_link_up)
4030 phy->ops.cfg_on_link_up(hw);
4032 netif_carrier_on(netdev);
4034 if (!test_bit(__E1000_DOWN, &adapter->state))
4035 mod_timer(&adapter->phy_info_timer,
4036 round_jiffies(jiffies + 2 * HZ));
4039 if (netif_carrier_ok(netdev)) {
4040 adapter->link_speed = 0;
4041 adapter->link_duplex = 0;
4042 /* Link status message must follow this format */
4043 printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
4044 adapter->netdev->name);
4045 netif_carrier_off(netdev);
4046 if (!test_bit(__E1000_DOWN, &adapter->state))
4047 mod_timer(&adapter->phy_info_timer,
4048 round_jiffies(jiffies + 2 * HZ));
4050 if (adapter->flags & FLAG_RX_NEEDS_RESTART)
4051 schedule_work(&adapter->reset_task);
4053 pm_schedule_suspend(netdev->dev.parent,
4059 e1000e_update_stats(adapter);
4061 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
4062 adapter->tpt_old = adapter->stats.tpt;
4063 mac->collision_delta = adapter->stats.colc - adapter->colc_old;
4064 adapter->colc_old = adapter->stats.colc;
4066 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
4067 adapter->gorc_old = adapter->stats.gorc;
4068 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
4069 adapter->gotc_old = adapter->stats.gotc;
4071 e1000e_update_adaptive(&adapter->hw);
4073 if (!netif_carrier_ok(netdev)) {
4074 tx_pending = (e1000_desc_unused(tx_ring) + 1 < tx_ring->count);
4078 * We've lost link, so the controller stops DMA,
4079 * but we've got queued Tx work that's never going
4080 * to get done, so reset controller to flush Tx.
4081 * (Do the reset outside of interrupt context).
4083 adapter->tx_timeout_count++;
4084 schedule_work(&adapter->reset_task);
4085 /* return immediately since reset is imminent */
4090 /* Cause software interrupt to ensure Rx ring is cleaned */
4091 if (adapter->msix_entries)
4092 ew32(ICS, adapter->rx_ring->ims_val);
4094 ew32(ICS, E1000_ICS_RXDMT0);
4096 /* Force detection of hung controller every watchdog period */
4097 adapter->detect_tx_hung = 1;
4100 * With 82571 controllers, LAA may be overwritten due to controller
4101 * reset from the other port. Set the appropriate LAA in RAR[0]
4103 if (e1000e_get_laa_state_82571(hw))
4104 e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
4106 /* Reset the timer */
4107 if (!test_bit(__E1000_DOWN, &adapter->state))
4108 mod_timer(&adapter->watchdog_timer,
4109 round_jiffies(jiffies + 2 * HZ));
4112 #define E1000_TX_FLAGS_CSUM 0x00000001
4113 #define E1000_TX_FLAGS_VLAN 0x00000002
4114 #define E1000_TX_FLAGS_TSO 0x00000004
4115 #define E1000_TX_FLAGS_IPV4 0x00000008
4116 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
4117 #define E1000_TX_FLAGS_VLAN_SHIFT 16
4119 static int e1000_tso(struct e1000_adapter *adapter,
4120 struct sk_buff *skb)
4122 struct e1000_ring *tx_ring = adapter->tx_ring;
4123 struct e1000_context_desc *context_desc;
4124 struct e1000_buffer *buffer_info;
4127 u16 ipcse = 0, tucse, mss;
4128 u8 ipcss, ipcso, tucss, tucso, hdr_len;
4131 if (!skb_is_gso(skb))
4134 if (skb_header_cloned(skb)) {
4135 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
4140 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4141 mss = skb_shinfo(skb)->gso_size;
4142 if (skb->protocol == htons(ETH_P_IP)) {
4143 struct iphdr *iph = ip_hdr(skb);
4146 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
4148 cmd_length = E1000_TXD_CMD_IP;
4149 ipcse = skb_transport_offset(skb) - 1;
4150 } else if (skb_is_gso_v6(skb)) {
4151 ipv6_hdr(skb)->payload_len = 0;
4152 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4153 &ipv6_hdr(skb)->daddr,
4157 ipcss = skb_network_offset(skb);
4158 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
4159 tucss = skb_transport_offset(skb);
4160 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
4163 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
4164 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
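/*
 * Illustrative numbers (not taken from this code): a 64 KB GSO skb with
 * 66 bytes of headers and an mss of 1448 gives a paylen of
 * skb->len - hdr_len = 65470, which the hardware segments into
 * DIV_ROUND_UP(65470, 1448) = 46 frames, reusing the single context
 * descriptor set up below for every segment.
 */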
4166 i = tx_ring->next_to_use;
4167 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
4168 buffer_info = &tx_ring->buffer_info[i];
4170 context_desc->lower_setup.ip_fields.ipcss = ipcss;
4171 context_desc->lower_setup.ip_fields.ipcso = ipcso;
4172 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
4173 context_desc->upper_setup.tcp_fields.tucss = tucss;
4174 context_desc->upper_setup.tcp_fields.tucso = tucso;
4175 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
4176 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
4177 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
4178 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
4180 buffer_info->time_stamp = jiffies;
4181 buffer_info->next_to_watch = i;
4184 if (i == tx_ring->count)
4186 tx_ring->next_to_use = i;
4191 static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
4193 struct e1000_ring *tx_ring = adapter->tx_ring;
4194 struct e1000_context_desc *context_desc;
4195 struct e1000_buffer *buffer_info;
4198 u32 cmd_len = E1000_TXD_CMD_DEXT;
4201 if (skb->ip_summed != CHECKSUM_PARTIAL)
4204 if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
4205 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
4207 protocol = skb->protocol;
4210 case cpu_to_be16(ETH_P_IP):
4211 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
4212 cmd_len |= E1000_TXD_CMD_TCP;
4214 case cpu_to_be16(ETH_P_IPV6):
4215 /* XXX not handling all IPV6 headers */
4216 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
4217 cmd_len |= E1000_TXD_CMD_TCP;
4220 if (unlikely(net_ratelimit()))
4221 e_warn("checksum_partial proto=%x!\n",
4222 be16_to_cpu(protocol));
4226 css = skb_transport_offset(skb);
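/*
 * In the context descriptor: tucss is the offset where hardware
 * checksumming starts, tucso is where the computed checksum is
 * inserted, and tucse == 0 means "checksum through the end of the
 * packet".
 */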
4228 i = tx_ring->next_to_use;
4229 buffer_info = &tx_ring->buffer_info[i];
4230 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
4232 context_desc->lower_setup.ip_config = 0;
4233 context_desc->upper_setup.tcp_fields.tucss = css;
4234 context_desc->upper_setup.tcp_fields.tucso =
4235 css + skb->csum_offset;
4236 context_desc->upper_setup.tcp_fields.tucse = 0;
4237 context_desc->tcp_seg_setup.data = 0;
4238 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
4240 buffer_info->time_stamp = jiffies;
4241 buffer_info->next_to_watch = i;
4244 if (i == tx_ring->count)
4246 tx_ring->next_to_use = i;
4251 #define E1000_MAX_PER_TXD 8192
4252 #define E1000_MAX_TXD_PWR 12
4254 static int e1000_tx_map(struct e1000_adapter *adapter,
4255 struct sk_buff *skb, unsigned int first,
4256 unsigned int max_per_txd, unsigned int nr_frags,
4259 struct e1000_ring *tx_ring = adapter->tx_ring;
4260 struct pci_dev *pdev = adapter->pdev;
4261 struct e1000_buffer *buffer_info;
4262 unsigned int len = skb_headlen(skb);
4263 unsigned int offset = 0, size, count = 0, i;
4266 i = tx_ring->next_to_use;
4269 buffer_info = &tx_ring->buffer_info[i];
4270 size = min(len, max_per_txd);
4272 buffer_info->length = size;
4273 buffer_info->time_stamp = jiffies;
4274 buffer_info->next_to_watch = i;
4275 buffer_info->dma = dma_map_single(&pdev->dev,
4277 size, DMA_TO_DEVICE);
4278 buffer_info->mapped_as_page = false;
4279 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
4288 if (i == tx_ring->count)
4293 for (f = 0; f < nr_frags; f++) {
4294 struct skb_frag_struct *frag;
4296 frag = &skb_shinfo(skb)->frags[f];
4298 offset = frag->page_offset;
4302 if (i == tx_ring->count)
4305 buffer_info = &tx_ring->buffer_info[i];
4306 size = min(len, max_per_txd);
4308 buffer_info->length = size;
4309 buffer_info->time_stamp = jiffies;
4310 buffer_info->next_to_watch = i;
4311 buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
4314 buffer_info->mapped_as_page = true;
4315 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
4324 tx_ring->buffer_info[i].skb = skb;
4325 tx_ring->buffer_info[first].next_to_watch = i;
4330 dev_err(&pdev->dev, "TX DMA map failed\n");
4331 buffer_info->dma = 0;
4337 i += tx_ring->count;
4339 buffer_info = &tx_ring->buffer_info[i];
4340 e1000_put_txbuf(adapter, buffer_info);
static void e1000_tx_queue(struct e1000_adapter *adapter,
			   int tx_flags, int count)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_buffer *buffer_info;
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (tx_flags & E1000_TX_FLAGS_TSO) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		if (tx_flags & E1000_TX_FLAGS_IPV4)
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (tx_flags & E1000_TX_FLAGS_CSUM) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (tx_flags & E1000_TX_FLAGS_VLAN) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);

		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
	/*
	 * we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems
	 */
	mmiowb();
}
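/*
 * A minimal sketch of the producer ordering used above (assumed
 * context, not part of the driver): the descriptor writes must be
 * globally visible before the tail bump, or the NIC could fetch a
 * stale descriptor.
 *
 *	tx_desc->upper.data = cpu_to_le32(txd_upper);	// fill descriptor
 *	wmb();						// order the writes
 *	writel(i, tail);				// then notify h/w
 */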
#define MINIMUM_DHCP_PACKET_SIZE 282
static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
				    struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 length, offset;

	if (vlan_tx_tag_present(skb)) {
		if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
		    (adapter->hw.mng_cookie.status &
		     E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
			return 0;
	}

	if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
		return 0;

	if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP))
		return 0;

	{
		const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);
		struct udphdr *udp;

		if (ip->protocol != IPPROTO_UDP)
			return 0;

		udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
		if (ntohs(udp->dest) != 67)
			return 0;

		offset = (u8 *)udp + 8 - skb->data;
		length = skb->len - offset;
		return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
	}

	return 0;
}
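/*
 * Worked example of the offset arithmetic above (standard header
 * sizes, not from the original source): in an untagged frame the IP
 * header starts at byte 14; with ip->ihl = 5 the UDP header starts at
 * 14 + (5 << 2) = 34, so the DHCP payload passed to
 * e1000e_mng_write_dhcp_info() begins at offset 42 and is
 * skb->len - 42 bytes long.
 */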
static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_stop_queue(netdev);
	/*
	 * Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/*
	 * We need to check again in case another CPU has just
	 * made room available.
	 */
	if (e1000_desc_unused(adapter->tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (e1000_desc_unused(adapter->tx_ring) >= size)
		return 0;
	return __e1000_maybe_stop_tx(netdev, size);
}
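/*
 * Illustrative sketch of the race the smp_mb() above closes (assumed
 * interleaving, not from the original source): without the barrier,
 * the stop could become visible only after this CPU re-reads the free
 * count, while the cleanup path frees descriptors and tests a
 * not-yet-stopped queue, so neither side wakes the queue:
 *
 *	CPU A (xmit)			CPU B (clean_tx_irq)
 *	netif_stop_queue()		frees descriptors
 *					netif_queue_stopped()? -> no wake
 *	re-check: still < size		(stop becomes visible too late)
 */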
#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int first;
	unsigned int max_per_txd = E1000_MAX_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb_headlen(skb);
	unsigned int nr_frags;
	unsigned int mss;
	int count = 0;
	int tso;
	unsigned int f;

	if (test_bit(__E1000_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	mss = skb_shinfo(skb)->gso_size;
	/*
	 * The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer.  The calc is:
	 * 4 = ceil(buffer len/mss).  To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops.
	 */
	if (mss) {
		u8 hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;

		/*
		 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
		 * points to just header, pull a few bytes of payload from
		 * frags into skb->data
		 */
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		/*
		 * we do this workaround for ES2LAN, but it is unnecessary
		 * there; avoiding it could save a lot of cycles
		 */
		if (skb->data_len && (hdr_len == len)) {
			unsigned int pull_size;

			pull_size = min((unsigned int)4, skb->data_len);
			if (!__pskb_pull_tail(skb, pull_size)) {
				e_err("__pskb_pull_tail failed.\n");
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
			len = skb_headlen(skb);
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
		count++;
	count++;

	count += TXD_USE_COUNT(len, max_txd_pwr);

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
				       max_txd_pwr);

	if (adapter->hw.mac.tx_pkt_filtering)
		e1000_transfer_dhcp_info(adapter, skb);

	/*
	 * need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time
	 */
	if (e1000_maybe_stop_tx(netdev, count + 2))
		return NETDEV_TX_BUSY;

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
	}

	first = tx_ring->next_to_use;

	tso = e1000_tso(adapter, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= E1000_TX_FLAGS_TSO;
	else if (e1000_tx_csum(adapter, skb))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	/*
	 * Old method was to assume IPv4 packet by default if TSO was enabled.
	 * 82571 hardware supports TSO capabilities for IPv6 as well...
	 * no longer assume, we must check.
	 */
	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	/* if count is 0 then mapping error has occurred */
	count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
	if (count) {
		e1000_tx_queue(adapter, tx_flags, count);
		/* Make sure there is space in the ring for the next send. */
		e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
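/*
 * Worked example of the descriptor budget above (assumed skb shape,
 * not from the original source): a CHECKSUM_PARTIAL skb with a
 * 1400-byte head and two 4096-byte frags costs 1 (offload context) +
 * 1 (unconditional reserve) + TXD_USE_COUNT(1400, 12) = 1 +
 * 2 * TXD_USE_COUNT(4096, 12) = 4, i.e. count = 7, so the ring needs
 * count + 2 = 9 unused descriptors before the frame is accepted.
 */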
/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void e1000_tx_timeout(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}
static void e1000_reset_task(struct work_struct *work)
{
	struct e1000_adapter *adapter;
	adapter = container_of(work, struct e1000_adapter, reset_task);

	e1000e_dump(adapter);
	e_err("Reset adapter\n");
	e1000e_reinit_locked(adapter);
}
/**
 * e1000_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
{
	/* only return the current stats */
	return &netdev->stats;
}
/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* Jumbo frame support */
	if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
		e_err("Jumbo Frames not supported.\n");
		return -EINVAL;
	}

	/* Supported frame sizes */
	if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
	    (max_frame > adapter->max_hw_frame_size)) {
		e_err("Unsupported MTU setting\n");
		return -EINVAL;
	}

	/* 82573 Errata 17 */
	if (((adapter->hw.mac.type == e1000_82573) ||
	     (adapter->hw.mac.type == e1000_82574)) &&
	    (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
		adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
		e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
	}

	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		msleep(1);
	/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
	adapter->max_frame_size = max_frame;
	e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		e1000e_down(adapter);

	/*
	 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * However with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs
	 */

	if (max_frame <= 2048)
		adapter->rx_buffer_len = 2048;
	else
		adapter->rx_buffer_len = 4096;

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
					 + ETH_FCS_LEN;

	if (netif_running(netdev))
		e1000e_up(adapter);
	else
		e1000e_reset(adapter);

	clear_bit(__E1000_RESETTING, &adapter->state);

	return 0;
}
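/*
 * Illustrative sketch of the buffer sizing above (standard sizes, not
 * from the original source): new_mtu = 1500 gives max_frame = 1500 +
 * 14 + 4 = 1518, which first selects the 2048-byte bucket and is then
 * trimmed by the LPE clause to ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN
 * = 1522 bytes; a 9000-byte jumbo MTU (9018-byte frames) keeps
 * rx_buffer_len at 4096 and relies on the *_jumbo_rx* routines.
 */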
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		e1000_phy_read_status(adapter);

		switch (data->reg_num & 0x1F) {
		case MII_BMCR:
			data->val_out = adapter->phy_regs.bmcr;
			break;
		case MII_BMSR:
			data->val_out = adapter->phy_regs.bmsr;
			break;
		case MII_PHYSID1:
			data->val_out = (adapter->hw.phy.id >> 16);
			break;
		case MII_PHYSID2:
			data->val_out = (adapter->hw.phy.id & 0xFFFF);
			break;
		case MII_ADVERTISE:
			data->val_out = adapter->phy_regs.advertise;
			break;
		case MII_LPA:
			data->val_out = adapter->phy_regs.lpa;
			break;
		case MII_EXPANSION:
			data->val_out = adapter->phy_regs.expansion;
			break;
		case MII_CTRL1000:
			data->val_out = adapter->phy_regs.ctrl1000;
			break;
		case MII_STAT1000:
			data->val_out = adapter->phy_regs.stat1000;
			break;
		case MII_ESTATUS:
			data->val_out = adapter->phy_regs.estatus;
			break;
		default:
			return -EIO;
		}
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 i, mac_reg;
	u16 phy_reg;
	int retval = 0;

	/* copy MAC RARs to PHY RARs */
	for (i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
		mac_reg = er32(RAL(i));
		e1e_wphy(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
		e1e_wphy(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
		mac_reg = er32(RAH(i));
		e1e_wphy(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
		e1e_wphy(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0xFFFF));
	}

	/* copy MAC MTA to PHY MTA */
	for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
		mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
		e1e_wphy(hw, BM_MTA(i), (u16)(mac_reg & 0xFFFF));
		e1e_wphy(hw, BM_MTA(i) + 1, (u16)((mac_reg >> 16) & 0xFFFF));
	}

	/* configure PHY Rx Control register */
	e1e_rphy(&adapter->hw, BM_RCTL, &phy_reg);
	mac_reg = er32(RCTL);
	if (mac_reg & E1000_RCTL_UPE)
		phy_reg |= BM_RCTL_UPE;
	if (mac_reg & E1000_RCTL_MPE)
		phy_reg |= BM_RCTL_MPE;
	phy_reg &= ~(BM_RCTL_MO_MASK);
	if (mac_reg & E1000_RCTL_MO_3)
		phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
				<< BM_RCTL_MO_SHIFT);
	if (mac_reg & E1000_RCTL_BAM)
		phy_reg |= BM_RCTL_BAM;
	if (mac_reg & E1000_RCTL_PMCF)
		phy_reg |= BM_RCTL_PMCF;
	mac_reg = er32(CTRL);
	if (mac_reg & E1000_CTRL_RFCE)
		phy_reg |= BM_RCTL_RFCE;
	e1e_wphy(&adapter->hw, BM_RCTL, phy_reg);

	/* enable PHY wakeup in MAC register */
	ew32(WUFC, wufc);
	ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);

	/* configure and enable PHY wakeup in PHY registers */
	e1e_wphy(&adapter->hw, BM_WUFC, wufc);
	e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);

	/* activate PHY wakeup */
	retval = hw->phy.ops.acquire(hw);
	if (retval) {
		e_err("Could not acquire PHY\n");
		return retval;
	}
	e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
				  (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
	retval = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg);
	if (retval) {
		e_err("Could not read PHY page 769\n");
		goto out;
	}
	phy_reg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
	retval = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
	if (retval)
		e_err("Could not set PHY Host Wakeup bit\n");
out:
	hw->phy.ops.release(hw);

	return retval;
}
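/*
 * Illustrative sketch of the RAR copy above (example address, not
 * from the original source): for MAC address 00:1b:21:aa:bb:cc the
 * MAC register RAL(0) holds 0xaa211b00, so the loop writes
 * BM_RAR_L(0) = 0x1b00 and BM_RAR_M(0) = 0xaa21 -- each 32-bit MAC
 * register is split into two 16-bit writes on the PHY wakeup page.
 */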
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
			    bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	/* Runtime suspend should only enable wakeup for link changes */
	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
	int retval = 0;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
		e1000e_down(adapter);
		e1000_free_irq(adapter);
	}
	e1000e_reset_interrupt_capability(adapter);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_multi(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_MPE;
			ew32(RCTL, rctl);
		}

		ctrl = er32(CTRL);
		/* advertise wake from D3Cold */
#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
			ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
		ew32(CTRL, ctrl);

		if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
		    adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		if (adapter->flags & FLAG_IS_ICH)
			e1000e_disable_gig_wol_ich8lan(&adapter->hw);

		/* Allow time for pending master requests to run */
		e1000e_disable_pcie_master(&adapter->hw);

		if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
			/* enable wakeup by the PHY */
			retval = e1000_init_phy_wakeup(adapter, wufc);
			if (retval)
				return retval;
		} else {
			/* enable wakeup by the MAC */
			ew32(WUFC, wufc);
			ew32(WUC, E1000_WUC_PME_EN);
		}
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
	    (hw->mac.ops.check_mng_mode(hw)))
		*enable_wake = true;

	if (adapter->hw.phy.type == e1000_phy_igp_3)
		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);

	/*
	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}
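/*
 * Illustrative note on the wufc selection above (assumed scenarios,
 * not from the original source): a runtime suspend collapses wufc to
 * E1000_WUFC_LNKC only, and if the link is already up (STATUS.LU set)
 * even that bit is cleared, so the device suspends with wake filters
 * off; a system suspend with magic-packet WoL instead programs
 * E1000_WUFC_MAG into the MAC (WUFC) or the PHY (BM_WUFC) depending
 * on FLAG2_HAS_PHY_WAKEUP.
 */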
static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
{
	if (sleep && wake) {
		pci_prepare_to_sleep(pdev);
		return;
	}

	pci_wake_from_d3(pdev, wake);
	pci_set_power_state(pdev, PCI_D3hot);
}

static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
				    bool wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/*
	 * The pci-e switch on some quad port adapters will report a
	 * correctable error when the MAC transitions from D0 to D3.  To
	 * prevent this we need to mask off the correctable errors on the
	 * downstream port of the pci-e switch.
	 */
	if (adapter->flags & FLAG_IS_QUAD_PORT) {
		struct pci_dev *us_dev = pdev->bus->self;
		int pos = pci_find_capability(us_dev, PCI_CAP_ID_EXP);
		u16 devctl;

		pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
				      (devctl & ~PCI_EXP_DEVCTL_CERE));

		e1000_power_off(pdev, sleep, wake);

		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
	} else {
		e1000_power_off(pdev, sleep, wake);
	}
}
#ifdef CONFIG_PCIEASPM
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
	pci_disable_link_state(pdev, state);
}
#else
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
	int pos;
	u16 reg16;

	/*
	 * Both device and parent should have the same ASPM setting.
	 * Disable ASPM in downstream component first and then upstream.
	 */
	pos = pci_pcie_cap(pdev);
	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
	reg16 &= ~state;
	pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);

	if (!pdev->bus->self)
		return;

	pos = pci_pcie_cap(pdev->bus->self);
	pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
	reg16 &= ~state;
	pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
}
#endif
void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
	dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
		 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
		 (state & PCIE_LINK_STATE_L1) ? "L1" : "");

	__e1000e_disable_aspm(pdev, state);
}
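/*
 * A minimal sketch of the register-level fallback above (bit layout
 * per the PCIe spec; the masking works because the kernel's
 * PCIE_LINK_STATE_* values mirror the register bits): ASPM control is
 * Link Control bits 1:0, bit 0 = L0s and bit 1 = L1, so disabling L1
 * clears bit 1:
 *
 *	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
 *	reg16 &= ~PCIE_LINK_STATE_L1;	// 0x2
 *	pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
 */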
#ifdef CONFIG_PM_OPS
static bool e1000e_pm_ready(struct e1000_adapter *adapter)
{
	return !!adapter->tx_ring->buffer_info;
}
static int __e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);

	e1000e_set_interrupt_capability(adapter);
	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000e_power_up_phy(adapter);

	/* report the system wakeup cause from S3/S4 */
	if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
		u16 phy_data;

		e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
		if (phy_data) {
			e_info("PHY Wakeup cause - %s\n",
				phy_data & E1000_WUS_EX ? "Unicast Packet" :
				phy_data & E1000_WUS_MC ? "Multicast Packet" :
				phy_data & E1000_WUS_BC ? "Broadcast Packet" :
				phy_data & E1000_WUS_MAG ? "Magic Packet" :
				phy_data & E1000_WUS_LNKC ? "Link Status Change" :
				"other");
		}
		e1e_wphy(&adapter->hw, BM_WUS, ~0);
	} else {
		u32 wus = er32(WUS);
		if (wus) {
			e_info("MAC Wakeup cause - %s\n",
				wus & E1000_WUS_EX ? "Unicast Packet" :
				wus & E1000_WUS_MC ? "Multicast Packet" :
				wus & E1000_WUS_BC ? "Broadcast Packet" :
				wus & E1000_WUS_MAG ? "Magic Packet" :
				wus & E1000_WUS_LNKC ? "Link Status Change" :
				"other");
		}
		ew32(WUS, ~0);
	}

	e1000e_reset(adapter);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000e_up(adapter);

	netif_device_attach(netdev);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_get_hw_control(adapter);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int e1000_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake, false);
	if (!retval)
		e1000_complete_shutdown(pdev, true, wake);

	return retval;
}

static int e1000_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (e1000e_pm_ready(adapter))
		adapter->idle_check = true;

	return __e1000_resume(pdev);
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_RUNTIME
static int e1000_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (e1000e_pm_ready(adapter)) {
		bool wake;

		__e1000_shutdown(pdev, &wake, true);
	}

	return 0;
}

static int e1000_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!e1000e_pm_ready(adapter))
		return 0;

	if (adapter->idle_check) {
		adapter->idle_check = false;
		if (!e1000e_has_link(adapter))
			pm_schedule_suspend(dev, MSEC_PER_SEC);
	}

	return -EBUSY;
}

static int e1000_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!e1000e_pm_ready(adapter))
		return -EBUSY;

	adapter->idle_check = !dev->power.runtime_auto;
	return __e1000_resume(pdev);
}
#endif /* CONFIG_PM_RUNTIME */
#endif /* CONFIG_PM_OPS */
static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake = false;

	__e1000_shutdown(pdev, &wake, false);

	if (system_state == SYSTEM_POWER_OFF)
		e1000_complete_shutdown(pdev, false, wake);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);

	enable_irq(adapter->pdev->irq);
}
#endif
/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000e_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	pci_ers_result_t result;

	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pdev->state_saved = true;
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		e1000e_reset(adapter);
		ew32(WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);

	return result;
}
/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000e_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_get_hw_control(adapter);
}
static void e1000_print_device_info(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 pba_num;

	/* print bus type/speed/width info */
	e_info("(PCI Express:2.5GB/s:%s) %pM\n",
	       /* bus width */
	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
	        "Width x1"),
	       /* MAC address */
	       netdev->dev_addr);
	e_info("Intel(R) PRO/%s Network Connection\n",
	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
	e1000e_read_pba_num(hw, &pba_num);
	e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
	       hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff));
}
static void e1000_eeprom_checks(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int ret_val;
	u16 buf = 0;

	if (hw->mac.type != e1000_82573)
		return;

	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
	if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) {
		/* Deep Smart Power Down (DSPD) */
		dev_warn(&adapter->pdev->dev,
			 "Warning: detected DSPD enabled in EEPROM\n");
	}
}
static const struct net_device_ops e1000e_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_multicast_list	= e1000_set_multi,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,

	.ndo_vlan_rx_register	= e1000_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
};
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
	resource_size_t mmio_start, mmio_len;
	resource_size_t flash_start, flash_len;

	static int cards_found;
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;

	if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
		e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions_exclusive(pdev,
					pci_select_bars(pdev, IORESOURCE_MEM),
					e1000e_driver_name);
	if (err)
		goto err_pci_reg;

	/* AER (Advanced Error Reporting) hooks */
	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	/* PCI config space info */
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->flags2 = ei->flags2;
	adapter->hw.adapter = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->max_hw_frame_size = ei->max_hw_frame_size;
	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if ((adapter->flags & FLAG_HAS_FLASH) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)
			goto err_flashmap;
	}

	/* construct the net_device struct */
	netdev->netdev_ops = &e1000e_netdev_ops;
	e1000e_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	adapter->bd_number = cards_found++;

	e1000e_check_options(adapter);

	/* setup adapter struct */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;

	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	err = ei->get_variants(adapter);
	if (err)
		goto err_hw_init;

	if ((adapter->flags & FLAG_IS_ICH) &&
	    (adapter->flags & FLAG_READ_ONLY_NVM))
		e1000e_write_protect_nvm_ich8lan(&adapter->hw);

	hw->mac.ops.get_bus_info(&adapter->hw);

	adapter->hw.phy.autoneg_wait_to_complete = 0;

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = 0;
		adapter->hw.phy.ms_type = e1000_ms_hw_default;
	}

	if (e1000_check_reset_block(&adapter->hw))
		e_info("PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
		netdev->features |= NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	if (e1000e_enable_mng_pass_thru(&adapter->hw))
		adapter->flags |= FLAG_MNG_PT_ENABLED;

	/*
	 * before reading the NVM, reset the controller to
	 * put the device in a known good starting state
	 */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);

	/*
	 * systems with ASPM and others may see the checksum fail on the first
	 * attempt. Let's give it a few tries
	 */
	for (i = 0;; i++) {
		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
			break;
		if (i == 2) {
			e_err("The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}

	e1000_eeprom_checks(adapter);

	/* copy the MAC address */
	if (e1000e_read_mac_addr(&adapter->hw))
		e_err("NVM Read Error while reading MAC address\n");

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &e1000_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = &e1000_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->reset_task, e1000_reset_task);
	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
	INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);

	/* Initialize link parameters. User can change them with ethtool */
	adapter->hw.mac.autoneg = 1;
	adapter->fc_autoneg = 1;
	adapter->hw.fc.requested_mode = e1000_fc_default;
	adapter->hw.fc.current_mode = e1000_fc_default;
	adapter->hw.phy.autoneg_advertised = 0x2f;

	/* ring size defaults */
	adapter->rx_ring->count = 256;
	adapter->tx_ring->count = 256;

	/*
	 * Initial Wake on LAN setting - If APM wake is enabled in
	 * the EEPROM, enable the ACPI Magic Packet filter
	 */
	if (adapter->flags & FLAG_APME_IN_WUC) {
		/* APME bit in EEPROM is mapped to WUC.APME */
		eeprom_data = er32(WUC);
		eeprom_apme_mask = E1000_WUC_APME;
		if (eeprom_data & E1000_WUC_PHY_WAKE)
			adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
		    (adapter->hw.bus.func == 1))
			e1000_read_nvm(&adapter->hw,
				NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		else
			e1000_read_nvm(&adapter->hw,
				NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	}

	/* fetch WoL from EEPROM */
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/*
	 * now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	if (!(adapter->flags & FLAG_HAS_WOL))
		adapter->eeprom_wol = 0;

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* save off EEPROM version number */
	e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);

	/* reset the hardware with the new settings */
	e1000e_reset(adapter);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e1000_print_device_info(adapter);

	if (pci_dev_run_wake(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}
	pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC);

	return 0;

err_register:
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_release_hw_control(adapter);
err_eeprom:
	if (!e1000_check_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	e1000e_reset_interrupt_capability(adapter);
err_flashmap:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
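/*
 * A minimal sketch of the DMA-mask negotiation at the top of probe
 * (standard kernel pattern, assumed device behavior): try 64-bit
 * masks first, fall back to 32-bit, and remember whether high memory
 * is usable so NETIF_F_HIGHDMA can be set later.
 *
 *	int using_dac = 0;
 *	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)))
 *		using_dac = 1;	// 64-bit DMA is safe
 *	else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;	// no usable DMA configuration
 */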
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	bool down = test_bit(__E1000_DOWN, &adapter->state);

	pm_runtime_get_sync(&pdev->dev);

	/*
	 * flush_scheduled_work() may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled
	 */
	if (!down)
		set_bit(__E1000_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->downshift_task);
	cancel_work_sync(&adapter->update_phy_task);
	cancel_work_sync(&adapter->print_hang_task);
	flush_scheduled_work();

	if (!(netdev->flags & IFF_UP))
		e1000_power_down_phy(adapter);

	/* Don't lie to e1000_close() down the road. */
	if (!down)
		clear_bit(__E1000_DOWN, &adapter->state);
	unregister_netdev(netdev);

	if (pci_dev_run_wake(pdev)) {
		pm_runtime_disable(&pdev->dev);
		pm_runtime_set_suspended(&pdev->dev);
	}
	pm_runtime_put_noidle(&pdev->dev);

	/*
	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000_release_hw_control(adapter);

	e1000e_reset_interrupt_capability(adapter);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	/* AER disable */
	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
/* PCI Error Recovery (ERS) */
static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
	  board_80003es2lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },

	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
#ifdef CONFIG_PM_OPS
static const struct dev_pm_ops e1000_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
	SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
			   e1000_runtime_resume, e1000_idle)
};
#endif

/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
	.name     = e1000e_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
#ifdef CONFIG_PM_OPS
	.driver.pm = &e1000_pm_ops,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};
/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;
	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
		e1000e_driver_version);
	pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n");
	ret = pci_register_driver(&e1000_driver);

	return ret;
}
module_init(e1000_init_module);
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}
module_exit(e1000_exit_module);


MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);