/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos_params.h>
#include <linux/aer.h>

#include "e1000.h"

#define DRV_VERSION "1.0.2-k2"
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_82574]		= &e1000_82574_info,
	[board_82583]		= &e1000_82583_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
	[board_ich10lan]	= &e1000_ich10_info,
	[board_pchlan]		= &e1000_pch_info,
};

/**
 * e1000_desc_unused - calculate the number of unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}

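/*
 * Worked example of the arithmetic above (illustrative values only, not
 * taken from hardware): with ring->count = 256, next_to_use = 250 and
 * next_to_clean = 10, the second branch applies and the unused count is
 * 256 + 10 - 250 - 1 = 15. One slot is always left empty so that
 * next_to_use == next_to_clean can only mean "empty", never "full".
 */
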
/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to the net device structure
 * @skb: pointer to sk_buff to be indicated to stack
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev,
			      struct sk_buff *skb,
			      u8 status, __le16 vlan)
{
	skb->protocol = eth_type_trans(skb, netdev);

	if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
		vlan_gro_receive(&adapter->napi, adapter->vlgrp,
				 le16_to_cpu(vlan), skb);
	else
		napi_gro_receive(&adapter->napi, skb);
}

/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;
	/* TCP/UDP checksum error bit is set */
	if (errors & E1000_RXD_ERR_TCPE) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status & E1000_RXD_STAT_TCPCS) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/*
		 * IP fragment with UDP payload
		 * Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		__sum16 sum = (__force __sum16)htons(csum);
		skb->csum = csum_unfold(~sum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	adapter->hw_csum_good++;
}

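/*
 * Illustrative note on the UDP-fragment path above (hypothetical value):
 * if the descriptor reports csum = 0x1234, the driver takes ~htons(0x1234)
 * and csum_unfold() widens that folded 16-bit sum back to a 32-bit __wsum,
 * so the stack can continue accumulating over the remaining fragments
 * under CHECKSUM_COMPLETE semantics.
 */
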
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_buffer_len,
						  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}

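/*
 * Note on the tail write above: next_to_use is advanced one past the last
 * initialized descriptor, but the code writes the index of the last
 * initialized descriptor itself, hence the decrement-with-wrap before
 * writel(). E.g. (hypothetical) with rx_ring->count = 256 and i having
 * wrapped back to 0, the tail register is written as 255.
 */
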
/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(GFP_ATOMIC);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = pci_map_page(pdev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(pdev, ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"RX DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/*
			 * Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j+1] =
				cpu_to_le64(ps_page->dma);
		}

		skb = netdev_alloc_skb_ip_align(netdev,
						adapter->rx_ps_bsize0);

		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_ps_bsize0,
						  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		if (!(i--))
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		/*
		 * Hardware increments by 16 bytes, but packet split
		 * descriptors are 32 bytes...so we increment tail
		 * twice as much.
		 */
		writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
	}
}

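/*
 * Illustrative arithmetic for the "i<<1" above (hypothetical index): the
 * tail register counts in 16-byte legacy-descriptor units, but a packet
 * split descriptor occupies 32 bytes, so descriptor index 100 corresponds
 * to tail value 200 = 100 << 1.
 */
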
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @cleaned_count: number of buffers to allocate this pass
 **/

static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16 /* for skb_reserve */;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma)
			buffer_info->dma = pci_map_page(pdev,
							buffer_info->page, 0,
							PAGE_SIZE,
							PCI_DMA_FROMDEVICE);

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}

/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		pci_unmap_single(pdev,
				 buffer_info->dma,
				 adapter->rx_buffer_len,
				 PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/*
		 * !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it. In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (status & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		/* adjust length to remove Ethernet CRC */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			length -= 4;

		total_rx_bytes += length;
		total_rx_packets++;

		/*
		 * code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb_ip_align(netdev, length);
			if (new_skb) {
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}

static void e1000_put_txbuf(struct e1000_adapter *adapter,
			    struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			pci_unmap_page(adapter->pdev, buffer_info->dma,
				       buffer_info->length, PCI_DMA_TODEVICE);
		else
			pci_unmap_single(adapter->pdev, buffer_info->dma,
					 buffer_info->length,
					 PCI_DMA_TODEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}

static void e1000_print_hw_hang(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     print_hang_task);
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status, phy_1000t_status, phy_ext_status;
	u16 pci_status;

	e1e_rphy(hw, PHY_STATUS, &phy_status);
	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);

	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);

	/* detected Hardware unit hang */
	e_err("Detected Hardware Unit Hang:\n"
	      "  TDH                  <%x>\n"
	      "  TDT                  <%x>\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  jiffies              <%lx>\n"
	      "  next_to_watch.status <%x>\n"
	      "MAC Status             <%x>\n"
	      "PHY Status             <%x>\n"
	      "PHY 1000BASE-T Status  <%x>\n"
	      "PHY Extended Status    <%x>\n"
	      "PCI Status             <%x>\n",
	      readl(adapter->hw.hw_addr + tx_ring->head),
	      readl(adapter->hw.hw_addr + tx_ring->tail),
	      tx_ring->next_to_use,
	      tx_ring->next_to_clean,
	      tx_ring->buffer_info[eop].time_stamp,
	      eop,
	      jiffies,
	      eop_desc->upper.fields.status,
	      er32(STATUS),
	      phy_status,
	      phy_1000t_status,
	      phy_ext_status,
	      pci_status);
}

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		for (; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				struct sk_buff *skb = buffer_info->skb;
				unsigned int segs, bytecount;
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_tx_packets += segs;
				total_tx_bytes += bytecount;
			}

			e1000_put_txbuf(adapter, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		if (i == tx_ring->next_to_use)
			break;
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	if (count && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/*
		 * Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = 0;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
			       + (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
			schedule_work(&adapter->print_hang_task);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	netdev->stats.tx_bytes += total_tx_bytes;
	netdev->stats.tx_packets += total_tx_packets;
	return (count < tx_ring->count);
}

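/*
 * Worked example of the TSO byte accounting in e1000_clean_tx_irq above
 * (hypothetical numbers, assuming the linear area holds only headers):
 * a TSO skb with skb->len = 9000, skb_headlen() = 66 and gso_segs = 7 is
 * counted as (7 - 1) * 66 + 9000 = 9396 bytes, i.e. the payload plus the
 * headers the hardware replicates for segments 2..7.
 */
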
/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				  int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		pci_unmap_single(pdev, buffer_info->dma,
				 adapter->rx_ps_bsize0,
				 PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		/* see !EOP comment in other rx routine */
		if (!(staterr & E1000_RXD_STAT_EOP))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			e_dbg("Packet Split buffers didn't pick up the full "
			      "packet\n");
			dev_kfree_skb_irq(skb);
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			e_dbg("Last part of the packet spanning multiple "
			      "descriptors\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
			/*
			 * this looks ugly, but it seems compiler issues make it
			 * more efficient than reusing j
			 */
			int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

			/*
			 * page alloc/put takes too long and affects small packet
			 * throughput, so unsplit small packets and save the alloc/put
			 * only valid in softirq (napi) context to call kmap_*
			 */
			if (l1 && (l1 <= copybreak) &&
			    ((length + l1) <= adapter->rx_ps_bsize0)) {
				u8 *vaddr;

				ps_page = &buffer_info->ps_pages[0];

				/*
				 * there is no documentation about how to call
				 * kmap_atomic, so we can't hold the mapping
				 * very long
				 */
				pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
					PAGE_SIZE, PCI_DMA_FROMDEVICE);
				vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
				memcpy(skb_tail_pointer(skb), vaddr, l1);
				kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
				pci_dma_sync_single_for_device(pdev, ps_page->dma,
					PAGE_SIZE, PCI_DMA_FROMDEVICE);

				/* remove the CRC */
				if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
					l1 -= 4;

				skb_put(skb, l1);
				goto copydone;
			} /* if */
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		/* strip the ethernet crc, problem is we're using pages now so
		 * this whole operation can get a little cpu intensive
		 */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			pskb_trim(skb, skb->len - 4);

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);

		if (rx_desc->wb.upper.header_status &
		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
}

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/

static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		++i;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
			     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
			/* recycle both page and skb */
			buffer_info->skb = skb;
			/* an error means any chain goes out the window
			 * too */
			if (rx_ring->rx_skb_top)
				dev_kfree_skb(rx_ring->rx_skb_top);
			rx_ring->rx_skb_top = NULL;
			goto next_desc;
		}

#define rxtop rx_ring->rx_skb_top
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page,
							    KM_SKB_DATA_SOFTIRQ);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr,
						      KM_SKB_DATA_SOFTIRQ);
					/* re-use the page, so don't erase
					 * buffer_info->page */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 PCI_DMA_FROMDEVICE);
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				pci_unmap_page(pdev, buffer_info->dma,
					       PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				break;
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	adapter->flags2 &= ~FLAG2_IS_DISCARDING;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

static void e1000e_downshift_workaround(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
					struct e1000_adapter, downshift_task);

	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}

/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/*
	 * read ICR disables interrupts using IAM
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}

/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, icr = er32(ICR);

	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
		return IRQ_NONE;  /* Not our interrupt */

	/*
	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/*
	 * Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked. No need for the
	 * IMC write
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}

static irqreturn_t e1000_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (!(icr & E1000_ICR_INT_ASSERTED)) {
		if (!test_bit(__E1000_DOWN, &adapter->state))
			ew32(IMS, E1000_IMS_OTHER);
		return IRQ_NONE;
	}

	if (icr & adapter->eiac_mask)
		ew32(ICS, (icr & adapter->eiac_mask));

	if (icr & E1000_ICR_OTHER) {
		if (!(icr & E1000_ICR_LSC))
			goto no_link_interrupt;
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

no_link_interrupt:
	if (!test_bit(__E1000_DOWN, &adapter->state))
		ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);

	return IRQ_HANDLED;
}

static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	if (!e1000_clean_tx_irq(adapter))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(ICS, tx_ring->ims_val);

	return IRQ_HANDLED;
}

static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (adapter->rx_ring->set_itr) {
		writel(1000000000 / (adapter->rx_ring->itr_val * 256),
		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
		adapter->rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

/**
 * e1000_configure_msix - Configure MSI-X hardware
 *
 * e1000_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void e1000_configure_msix(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int vector = 0;
	u32 ctrl_ext, ivar = 0;

	adapter->eiac_mask = 0;

	/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
	if (hw->mac.type == e1000_82574) {
		u32 rfctl = er32(RFCTL);
		rfctl |= E1000_RFCTL_ACK_DIS;
		ew32(RFCTL, rfctl);
	}

#define E1000_IVAR_INT_ALLOC_VALID	0x8
	/* Configure Rx vector */
	rx_ring->ims_val = E1000_IMS_RXQ0;
	adapter->eiac_mask |= rx_ring->ims_val;
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       hw->hw_addr + rx_ring->itr_register);
	else
		writel(1, hw->hw_addr + rx_ring->itr_register);
	ivar = E1000_IVAR_INT_ALLOC_VALID | vector;

	/* Configure Tx vector */
	tx_ring->ims_val = E1000_IMS_TXQ0;
	vector++;
	if (tx_ring->itr_val)
		writel(1000000000 / (tx_ring->itr_val * 256),
		       hw->hw_addr + tx_ring->itr_register);
	else
		writel(1, hw->hw_addr + tx_ring->itr_register);
	adapter->eiac_mask |= tx_ring->ims_val;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);

	/* set vector for Other Causes, e.g. link changes */
	vector++;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       hw->hw_addr + E1000_EITR_82574(vector));
	else
		writel(1, hw->hw_addr + E1000_EITR_82574(vector));

	/* Cause Tx interrupts on every write back */
	ivar |= (1 << 31);

	ew32(IVAR, ivar);

	/* enable MSI-X PBA support */
	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;

	/* Auto-Mask Other interrupts upon ICR read */
#define E1000_EIAC_MASK_82574   0x01F00000
	ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
	ctrl_ext |= E1000_CTRL_EXT_EIAME;
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();
}

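/*
 * The writel() arithmetic in e1000_configure_msix above converts an
 * interrupts-per-second value into the 82574 EITR unit of 256 ns.
 * Illustrative example (hypothetical rate): itr_val = 20000 ints/s gives
 * 1000000000 / (20000 * 256) ~= 195 units, i.e. roughly 50 us between
 * interrupts.
 */
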
void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~FLAG_MSI_ENABLED;
	}

	return;
}

/**
 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
{
	int err;
	int numvecs, i;

	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		if (adapter->flags & FLAG_HAS_MSIX) {
			numvecs = 3; /* RxQ0, TxQ0 and other */
			adapter->msix_entries = kcalloc(numvecs,
						      sizeof(struct msix_entry),
						      GFP_KERNEL);
			if (adapter->msix_entries) {
				for (i = 0; i < numvecs; i++)
					adapter->msix_entries[i].entry = i;

				err = pci_enable_msix(adapter->pdev,
						      adapter->msix_entries,
						      numvecs);
				if (err == 0)
					return;
			}
			/* MSI-X failed, so fall through and try MSI */
			e_err("Failed to initialize MSI-X interrupts. "
			      "Falling back to MSI interrupts.\n");
			e1000e_reset_interrupt_capability(adapter);
		}
		adapter->int_mode = E1000E_INT_MODE_MSI;
		/* Fall through */
	case E1000E_INT_MODE_MSI:
		if (!pci_enable_msi(adapter->pdev)) {
			adapter->flags |= FLAG_MSI_ENABLED;
		} else {
			adapter->int_mode = E1000E_INT_MODE_LEGACY;
			e_err("Failed to initialize MSI interrupts. Falling "
			      "back to legacy interrupts.\n");
		}
		/* Fall through */
	case E1000E_INT_MODE_LEGACY:
		/* Don't do anything; this is the system default */
		break;
	}

	return;
}

/**
 * e1000_request_msix - Initialize MSI-X interrupts
 *
 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int e1000_request_msix(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;

	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
	else
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_intr_msix_rx, 0, adapter->rx_ring->name,
			  netdev);
	if (err)
		goto out;
	adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
	adapter->rx_ring->itr_val = adapter->itr;
	vector++;

	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
	else
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_intr_msix_tx, 0, adapter->tx_ring->name,
			  netdev);
	if (err)
		goto out;
	adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
	adapter->tx_ring->itr_val = adapter->itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	e1000_configure_msix(adapter);
	return 0;
out:
	return err;
}

/**
 * e1000_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->msix_entries) {
		err = e1000_request_msix(adapter);
		if (!err)
			return err;
		/* fall back to MSI */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_MSI;
		e1000e_set_interrupt_capability(adapter);
	}
	if (adapter->flags & FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
				  netdev->name, netdev);
		if (!err)
			return err;

		/* fall back to legacy interrupt */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_LEGACY;
	}

	err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
			  netdev->name, netdev);
	if (err)
		e_err("Unable to allocate interrupt, Error: %d\n", err);

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msix_entries) {
		int vector = 0;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		/* Other Causes interrupt vector */
		free_irq(adapter->msix_entries[vector].vector, netdev);
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	if (adapter->msix_entries)
		ew32(EIAC_82574, 0);
	e1e_flush();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
	} else {
		ew32(IMS, IMS_ENABLE_MASK);
	}
	e1e_flush();
}

/**
 * e1000_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 **/
static void e1000_get_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware know the driver has taken over */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}
}

/**
 * e1000_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is closed.
 *
 **/
static void e1000_release_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware take over control of h/w */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
	}
}

/**
 * e1000_alloc_ring_dma - allocate memory for a ring structure
 **/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
				struct e1000_ring *ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
					GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

/**
 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int err = -ENOMEM, size;

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, tx_ring);
	if (err)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;
err:
	vfree(tx_ring->buffer_info);
	e_err("Unable to allocate memory for the transmit descriptor ring\n");
	return err;
}

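/*
 * Sizing example for the ALIGN above (hypothetical ring sizes): 256 Tx
 * descriptors * 16 bytes = 4096 bytes, already 4K aligned; a ring of 320
 * descriptors (5120 bytes) would be rounded up to 8192.
 */
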
/**
 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	int i, size, desc_len, err = -ENOMEM;

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
						sizeof(struct e1000_ps_page),
						GFP_KERNEL);
		if (!buffer_info->ps_pages)
			goto err_pages;
	}

	desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, rx_ring);
	if (err)
		goto err_pages;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->rx_skb_top = NULL;

	return 0;

err_pages:
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		kfree(buffer_info->ps_pages);
	}
err:
	vfree(rx_ring->buffer_info);
	e_err("Unable to allocate memory for the receive descriptor ring\n");
bc7f75fa
AK
1777 return err;
1778}
1779
1780/**
1781 * e1000_clean_tx_ring - Free Tx Buffers
1782 * @adapter: board private structure
1783 **/
1784static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
1785{
1786 struct e1000_ring *tx_ring = adapter->tx_ring;
1787 struct e1000_buffer *buffer_info;
1788 unsigned long size;
1789 unsigned int i;
1790
1791 for (i = 0; i < tx_ring->count; i++) {
1792 buffer_info = &tx_ring->buffer_info[i];
1793 e1000_put_txbuf(adapter, buffer_info);
1794 }
1795
1796 size = sizeof(struct e1000_buffer) * tx_ring->count;
1797 memset(tx_ring->buffer_info, 0, size);
1798
1799 memset(tx_ring->desc, 0, tx_ring->size);
1800
1801 tx_ring->next_to_use = 0;
1802 tx_ring->next_to_clean = 0;
1803
1804 writel(0, adapter->hw.hw_addr + tx_ring->head);
1805 writel(0, adapter->hw.hw_addr + tx_ring->tail);
1806}
1807
1808/**
1809 * e1000e_free_tx_resources - Free Tx Resources per Queue
1810 * @adapter: board private structure
1811 *
1812 * Free all transmit software resources
1813 **/
1814void e1000e_free_tx_resources(struct e1000_adapter *adapter)
1815{
1816 struct pci_dev *pdev = adapter->pdev;
1817 struct e1000_ring *tx_ring = adapter->tx_ring;
1818
1819 e1000_clean_tx_ring(adapter);
1820
1821 vfree(tx_ring->buffer_info);
1822 tx_ring->buffer_info = NULL;
1823
1824 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1825 tx_ring->dma);
1826 tx_ring->desc = NULL;
1827}
1828
1829/**
1830 * e1000e_free_rx_resources - Free Rx Resources
1831 * @adapter: board private structure
1832 *
1833 * Free all receive software resources
1834 **/
1835
1836void e1000e_free_rx_resources(struct e1000_adapter *adapter)
1837{
1838 struct pci_dev *pdev = adapter->pdev;
1839 struct e1000_ring *rx_ring = adapter->rx_ring;
47f44e40 1840 int i;
1841
1842 e1000_clean_rx_ring(adapter);
1843
1844 for (i = 0; i < rx_ring->count; i++) {
1845 kfree(rx_ring->buffer_info[i].ps_pages);
1846 }
1847
1848 vfree(rx_ring->buffer_info);
1849 rx_ring->buffer_info = NULL;
1850
1851 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
1852 rx_ring->dma);
1853 rx_ring->desc = NULL;
1854}
1855
1856/**
1857 * e1000_update_itr - update the dynamic ITR value based on statistics
1858 * @adapter: pointer to adapter
1859 * @itr_setting: current adapter->itr
1860 * @packets: the number of packets during this measurement interval
1861 * @bytes: the number of bytes during this measurement interval
1862 *
1863 * Stores a new ITR value based on packets and byte
1864 * counts during the last interrupt. The advantage of per interrupt
1865 * computation is faster updates and more accurate ITR for the current
1866 * traffic pattern. Constants in this function were computed
1867 * based on theoretical maximum wire speed and thresholds were set based
1868 * on testing data as well as attempting to minimize response time
1869 * while increasing bulk throughput. This functionality is controlled
1870 * by the InterruptThrottleRate module parameter.
1871 **/
1872static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
1873 u16 itr_setting, int packets,
1874 int bytes)
1875{
1876 unsigned int retval = itr_setting;
1877
1878 if (packets == 0)
1879 goto update_itr_done;
1880
1881 switch (itr_setting) {
1882 case lowest_latency:
1883 /* handle TSO and jumbo frames */
1884 if (bytes/packets > 8000)
1885 retval = bulk_latency;
1886 else if ((packets < 5) && (bytes > 512)) {
1887 retval = low_latency;
1888 }
1889 break;
1890 case low_latency: /* 50 usec aka 20000 ints/s */
1891 if (bytes > 10000) {
1892 /* this if handles the TSO accounting */
1893 if (bytes/packets > 8000) {
1894 retval = bulk_latency;
1895 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
1896 retval = bulk_latency;
 1897 			} else if (packets > 35) {
1898 retval = lowest_latency;
1899 }
1900 } else if (bytes/packets > 2000) {
1901 retval = bulk_latency;
1902 } else if (packets <= 2 && bytes < 512) {
1903 retval = lowest_latency;
1904 }
1905 break;
1906 case bulk_latency: /* 250 usec aka 4000 ints/s */
1907 if (bytes > 25000) {
1908 if (packets > 35) {
1909 retval = low_latency;
1910 }
1911 } else if (bytes < 6000) {
1912 retval = low_latency;
1913 }
1914 break;
1915 }
1916
1917update_itr_done:
1918 return retval;
1919}
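/*
 * Worked example of the thresholds above (illustrative numbers, not from
 * the original): at low_latency with 40 packets totalling 48000 bytes in
 * one interval, bytes > 10000 and bytes/packets == 1200, so neither TSO
 * branch fires, and packets > 35 promotes the queue to lowest_latency.
 */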
1920
1921static void e1000_set_itr(struct e1000_adapter *adapter)
1922{
1923 struct e1000_hw *hw = &adapter->hw;
1924 u16 current_itr;
1925 u32 new_itr = adapter->itr;
1926
1927 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
1928 if (adapter->link_speed != SPEED_1000) {
1929 current_itr = 0;
1930 new_itr = 4000;
1931 goto set_itr_now;
1932 }
1933
1934 adapter->tx_itr = e1000_update_itr(adapter,
1935 adapter->tx_itr,
1936 adapter->total_tx_packets,
1937 adapter->total_tx_bytes);
1938 /* conservative mode (itr 3) eliminates the lowest_latency setting */
1939 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
1940 adapter->tx_itr = low_latency;
1941
1942 adapter->rx_itr = e1000_update_itr(adapter,
1943 adapter->rx_itr,
1944 adapter->total_rx_packets,
1945 adapter->total_rx_bytes);
1946 /* conservative mode (itr 3) eliminates the lowest_latency setting */
1947 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
1948 adapter->rx_itr = low_latency;
1949
1950 current_itr = max(adapter->rx_itr, adapter->tx_itr);
1951
1952 switch (current_itr) {
1953 /* counts and packets in update_itr are dependent on these numbers */
1954 case lowest_latency:
1955 new_itr = 70000;
1956 break;
1957 case low_latency:
1958 new_itr = 20000; /* aka hwitr = ~200 */
1959 break;
1960 case bulk_latency:
1961 new_itr = 4000;
1962 break;
1963 default:
1964 break;
1965 }
1966
1967set_itr_now:
1968 if (new_itr != adapter->itr) {
1969 /*
1970 * this attempts to bias the interrupt rate towards Bulk
bc7f75fa 1971 * by adding intermediate steps when interrupt rate is
1972 * increasing
1973 */
1974 new_itr = new_itr > adapter->itr ?
1975 min(adapter->itr + (new_itr >> 2), new_itr) :
1976 new_itr;
1977 adapter->itr = new_itr;
1978 adapter->rx_ring->itr_val = new_itr;
1979 if (adapter->msix_entries)
1980 adapter->rx_ring->set_itr = 1;
1981 else
1982 ew32(ITR, 1000000000 / (new_itr * 256));
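		/*
		 * The ITR register takes the interval in 256 ns units;
		 * e.g. new_itr = 20000 ints/s programs
		 * 1000000000 / (20000 * 256) = 195, i.e. ~50 usec.
		 */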
1983 }
1984}
1985
1986/**
1987 * e1000_alloc_queues - Allocate memory for all rings
1988 * @adapter: board private structure to initialize
1989 **/
1990static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
1991{
1992 adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
1993 if (!adapter->tx_ring)
1994 goto err;
1995
1996 adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
1997 if (!adapter->rx_ring)
1998 goto err;
1999
2000 return 0;
2001err:
2002 e_err("Unable to allocate memory for queues\n");
2003 kfree(adapter->rx_ring);
2004 kfree(adapter->tx_ring);
2005 return -ENOMEM;
2006}
2007
2008/**
2009 * e1000_clean - NAPI Rx polling callback
ad68076e 2010 * @napi: struct associated with this polling callback
489815ce 2011 * @budget: number of packets the driver may process this poll
2012 **/
2013static int e1000_clean(struct napi_struct *napi, int budget)
2014{
2015 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
4662e82b 2016 struct e1000_hw *hw = &adapter->hw;
bc7f75fa 2017 struct net_device *poll_dev = adapter->netdev;
679e8a0f 2018 int tx_cleaned = 1, work_done = 0;
bc7f75fa 2019
4cf1653a 2020 adapter = netdev_priv(poll_dev);
bc7f75fa 2021
2022 if (adapter->msix_entries &&
2023 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2024 goto clean_rx;
2025
92af3e95 2026 tx_cleaned = e1000_clean_tx_irq(adapter);
bc7f75fa 2027
4662e82b 2028clean_rx:
bc7f75fa 2029 adapter->clean_rx(adapter, &work_done, budget);
d2c7ddd6 2030
12d04a3c 2031 if (!tx_cleaned)
d2c7ddd6 2032 work_done = budget;
bc7f75fa 2033
2034 /* If budget not fully consumed, exit the polling mode */
2035 if (work_done < budget) {
2036 if (adapter->itr_setting & 3)
2037 e1000_set_itr(adapter);
288379f0 2038 napi_complete(napi);
2039 if (!test_bit(__E1000_DOWN, &adapter->state)) {
2040 if (adapter->msix_entries)
2041 ew32(IMS, adapter->rx_ring->ims_val);
2042 else
2043 e1000_irq_enable(adapter);
2044 }
2045 }
2046
2047 return work_done;
2048}
2049
2050static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2051{
2052 struct e1000_adapter *adapter = netdev_priv(netdev);
2053 struct e1000_hw *hw = &adapter->hw;
2054 u32 vfta, index;
2055
2056 /* don't update vlan cookie if already programmed */
2057 if ((adapter->hw.mng_cookie.status &
2058 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2059 (vid == adapter->mng_vlan_id))
2060 return;
caaddaf8 2061
bc7f75fa 2062 /* add VID to filter table */
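	/*
	 * The 4096-entry VLAN filter is a 128 x 32-bit bitmap: VID bits
	 * 11:5 select the VFTA register, bits 4:0 the bit within it,
	 * e.g. VID 100 -> register index 3, bit 4.
	 */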
2063 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2064 index = (vid >> 5) & 0x7F;
2065 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2066 vfta |= (1 << (vid & 0x1F));
2067 hw->mac.ops.write_vfta(hw, index, vfta);
2068 }
2069}
2070
2071static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2072{
2073 struct e1000_adapter *adapter = netdev_priv(netdev);
2074 struct e1000_hw *hw = &adapter->hw;
2075 u32 vfta, index;
2076
2077 if (!test_bit(__E1000_DOWN, &adapter->state))
2078 e1000_irq_disable(adapter);
bc7f75fa 2079 vlan_group_set_device(adapter->vlgrp, vid, NULL);
2080
2081 if (!test_bit(__E1000_DOWN, &adapter->state))
2082 e1000_irq_enable(adapter);
2083
2084 if ((adapter->hw.mng_cookie.status &
2085 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2086 (vid == adapter->mng_vlan_id)) {
2087 /* release control to f/w */
2088 e1000_release_hw_control(adapter);
2089 return;
2090 }
2091
2092 /* remove VID from filter table */
2093 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2094 index = (vid >> 5) & 0x7F;
2095 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2096 vfta &= ~(1 << (vid & 0x1F));
2097 hw->mac.ops.write_vfta(hw, index, vfta);
2098 }
2099}
2100
2101static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2102{
2103 struct net_device *netdev = adapter->netdev;
2104 u16 vid = adapter->hw.mng_cookie.vlan_id;
2105 u16 old_vid = adapter->mng_vlan_id;
2106
2107 if (!adapter->vlgrp)
2108 return;
2109
2110 if (!vlan_group_get_device(adapter->vlgrp, vid)) {
2111 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2112 if (adapter->hw.mng_cookie.status &
2113 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2114 e1000_vlan_rx_add_vid(netdev, vid);
2115 adapter->mng_vlan_id = vid;
2116 }
2117
2118 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
2119 (vid != old_vid) &&
2120 !vlan_group_get_device(adapter->vlgrp, old_vid))
2121 e1000_vlan_rx_kill_vid(netdev, old_vid);
2122 } else {
2123 adapter->mng_vlan_id = vid;
2124 }
2125}
2126
2127
2128static void e1000_vlan_rx_register(struct net_device *netdev,
2129 struct vlan_group *grp)
2130{
2131 struct e1000_adapter *adapter = netdev_priv(netdev);
2132 struct e1000_hw *hw = &adapter->hw;
2133 u32 ctrl, rctl;
2134
2135 if (!test_bit(__E1000_DOWN, &adapter->state))
2136 e1000_irq_disable(adapter);
2137 adapter->vlgrp = grp;
2138
2139 if (grp) {
2140 /* enable VLAN tag insert/strip */
2141 ctrl = er32(CTRL);
2142 ctrl |= E1000_CTRL_VME;
2143 ew32(CTRL, ctrl);
2144
2145 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2146 /* enable VLAN receive filtering */
2147 rctl = er32(RCTL);
2148 rctl &= ~E1000_RCTL_CFIEN;
2149 ew32(RCTL, rctl);
2150 e1000_update_mng_vlan(adapter);
2151 }
2152 } else {
2153 /* disable VLAN tag insert/strip */
2154 ctrl = er32(CTRL);
2155 ctrl &= ~E1000_CTRL_VME;
2156 ew32(CTRL, ctrl);
2157
2158 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2159 if (adapter->mng_vlan_id !=
2160 (u16)E1000_MNG_VLAN_NONE) {
2161 e1000_vlan_rx_kill_vid(netdev,
2162 adapter->mng_vlan_id);
2163 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2164 }
2165 }
2166 }
2167
2168 if (!test_bit(__E1000_DOWN, &adapter->state))
2169 e1000_irq_enable(adapter);
2170}
2171
2172static void e1000_restore_vlan(struct e1000_adapter *adapter)
2173{
2174 u16 vid;
2175
2176 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2177
2178 if (!adapter->vlgrp)
2179 return;
2180
2181 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2182 if (!vlan_group_get_device(adapter->vlgrp, vid))
2183 continue;
2184 e1000_vlan_rx_add_vid(adapter->netdev, vid);
2185 }
2186}
2187
2188static void e1000_init_manageability(struct e1000_adapter *adapter)
2189{
2190 struct e1000_hw *hw = &adapter->hw;
2191 u32 manc, manc2h;
2192
2193 if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2194 return;
2195
2196 manc = er32(MANC);
2197
2198 /*
2199 * enable receiving management packets to the host. this will probably
bc7f75fa 2200 * generate destination unreachable messages from the host OS, but
2201 * the packets will be handled on SMBUS
2202 */
2203 manc |= E1000_MANC_EN_MNG2HOST;
2204 manc2h = er32(MANC2H);
2205#define E1000_MNG2HOST_PORT_623 (1 << 5)
2206#define E1000_MNG2HOST_PORT_664 (1 << 6)
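	/*
	 * UDP ports 623 and 664 are the standard RMCP and secure RMCP
	 * ports used for ASF/IPMI management traffic.
	 */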
2207 manc2h |= E1000_MNG2HOST_PORT_623;
2208 manc2h |= E1000_MNG2HOST_PORT_664;
2209 ew32(MANC2H, manc2h);
2210 ew32(MANC, manc);
2211}
2212
2213/**
 2214 * e1000_configure_tx - Configure Transmit Unit after Reset
2215 * @adapter: board private structure
2216 *
2217 * Configure the Tx unit of the MAC after a reset.
2218 **/
2219static void e1000_configure_tx(struct e1000_adapter *adapter)
2220{
2221 struct e1000_hw *hw = &adapter->hw;
2222 struct e1000_ring *tx_ring = adapter->tx_ring;
2223 u64 tdba;
2224 u32 tdlen, tctl, tipg, tarc;
2225 u32 ipgr1, ipgr2;
2226
2227 /* Setup the HW Tx Head and Tail descriptor pointers */
2228 tdba = tx_ring->dma;
2229 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
284901a9 2230 ew32(TDBAL, (tdba & DMA_BIT_MASK(32)));
2231 ew32(TDBAH, (tdba >> 32));
2232 ew32(TDLEN, tdlen);
2233 ew32(TDH, 0);
2234 ew32(TDT, 0);
2235 tx_ring->head = E1000_TDH;
2236 tx_ring->tail = E1000_TDT;
2237
2238 /* Set the default values for the Tx Inter Packet Gap timer */
2239 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */
2240 ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */
2241 ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */
2242
2243 if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
2244 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */
2245
2246 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
2247 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
2248 ew32(TIPG, tipg);
2249
2250 /* Set the Tx Interrupt Delay register */
2251 ew32(TIDV, adapter->tx_int_delay);
ad68076e 2252 /* Tx irq moderation */
2253 ew32(TADV, adapter->tx_abs_int_delay);
2254
2255 /* Program the Transmit Control Register */
2256 tctl = er32(TCTL);
2257 tctl &= ~E1000_TCTL_CT;
2258 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2259 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2260
2261 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
e9ec2c0f 2262 tarc = er32(TARC(0));
2263 /*
2264 * set the speed mode bit, we'll clear it if we're not at
2265 * gigabit link later
2266 */
2267#define SPEED_MODE_BIT (1 << 21)
2268 tarc |= SPEED_MODE_BIT;
e9ec2c0f 2269 ew32(TARC(0), tarc);
2270 }
2271
2272 /* errata: program both queues to unweighted RR */
2273 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
e9ec2c0f 2274 tarc = er32(TARC(0));
bc7f75fa 2275 tarc |= 1;
2276 ew32(TARC(0), tarc);
2277 tarc = er32(TARC(1));
bc7f75fa 2278 tarc |= 1;
e9ec2c0f 2279 ew32(TARC(1), tarc);
2280 }
2281
2282 /* Setup Transmit Descriptor Settings for eop descriptor */
2283 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
2284
2285 /* only set IDE if we are delaying interrupts using the timers */
2286 if (adapter->tx_int_delay)
2287 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2288
2289 /* enable Report Status bit */
2290 adapter->txd_cmd |= E1000_TXD_CMD_RS;
2291
2292 ew32(TCTL, tctl);
2293
edfea6e6 2294 e1000e_config_collision_dist(hw);
2295}
2296
2297/**
2298 * e1000_setup_rctl - configure the receive control registers
2299 * @adapter: Board private structure
2300 **/
2301#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
2302 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
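/*
 * PAGE_USE_COUNT is a round-up division; e.g. with 4K pages a
 * 9000-byte jumbo MTU needs (9000 >> 12) + 1 = 3 pages.
 */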
2303static void e1000_setup_rctl(struct e1000_adapter *adapter)
2304{
2305 struct e1000_hw *hw = &adapter->hw;
2306 u32 rctl, rfctl;
2307 u32 psrctl = 0;
2308 u32 pages = 0;
2309
2310 /* Program MC offset vector base */
2311 rctl = er32(RCTL);
2312 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2313 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
2314 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
2315 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2316
2317 /* Do not Store bad packets */
2318 rctl &= ~E1000_RCTL_SBP;
2319
2320 /* Enable Long Packet receive */
2321 if (adapter->netdev->mtu <= ETH_DATA_LEN)
2322 rctl &= ~E1000_RCTL_LPE;
2323 else
2324 rctl |= E1000_RCTL_LPE;
2325
2326 /* Some systems expect that the CRC is included in SMBUS traffic. The
2327 * hardware strips the CRC before sending to both SMBUS (BMC) and to
2328 * host memory when this is enabled
2329 */
2330 if (adapter->flags2 & FLAG2_CRC_STRIPPING)
2331 rctl |= E1000_RCTL_SECRC;
5918bd88 2332
2333 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
2334 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
2335 u16 phy_data;
2336
2337 e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
2338 phy_data &= 0xfff8;
2339 phy_data |= (1 << 2);
2340 e1e_wphy(hw, PHY_REG(770, 26), phy_data);
2341
2342 e1e_rphy(hw, 22, &phy_data);
2343 phy_data &= 0x0fff;
2344 phy_data |= (1 << 14);
2345 e1e_wphy(hw, 0x10, 0x2823);
2346 e1e_wphy(hw, 0x11, 0x0003);
2347 e1e_wphy(hw, 22, phy_data);
2348 }
2349
2350 /* Setup buffer sizes */
2351 rctl &= ~E1000_RCTL_SZ_4096;
2352 rctl |= E1000_RCTL_BSEX;
2353 switch (adapter->rx_buffer_len) {
2354 case 2048:
2355 default:
2356 rctl |= E1000_RCTL_SZ_2048;
2357 rctl &= ~E1000_RCTL_BSEX;
2358 break;
2359 case 4096:
2360 rctl |= E1000_RCTL_SZ_4096;
2361 break;
2362 case 8192:
2363 rctl |= E1000_RCTL_SZ_8192;
2364 break;
2365 case 16384:
2366 rctl |= E1000_RCTL_SZ_16384;
2367 break;
2368 }
2369
2370 /*
2371 * 82571 and greater support packet-split where the protocol
2372 * header is placed in skb->data and the packet data is
2373 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2374 * In the case of a non-split, skb->data is linearly filled,
2375 * followed by the page buffers. Therefore, skb->data is
2376 * sized to hold the largest protocol header.
2377 *
2378 * allocations using alloc_page take too long for regular MTU
2379 * so only enable packet split for jumbo frames
2380 *
2381 * Using pages when the page size is greater than 16k wastes
2382 * a lot of memory, since we allocate 3 pages at all times
2383 * per packet.
2384 */
bc7f75fa 2385 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
2386 if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) &&
2387 (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
bc7f75fa 2388 adapter->rx_ps_pages = pages;
2389 else
2390 adapter->rx_ps_pages = 0;
2391
2392 if (adapter->rx_ps_pages) {
2393 /* Configure extra packet-split registers */
2394 rfctl = er32(RFCTL);
2395 rfctl |= E1000_RFCTL_EXTEN;
2396 /*
2397 * disable packet split support for IPv6 extension headers,
2398 * because some malformed IPv6 headers can hang the Rx
2399 */
2400 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
2401 E1000_RFCTL_NEW_IPV6_EXT_DIS);
2402
2403 ew32(RFCTL, rfctl);
2404
2405 /* Enable Packet split descriptors */
2406 rctl |= E1000_RCTL_DTYP_PS;
2407
2408 psrctl |= adapter->rx_ps_bsize0 >>
2409 E1000_PSRCTL_BSIZE0_SHIFT;
2410
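		/*
		 * Note: the switch below falls through intentionally, so
		 * each case also programs the buffer sizes of the
		 * lower-numbered pages.
		 */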
2411 switch (adapter->rx_ps_pages) {
2412 case 3:
2413 psrctl |= PAGE_SIZE <<
2414 E1000_PSRCTL_BSIZE3_SHIFT;
2415 case 2:
2416 psrctl |= PAGE_SIZE <<
2417 E1000_PSRCTL_BSIZE2_SHIFT;
2418 case 1:
2419 psrctl |= PAGE_SIZE >>
2420 E1000_PSRCTL_BSIZE1_SHIFT;
2421 break;
2422 }
2423
2424 ew32(PSRCTL, psrctl);
2425 }
2426
2427 ew32(RCTL, rctl);
2428 /* just started the receive unit, no need to restart */
2429 adapter->flags &= ~FLAG_RX_RESTART_NOW;
2430}
2431
2432/**
2433 * e1000_configure_rx - Configure Receive Unit after Reset
2434 * @adapter: board private structure
2435 *
2436 * Configure the Rx unit of the MAC after a reset.
2437 **/
2438static void e1000_configure_rx(struct e1000_adapter *adapter)
2439{
2440 struct e1000_hw *hw = &adapter->hw;
2441 struct e1000_ring *rx_ring = adapter->rx_ring;
2442 u64 rdba;
2443 u32 rdlen, rctl, rxcsum, ctrl_ext;
2444
2445 if (adapter->rx_ps_pages) {
2446 /* this is a 32 byte descriptor */
2447 rdlen = rx_ring->count *
2448 sizeof(union e1000_rx_desc_packet_split);
2449 adapter->clean_rx = e1000_clean_rx_irq_ps;
2450 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
2451 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
2452 rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
2453 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
2454 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
bc7f75fa 2455 } else {
97ac8cae 2456 rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
2457 adapter->clean_rx = e1000_clean_rx_irq;
2458 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
2459 }
2460
2461 /* disable receives while setting up the descriptors */
2462 rctl = er32(RCTL);
2463 ew32(RCTL, rctl & ~E1000_RCTL_EN);
2464 e1e_flush();
2465 msleep(10);
2466
2467 /* set the Receive Delay Timer Register */
2468 ew32(RDTR, adapter->rx_int_delay);
2469
2470 /* irq moderation */
2471 ew32(RADV, adapter->rx_abs_int_delay);
2472 if (adapter->itr_setting != 0)
ad68076e 2473 ew32(ITR, 1000000000 / (adapter->itr * 256));
2474
2475 ctrl_ext = er32(CTRL_EXT);
2476 /* Auto-Mask interrupts upon ICR access */
2477 ctrl_ext |= E1000_CTRL_EXT_IAME;
2478 ew32(IAM, 0xffffffff);
2479 ew32(CTRL_EXT, ctrl_ext);
2480 e1e_flush();
2481
2482 /*
2483 * Setup the HW Rx Head and Tail Descriptor Pointers and
2484 * the Base and Length of the Rx Descriptor Ring
2485 */
bc7f75fa 2486 rdba = rx_ring->dma;
284901a9 2487 ew32(RDBAL, (rdba & DMA_BIT_MASK(32)));
2488 ew32(RDBAH, (rdba >> 32));
2489 ew32(RDLEN, rdlen);
2490 ew32(RDH, 0);
2491 ew32(RDT, 0);
2492 rx_ring->head = E1000_RDH;
2493 rx_ring->tail = E1000_RDT;
2494
2495 /* Enable Receive Checksum Offload for TCP and UDP */
2496 rxcsum = er32(RXCSUM);
2497 if (adapter->flags & FLAG_RX_CSUM_ENABLED) {
2498 rxcsum |= E1000_RXCSUM_TUOFL;
2499
2500 /*
2501 * IPv4 payload checksum for UDP fragments must be
2502 * used in conjunction with packet-split.
2503 */
2504 if (adapter->rx_ps_pages)
2505 rxcsum |= E1000_RXCSUM_IPPCSE;
2506 } else {
2507 rxcsum &= ~E1000_RXCSUM_TUOFL;
2508 /* no need to clear IPPCSE as it defaults to 0 */
2509 }
2510 ew32(RXCSUM, rxcsum);
2511
2512 /*
2513 * Enable early receives on supported devices, only takes effect when
bc7f75fa 2514 * packet size is equal or larger than the specified value (in 8 byte
2515 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
2516 */
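	/*
	 * For reference (value from the register definitions):
	 * E1000_ERT_2048 is 0x100, i.e. 256 8-byte units = 2048 bytes,
	 * matching the comment above.
	 */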
2517 if (adapter->flags & FLAG_HAS_ERT) {
2518 if (adapter->netdev->mtu > ETH_DATA_LEN) {
2519 u32 rxdctl = er32(RXDCTL(0));
2520 ew32(RXDCTL(0), rxdctl | 0x3);
2521 ew32(ERT, E1000_ERT_2048 | (1 << 13));
2522 /*
2523 * With jumbo frames and early-receive enabled,
2524 * excessive C-state transition latencies result in
2525 * dropped transactions.
2526 */
2527 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
2528 adapter->netdev->name, 55);
2529 } else {
2530 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
2531 adapter->netdev->name,
2532 PM_QOS_DEFAULT_VALUE);
2533 }
97ac8cae 2534 }
2535
2536 /* Enable Receives */
2537 ew32(RCTL, rctl);
2538}
2539
2540/**
e2de3eb6 2541 * e1000_update_mc_addr_list - Update Multicast addresses
2542 * @hw: pointer to the HW structure
2543 * @mc_addr_list: array of multicast addresses to program
2544 * @mc_addr_count: number of multicast addresses to program
bc7f75fa 2545 *
ab8932f3 2546 * Updates the Multicast Table Array.
bc7f75fa 2547 * The caller must have a packed mc_addr_list of multicast addresses.
bc7f75fa 2548 **/
e2de3eb6 2549static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
ab8932f3 2550 u32 mc_addr_count)
bc7f75fa 2551{
ab8932f3 2552 hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
2553}
2554
2555/**
2556 * e1000_set_multi - Multicast and Promiscuous mode set
2557 * @netdev: network interface device structure
2558 *
2559 * The set_multi entry point is called whenever the multicast address
2560 * list or the network interface flags are updated. This routine is
2561 * responsible for configuring the hardware for proper multicast,
2562 * promiscuous mode, and all-multi behavior.
2563 **/
2564static void e1000_set_multi(struct net_device *netdev)
2565{
2566 struct e1000_adapter *adapter = netdev_priv(netdev);
2567 struct e1000_hw *hw = &adapter->hw;
2568 struct dev_mc_list *mc_ptr;
2569 u8 *mta_list;
2570 u32 rctl;
2571 int i;
2572
2573 /* Check for Promiscuous and All Multicast modes */
2574
2575 rctl = er32(RCTL);
2576
2577 if (netdev->flags & IFF_PROMISC) {
2578 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
746b9f02 2579 rctl &= ~E1000_RCTL_VFE;
bc7f75fa 2580 } else {
2581 if (netdev->flags & IFF_ALLMULTI) {
2582 rctl |= E1000_RCTL_MPE;
2583 rctl &= ~E1000_RCTL_UPE;
2584 } else {
2585 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
2586 }
78ed11a5 2587 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
746b9f02 2588 rctl |= E1000_RCTL_VFE;
2589 }
2590
2591 ew32(RCTL, rctl);
2592
2593 if (!netdev_mc_empty(netdev)) {
 2594 		mta_list = kmalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
2595 if (!mta_list)
2596 return;
2597
2598 /* prepare a packed array of only addresses. */
2599 i = 0;
2600 netdev_for_each_mc_addr(mc_ptr, netdev)
2601 memcpy(mta_list + (i++ * ETH_ALEN),
2602 mc_ptr->dmi_addr, ETH_ALEN);
bc7f75fa 2603
ab8932f3 2604 e1000_update_mc_addr_list(hw, mta_list, i);
2605 kfree(mta_list);
2606 } else {
2607 /*
2608 * if we're called from probe, we might not have
2609 * anything to do here, so clear out the list
2610 */
ab8932f3 2611 e1000_update_mc_addr_list(hw, NULL, 0);
2612 }
2613}
2614
2615/**
ad68076e 2616 * e1000_configure - configure the hardware for Rx and Tx
2617 * @adapter: private board structure
2618 **/
2619static void e1000_configure(struct e1000_adapter *adapter)
2620{
2621 e1000_set_multi(adapter->netdev);
2622
2623 e1000_restore_vlan(adapter);
2624 e1000_init_manageability(adapter);
2625
2626 e1000_configure_tx(adapter);
2627 e1000_setup_rctl(adapter);
2628 e1000_configure_rx(adapter);
ad68076e 2629 adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring));
2630}
2631
2632/**
2633 * e1000e_power_up_phy - restore link in case the phy was powered down
2634 * @adapter: address of board private structure
2635 *
2636 * The phy may be powered down to save power and turn off link when the
2637 * driver is unloaded and wake on lan is not enabled (among others)
2638 * *** this routine MUST be followed by a call to e1000e_reset ***
2639 **/
2640void e1000e_power_up_phy(struct e1000_adapter *adapter)
2641{
2642 if (adapter->hw.phy.ops.power_up)
2643 adapter->hw.phy.ops.power_up(&adapter->hw);
2644
2645 adapter->hw.mac.ops.setup_link(&adapter->hw);
2646}
2647
2648/**
2649 * e1000_power_down_phy - Power down the PHY
2650 *
2651 * Power down the PHY so no link is implied when interface is down.
2652 * The PHY cannot be powered down if management or WoL is active.
2653 */
2654static void e1000_power_down_phy(struct e1000_adapter *adapter)
2655{
bc7f75fa 2656 /* WoL is enabled */
23b66e2b 2657 if (adapter->wol)
2658 return;
2659
2660 if (adapter->hw.phy.ops.power_down)
2661 adapter->hw.phy.ops.power_down(&adapter->hw);
2662}
2663
2664/**
2665 * e1000e_reset - bring the hardware into a known good state
2666 *
2667 * This function boots the hardware and enables some settings that
2668 * require a configuration cycle of the hardware - those cannot be
2669 * set/changed during runtime. After reset the device needs to be
ad68076e 2670 * properly configured for Rx, Tx etc.
2671 */
2672void e1000e_reset(struct e1000_adapter *adapter)
2673{
2674 struct e1000_mac_info *mac = &adapter->hw.mac;
318a94d6 2675 struct e1000_fc_info *fc = &adapter->hw.fc;
2676 struct e1000_hw *hw = &adapter->hw;
2677 u32 tx_space, min_tx_space, min_rx_space;
318a94d6 2678 u32 pba = adapter->pba;
2679 u16 hwm;
2680
ad68076e 2681 /* reset Packet Buffer Allocation to default */
318a94d6 2682 ew32(PBA, pba);
df762464 2683
318a94d6 2684 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
2685 /*
2686 * To maintain wire speed transmits, the Tx FIFO should be
2687 * large enough to accommodate two full transmit packets,
2688 * rounded up to the next 1KB and expressed in KB. Likewise,
2689 * the Rx FIFO should be large enough to accommodate at least
2690 * one full receive packet and is similarly rounded up and
2691 * expressed in KB.
2692 */
df762464 2693 pba = er32(PBA);
bc7f75fa 2694 /* upper 16 bits has Tx packet buffer allocation size in KB */
df762464 2695 tx_space = pba >> 16;
bc7f75fa 2696 /* lower 16 bits has Rx packet buffer allocation size in KB */
df762464 2697 pba &= 0xffff;
2698 /*
2699 * the Tx fifo also stores 16 bytes of information about the tx
2700 * but don't include ethernet FCS because hardware appends it
2701 */
2702 min_tx_space = (adapter->max_frame_size +
2703 sizeof(struct e1000_tx_desc) -
2704 ETH_FCS_LEN) * 2;
2705 min_tx_space = ALIGN(min_tx_space, 1024);
2706 min_tx_space >>= 10;
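		/*
		 * Worked example (illustrative): a 9000-byte MTU gives a
		 * 9018-byte max frame, so (9018 + 16 - 4) * 2 = 18060,
		 * rounded up to 18432 bytes, i.e. an 18 KB minimum Tx FIFO.
		 */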
2707 /* software strips receive CRC, so leave room for it */
318a94d6 2708 min_rx_space = adapter->max_frame_size;
2709 min_rx_space = ALIGN(min_rx_space, 1024);
2710 min_rx_space >>= 10;
2711
2712 /*
2713 * If current Tx allocation is less than the min Tx FIFO size,
bc7f75fa 2714 * and the min Tx FIFO size is less than the current Rx FIFO
2715 * allocation, take space away from current Rx allocation
2716 */
2717 if ((tx_space < min_tx_space) &&
2718 ((min_tx_space - tx_space) < pba)) {
2719 pba -= min_tx_space - tx_space;
bc7f75fa 2720
2721 /*
2722 * if short on Rx space, Rx wins and must trump tx
2723 * adjustment or use Early Receive if available
2724 */
df762464 2725 if ((pba < min_rx_space) &&
2726 (!(adapter->flags & FLAG_HAS_ERT)))
2727 /* ERT enabled in e1000_configure_rx */
df762464 2728 pba = min_rx_space;
bc7f75fa 2729 }
2730
2731 ew32(PBA, pba);
2732 }
2733
bc7f75fa 2734
2735 /*
2736 * flow control settings
2737 *
38eb394e 2738 * The high water mark must be low enough to fit one full frame
2739 * (or the size used for early receive) above it in the Rx FIFO.
2740 * Set it to the lower of:
2741 * - 90% of the Rx FIFO size, and
2742 * - the full Rx FIFO size minus the early receive size (for parts
2743 * with ERT support assuming ERT set to E1000_ERT_2048), or
38eb394e 2744 * - the full Rx FIFO size minus one full frame
ad68076e 2745 */
2746 if (hw->mac.type == e1000_pchlan) {
2747 /*
2748 * Workaround PCH LOM adapter hangs with certain network
2749 * loads. If hangs persist, try disabling Tx flow control.
2750 */
2751 if (adapter->netdev->mtu > ETH_DATA_LEN) {
2752 fc->high_water = 0x3500;
2753 fc->low_water = 0x1500;
2754 } else {
2755 fc->high_water = 0x5000;
2756 fc->low_water = 0x3000;
2757 }
2758 } else {
2759 if ((adapter->flags & FLAG_HAS_ERT) &&
2760 (adapter->netdev->mtu > ETH_DATA_LEN))
2761 hwm = min(((pba << 10) * 9 / 10),
2762 ((pba << 10) - (E1000_ERT_2048 << 3)));
2763 else
2764 hwm = min(((pba << 10) * 9 / 10),
2765 ((pba << 10) - adapter->max_frame_size));
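		/*
		 * Worked example (illustrative): with pba = 20 KB, a
		 * 9018-byte max frame and no ERT,
		 * hwm = min(20480 * 9 / 10, 20480 - 9018)
		 *     = min(18432, 11462) = 11462 bytes.
		 */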
bc7f75fa 2766
2767 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
2768 fc->low_water = fc->high_water - 8;
2769 }
2770
2771 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
318a94d6 2772 fc->pause_time = 0xFFFF;
bc7f75fa 2773 else
2774 fc->pause_time = E1000_FC_PAUSE_TIME;
2775 fc->send_xon = 1;
5c48ef3e 2776 fc->current_mode = fc->requested_mode;
2777
2778 /* Allow time for pending master requests to run */
2779 mac->ops.reset_hw(hw);
2780
2781 /*
2782 * For parts with AMT enabled, let the firmware know
2783 * that the network interface is in control
2784 */
c43bc57e 2785 if (adapter->flags & FLAG_HAS_AMT)
2786 e1000_get_hw_control(adapter);
2787
bc7f75fa 2788 ew32(WUC, 0);
2789 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)
2790 e1e_wphy(&adapter->hw, BM_WUC, 0);
2791
2792 if (mac->ops.init_hw(hw))
44defeb3 2793 e_err("Hardware Error\n");
bc7f75fa 2794
2795 /* additional part of the flow-control workaround above */
2796 if (hw->mac.type == e1000_pchlan)
2797 ew32(FCRTV_PCH, 0x1000);
2798
2799 e1000_update_mng_vlan(adapter);
2800
2801 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
2802 ew32(VET, ETH_P_8021Q);
2803
2804 e1000e_reset_adaptive(hw);
2805 e1000_get_phy_info(hw);
2806
2807 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
2808 !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
bc7f75fa 2809 u16 phy_data = 0;
2810 /*
2811 * speed up time to link by disabling smart power down, ignore
bc7f75fa 2812 * the return value of this function because there is nothing
2813 * different we would do if it failed
2814 */
2815 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
2816 phy_data &= ~IGP02E1000_PM_SPD;
2817 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
2818 }
2819}
2820
2821int e1000e_up(struct e1000_adapter *adapter)
2822{
2823 struct e1000_hw *hw = &adapter->hw;
2824
2825 /* DMA latency requirement to workaround early-receive/jumbo issue */
2826 if (adapter->flags & FLAG_HAS_ERT)
2827 pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
2828 adapter->netdev->name,
2829 PM_QOS_DEFAULT_VALUE);
2830
2831 /* hardware has been reset, we need to reload some things */
2832 e1000_configure(adapter);
2833
2834 clear_bit(__E1000_DOWN, &adapter->state);
2835
2836 napi_enable(&adapter->napi);
2837 if (adapter->msix_entries)
2838 e1000_configure_msix(adapter);
2839 e1000_irq_enable(adapter);
2840
2841 netif_wake_queue(adapter->netdev);
2842
2843 /* fire a link change interrupt to start the watchdog */
2844 ew32(ICS, E1000_ICS_LSC);
2845 return 0;
2846}
2847
2848void e1000e_down(struct e1000_adapter *adapter)
2849{
2850 struct net_device *netdev = adapter->netdev;
2851 struct e1000_hw *hw = &adapter->hw;
2852 u32 tctl, rctl;
2853
2854 /*
2855 * signal that we're down so the interrupt handler does not
2856 * reschedule our watchdog timer
2857 */
2858 set_bit(__E1000_DOWN, &adapter->state);
2859
2860 /* disable receives in the hardware */
2861 rctl = er32(RCTL);
2862 ew32(RCTL, rctl & ~E1000_RCTL_EN);
2863 /* flush and sleep below */
2864
4cb9be7a 2865 netif_stop_queue(netdev);
2866
2867 /* disable transmits in the hardware */
2868 tctl = er32(TCTL);
2869 tctl &= ~E1000_TCTL_EN;
2870 ew32(TCTL, tctl);
2871 /* flush both disables and wait for them to finish */
2872 e1e_flush();
2873 msleep(10);
2874
2875 napi_disable(&adapter->napi);
2876 e1000_irq_disable(adapter);
2877
2878 del_timer_sync(&adapter->watchdog_timer);
2879 del_timer_sync(&adapter->phy_info_timer);
2880
2881 netif_carrier_off(netdev);
2882 adapter->link_speed = 0;
2883 adapter->link_duplex = 0;
2884
2885 if (!pci_channel_offline(adapter->pdev))
2886 e1000e_reset(adapter);
2887 e1000_clean_tx_ring(adapter);
2888 e1000_clean_rx_ring(adapter);
2889
2890 if (adapter->flags & FLAG_HAS_ERT)
2891 pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
2892 adapter->netdev->name);
2893
2894 /*
2895 * TODO: for power management, we could drop the link and
2896 * pci_disable_device here.
2897 */
2898}
2899
2900void e1000e_reinit_locked(struct e1000_adapter *adapter)
2901{
2902 might_sleep();
2903 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
2904 msleep(1);
2905 e1000e_down(adapter);
2906 e1000e_up(adapter);
2907 clear_bit(__E1000_RESETTING, &adapter->state);
2908}
2909
2910/**
2911 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
2912 * @adapter: board private structure to initialize
2913 *
2914 * e1000_sw_init initializes the Adapter private data structure.
2915 * Fields are initialized based on PCI device information and
2916 * OS network device settings (MTU size).
2917 **/
2918static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
2919{
2920 struct net_device *netdev = adapter->netdev;
2921
2922 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
2923 adapter->rx_ps_bsize0 = 128;
2924 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2925 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
bc7f75fa 2926
4662e82b 2927 e1000e_set_interrupt_capability(adapter);
bc7f75fa 2928
2929 if (e1000_alloc_queues(adapter))
2930 return -ENOMEM;
bc7f75fa 2931
bc7f75fa 2932 /* Explicitly disable IRQ since the NIC can be in any state. */
2933 e1000_irq_disable(adapter);
2934
2935 set_bit(__E1000_DOWN, &adapter->state);
2936 return 0;
2937}
2938
2939/**
2940 * e1000_intr_msi_test - Interrupt Handler
2941 * @irq: interrupt number
2942 * @data: pointer to a network interface device structure
2943 **/
2944static irqreturn_t e1000_intr_msi_test(int irq, void *data)
2945{
2946 struct net_device *netdev = data;
2947 struct e1000_adapter *adapter = netdev_priv(netdev);
2948 struct e1000_hw *hw = &adapter->hw;
2949 u32 icr = er32(ICR);
2950
3bb99fe2 2951 e_dbg("icr is %08X\n", icr);
2952 if (icr & E1000_ICR_RXSEQ) {
2953 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
2954 wmb();
2955 }
2956
2957 return IRQ_HANDLED;
2958}
2959
2960/**
2961 * e1000_test_msi_interrupt - Returns 0 for successful test
2962 * @adapter: board private struct
2963 *
2964 * code flow taken from tg3.c
2965 **/
2966static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
2967{
2968 struct net_device *netdev = adapter->netdev;
2969 struct e1000_hw *hw = &adapter->hw;
2970 int err;
2971
2972 /* poll_enable hasn't been called yet, so don't need disable */
2973 /* clear any pending events */
2974 er32(ICR);
2975
2976 /* free the real vector and request a test handler */
2977 e1000_free_irq(adapter);
4662e82b 2978 e1000e_reset_interrupt_capability(adapter);
2979
 2980 	/* Assume the test fails; if it succeeds, the test MSI irq
 2981 	 * handler will clear this flag */
2982 adapter->flags |= FLAG_MSI_TEST_FAILED;
2983
2984 err = pci_enable_msi(adapter->pdev);
2985 if (err)
2986 goto msi_test_failed;
2987
a0607fd3 2988 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
2989 netdev->name, netdev);
2990 if (err) {
2991 pci_disable_msi(adapter->pdev);
2992 goto msi_test_failed;
2993 }
2994
2995 wmb();
2996
2997 e1000_irq_enable(adapter);
2998
2999 /* fire an unusual interrupt on the test handler */
3000 ew32(ICS, E1000_ICS_RXSEQ);
3001 e1e_flush();
3002 msleep(50);
3003
3004 e1000_irq_disable(adapter);
3005
3006 rmb();
3007
3008 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
4662e82b 3009 adapter->int_mode = E1000E_INT_MODE_LEGACY;
3010 err = -EIO;
3011 e_info("MSI interrupt test failed!\n");
3012 }
3013
3014 free_irq(adapter->pdev->irq, netdev);
3015 pci_disable_msi(adapter->pdev);
3016
3017 if (err == -EIO)
3018 goto msi_test_failed;
3019
3020 /* okay so the test worked, restore settings */
3bb99fe2 3021 e_dbg("MSI interrupt test succeeded!\n");
f8d59f78 3022msi_test_failed:
4662e82b 3023 e1000e_set_interrupt_capability(adapter);
3024 e1000_request_irq(adapter);
3025 return err;
3026}
3027
3028/**
3029 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
3030 * @adapter: board private struct
3031 *
3032 * code flow taken from tg3.c, called with e1000 interrupts disabled.
3033 **/
3034static int e1000_test_msi(struct e1000_adapter *adapter)
3035{
3036 int err;
3037 u16 pci_cmd;
3038
3039 if (!(adapter->flags & FLAG_MSI_ENABLED))
3040 return 0;
3041
3042 /* disable SERR in case the MSI write causes a master abort */
3043 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3044 pci_write_config_word(adapter->pdev, PCI_COMMAND,
3045 pci_cmd & ~PCI_COMMAND_SERR);
3046
3047 err = e1000_test_msi_interrupt(adapter);
3048
3049 /* restore previous setting of command word */
3050 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
3051
3052 /* success ! */
3053 if (!err)
3054 return 0;
3055
3056 /* EIO means MSI test failed */
3057 if (err != -EIO)
3058 return err;
3059
3060 /* back to INTx mode */
3061 e_warn("MSI interrupt test failed, using legacy interrupt.\n");
3062
3063 e1000_free_irq(adapter);
3064
3065 err = e1000_request_irq(adapter);
3066
3067 return err;
3068}
3069
3070/**
3071 * e1000_open - Called when a network interface is made active
3072 * @netdev: network interface device structure
3073 *
3074 * Returns 0 on success, negative value on failure
3075 *
3076 * The open entry point is called when a network interface is made
3077 * active by the system (IFF_UP). At this point all resources needed
3078 * for transmit and receive operations are allocated, the interrupt
3079 * handler is registered with the OS, the watchdog timer is started,
3080 * and the stack is notified that the interface is ready.
3081 **/
3082static int e1000_open(struct net_device *netdev)
3083{
3084 struct e1000_adapter *adapter = netdev_priv(netdev);
3085 struct e1000_hw *hw = &adapter->hw;
3086 int err;
3087
3088 /* disallow open during test */
3089 if (test_bit(__E1000_TESTING, &adapter->state))
3090 return -EBUSY;
3091
3092 netif_carrier_off(netdev);
3093
3094 /* allocate transmit descriptors */
3095 err = e1000e_setup_tx_resources(adapter);
3096 if (err)
3097 goto err_setup_tx;
3098
3099 /* allocate receive descriptors */
3100 err = e1000e_setup_rx_resources(adapter);
3101 if (err)
3102 goto err_setup_rx;
3103
3104 e1000e_power_up_phy(adapter);
3105
3106 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
3107 if ((adapter->hw.mng_cookie.status &
3108 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
3109 e1000_update_mng_vlan(adapter);
3110
3111 /*
3112 * If AMT is enabled, let the firmware know that the network
3113 * interface is now open
3114 */
c43bc57e 3115 if (adapter->flags & FLAG_HAS_AMT)
3116 e1000_get_hw_control(adapter);
3117
3118 /*
3119 * before we allocate an interrupt, we must be ready to handle it.
3120 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
3121 * as soon as we call pci_request_irq, so we have to setup our
3122 * clean_rx handler before we do so.
3123 */
3124 e1000_configure(adapter);
3125
3126 err = e1000_request_irq(adapter);
3127 if (err)
3128 goto err_req_irq;
3129
3130 /*
3131 * Work around PCIe errata with MSI interrupts causing some chipsets to
3132 * ignore e1000e MSI messages, which means we need to test our MSI
3133 * interrupt now
3134 */
4662e82b 3135 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
3136 err = e1000_test_msi(adapter);
3137 if (err) {
3138 e_err("Interrupt allocation failed\n");
3139 goto err_req_irq;
3140 }
3141 }
3142
3143 /* From here on the code is the same as e1000e_up() */
3144 clear_bit(__E1000_DOWN, &adapter->state);
3145
3146 napi_enable(&adapter->napi);
3147
3148 e1000_irq_enable(adapter);
3149
4cb9be7a 3150 netif_start_queue(netdev);
d55b53ff 3151
3152 /* fire a link status change interrupt to start the watchdog */
3153 ew32(ICS, E1000_ICS_LSC);
3154
3155 return 0;
3156
3157err_req_irq:
3158 e1000_release_hw_control(adapter);
3159 e1000_power_down_phy(adapter);
3160 e1000e_free_rx_resources(adapter);
3161err_setup_rx:
3162 e1000e_free_tx_resources(adapter);
3163err_setup_tx:
3164 e1000e_reset(adapter);
3165
3166 return err;
3167}
3168
3169/**
3170 * e1000_close - Disables a network interface
3171 * @netdev: network interface device structure
3172 *
3173 * Returns 0, this is not allowed to fail
3174 *
3175 * The close entry point is called when an interface is de-activated
3176 * by the OS. The hardware is still under the drivers control, but
3177 * needs to be disabled. A global MAC reset is issued to stop the
3178 * hardware, and all transmit and receive resources are freed.
3179 **/
3180static int e1000_close(struct net_device *netdev)
3181{
3182 struct e1000_adapter *adapter = netdev_priv(netdev);
3183
3184 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
3185 e1000e_down(adapter);
3186 e1000_power_down_phy(adapter);
3187 e1000_free_irq(adapter);
3188
3189 e1000e_free_tx_resources(adapter);
3190 e1000e_free_rx_resources(adapter);
3191
3192 /*
3193 * kill manageability vlan ID if supported, but not if a vlan with
3194 * the same ID is registered on the host OS (let 8021q kill it)
3195 */
3196 if ((adapter->hw.mng_cookie.status &
3197 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
3198 !(adapter->vlgrp &&
3199 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
3200 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
3201
3202 /*
3203 * If AMT is enabled, let the firmware know that the network
3204 * interface is now closed
3205 */
c43bc57e 3206 if (adapter->flags & FLAG_HAS_AMT)
3207 e1000_release_hw_control(adapter);
3208
3209 return 0;
3210}
3211/**
3212 * e1000_set_mac - Change the Ethernet Address of the NIC
3213 * @netdev: network interface device structure
3214 * @p: pointer to an address structure
3215 *
3216 * Returns 0 on success, negative on failure
3217 **/
3218static int e1000_set_mac(struct net_device *netdev, void *p)
3219{
3220 struct e1000_adapter *adapter = netdev_priv(netdev);
3221 struct sockaddr *addr = p;
3222
3223 if (!is_valid_ether_addr(addr->sa_data))
3224 return -EADDRNOTAVAIL;
3225
3226 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3227 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
3228
3229 e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
3230
3231 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
3232 /* activate the work around */
3233 e1000e_set_laa_state_82571(&adapter->hw, 1);
3234
3235 /*
3236 * Hold a copy of the LAA in RAR[14] This is done so that
3237 * between the time RAR[0] gets clobbered and the time it
3238 * gets fixed (in e1000_watchdog), the actual LAA is in one
3239 * of the RARs and no incoming packets directed to this port
3240 * are dropped. Eventually the LAA will be in RAR[0] and
3241 * RAR[14]
3242 */
3243 e1000e_rar_set(&adapter->hw,
3244 adapter->hw.mac.addr,
3245 adapter->hw.mac.rar_entry_count - 1);
3246 }
3247
3248 return 0;
3249}
3250
3251/**
3252 * e1000e_update_phy_task - work thread to update phy
3253 * @work: pointer to our work struct
3254 *
3255 * this worker thread exists because we must acquire a
3256 * semaphore to read the phy, which we could msleep while
3257 * waiting for it, and we can't msleep in a timer.
3258 **/
3259static void e1000e_update_phy_task(struct work_struct *work)
3260{
3261 struct e1000_adapter *adapter = container_of(work,
3262 struct e1000_adapter, update_phy_task);
3263 e1000_get_phy_info(&adapter->hw);
3264}
3265
3266/*
3267 * Need to wait a few seconds after link up to get diagnostic information from
3268 * the phy
3269 */
3270static void e1000_update_phy_info(unsigned long data)
3271{
3272 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
a8f88ff5 3273 schedule_work(&adapter->update_phy_task);
3274}
3275
3276/**
3277 * e1000e_update_stats - Update the board statistics counters
3278 * @adapter: board private structure
3279 **/
3280void e1000e_update_stats(struct e1000_adapter *adapter)
3281{
7274c20f 3282 struct net_device *netdev = adapter->netdev;
3283 struct e1000_hw *hw = &adapter->hw;
3284 struct pci_dev *pdev = adapter->pdev;
a4f58f54 3285 u16 phy_data;
3286
3287 /*
3288 * Prevent stats update while adapter is being reset, or if the pci
3289 * connection is down.
3290 */
3291 if (adapter->link_speed == 0)
3292 return;
3293 if (pci_channel_offline(pdev))
3294 return;
3295
3296 adapter->stats.crcerrs += er32(CRCERRS);
3297 adapter->stats.gprc += er32(GPRC);
3298 adapter->stats.gorc += er32(GORCL);
3299 er32(GORCH); /* Clear gorc */
3300 adapter->stats.bprc += er32(BPRC);
3301 adapter->stats.mprc += er32(MPRC);
3302 adapter->stats.roc += er32(ROC);
3303
bc7f75fa 3304 adapter->stats.mpc += er32(MPC);
3305 if ((hw->phy.type == e1000_phy_82578) ||
3306 (hw->phy.type == e1000_phy_82577)) {
3307 e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
3308 if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data))
3309 adapter->stats.scc += phy_data;
3310
3311 e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
3312 if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data))
3313 adapter->stats.ecol += phy_data;
3314
3315 e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
3316 if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data))
3317 adapter->stats.mcc += phy_data;
3318
3319 e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
3320 if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data))
3321 adapter->stats.latecol += phy_data;
3322
3323 e1e_rphy(hw, HV_DC_UPPER, &phy_data);
3324 if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data))
3325 adapter->stats.dc += phy_data;
3326 } else {
3327 adapter->stats.scc += er32(SCC);
3328 adapter->stats.ecol += er32(ECOL);
3329 adapter->stats.mcc += er32(MCC);
3330 adapter->stats.latecol += er32(LATECOL);
3331 adapter->stats.dc += er32(DC);
3332 }
3333 adapter->stats.xonrxc += er32(XONRXC);
3334 adapter->stats.xontxc += er32(XONTXC);
3335 adapter->stats.xoffrxc += er32(XOFFRXC);
3336 adapter->stats.xofftxc += er32(XOFFTXC);
bc7f75fa 3337 adapter->stats.gptc += er32(GPTC);
3338 adapter->stats.gotc += er32(GOTCL);
3339 er32(GOTCH); /* Clear gotc */
3340 adapter->stats.rnbc += er32(RNBC);
3341 adapter->stats.ruc += er32(RUC);
3342
3343 adapter->stats.mptc += er32(MPTC);
3344 adapter->stats.bptc += er32(BPTC);
3345
3346 /* used for adaptive IFS */
3347
3348 hw->mac.tx_packet_delta = er32(TPT);
3349 adapter->stats.tpt += hw->mac.tx_packet_delta;
3350 if ((hw->phy.type == e1000_phy_82578) ||
3351 (hw->phy.type == e1000_phy_82577)) {
3352 e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
3353 if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data))
3354 hw->mac.collision_delta = phy_data;
3355 } else {
3356 hw->mac.collision_delta = er32(COLC);
3357 }
3358 adapter->stats.colc += hw->mac.collision_delta;
3359
3360 adapter->stats.algnerrc += er32(ALGNERRC);
3361 adapter->stats.rxerrc += er32(RXERRC);
3362 if ((hw->phy.type == e1000_phy_82578) ||
3363 (hw->phy.type == e1000_phy_82577)) {
3364 e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
3365 if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data))
3366 adapter->stats.tncrs += phy_data;
3367 } else {
3368 if ((hw->mac.type != e1000_82574) &&
3369 (hw->mac.type != e1000_82583))
3370 adapter->stats.tncrs += er32(TNCRS);
3371 }
3372 adapter->stats.cexterr += er32(CEXTERR);
3373 adapter->stats.tsctc += er32(TSCTC);
3374 adapter->stats.tsctfc += er32(TSCTFC);
3375
bc7f75fa 3376 /* Fill out the OS statistics structure */
3377 netdev->stats.multicast = adapter->stats.mprc;
3378 netdev->stats.collisions = adapter->stats.colc;
3379
3380 /* Rx Errors */
3381
3382 /*
3383 * RLEC on some newer hardware can be incorrect so build
3384 * our own version based on RUC and ROC
3385 */
7274c20f 3386 netdev->stats.rx_errors = adapter->stats.rxerrc +
3387 adapter->stats.crcerrs + adapter->stats.algnerrc +
3388 adapter->stats.ruc + adapter->stats.roc +
3389 adapter->stats.cexterr;
7274c20f 3390 netdev->stats.rx_length_errors = adapter->stats.ruc +
bc7f75fa 3391 adapter->stats.roc;
3392 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3393 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3394 netdev->stats.rx_missed_errors = adapter->stats.mpc;
3395
3396 /* Tx Errors */
7274c20f 3397 netdev->stats.tx_errors = adapter->stats.ecol +
bc7f75fa 3398 adapter->stats.latecol;
3399 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3400 netdev->stats.tx_window_errors = adapter->stats.latecol;
3401 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3402
3403 /* Tx Dropped needs to be maintained elsewhere */
3404
3405 /* Management Stats */
3406 adapter->stats.mgptc += er32(MGTPTC);
3407 adapter->stats.mgprc += er32(MGTPRC);
3408 adapter->stats.mgpdc += er32(MGTPDC);
3409}
3410
3411/**
3412 * e1000_phy_read_status - Update the PHY register status snapshot
3413 * @adapter: board private structure
3414 **/
3415static void e1000_phy_read_status(struct e1000_adapter *adapter)
3416{
3417 struct e1000_hw *hw = &adapter->hw;
3418 struct e1000_phy_regs *phy = &adapter->phy_regs;
3419 int ret_val;
3420
3421 if ((er32(STATUS) & E1000_STATUS_LU) &&
3422 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
3423 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
3424 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
3425 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
3426 ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
3427 ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
3428 ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
3429 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
3430 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
3431 if (ret_val)
44defeb3 3432 e_warn("Error reading PHY register\n");
3433 } else {
3434 /*
3435 * Do not read PHY registers if link is not up
3436 * Set values to typical power-on defaults
3437 */
3438 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
3439 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
3440 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
3441 BMSR_ERCAP);
3442 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
3443 ADVERTISE_ALL | ADVERTISE_CSMA);
3444 phy->lpa = 0;
3445 phy->expansion = EXPANSION_ENABLENPAGE;
3446 phy->ctrl1000 = ADVERTISE_1000FULL;
3447 phy->stat1000 = 0;
3448 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
3449 }
3450}
3451
3452static void e1000_print_link_info(struct e1000_adapter *adapter)
3453{
3454 struct e1000_hw *hw = &adapter->hw;
3455 u32 ctrl = er32(CTRL);
3456
3457 /* Link status message must follow this format for user tools */
3458 printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
3459 "Flow Control: %s\n",
3460 adapter->netdev->name,
3461 adapter->link_speed,
3462 (adapter->link_duplex == FULL_DUPLEX) ?
3463 "Full Duplex" : "Half Duplex",
3464 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
3465 "RX/TX" :
3466 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3467 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
3468}
3469
b405e8df 3470bool e1000e_has_link(struct e1000_adapter *adapter)
3471{
3472 struct e1000_hw *hw = &adapter->hw;
3473 bool link_active = 0;
3474 s32 ret_val = 0;
3475
3476 /*
3477 * get_link_status is set on LSC (link status) interrupt or
3478 * Rx sequence error interrupt. get_link_status will stay
3479 * false until the check_for_link establishes link
3480 * for copper adapters ONLY
3481 */
3482 switch (hw->phy.media_type) {
3483 case e1000_media_type_copper:
3484 if (hw->mac.get_link_status) {
3485 ret_val = hw->mac.ops.check_for_link(hw);
3486 link_active = !hw->mac.get_link_status;
3487 } else {
3488 link_active = 1;
3489 }
3490 break;
3491 case e1000_media_type_fiber:
3492 ret_val = hw->mac.ops.check_for_link(hw);
3493 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
3494 break;
3495 case e1000_media_type_internal_serdes:
3496 ret_val = hw->mac.ops.check_for_link(hw);
3497 link_active = adapter->hw.mac.serdes_has_link;
3498 break;
3499 default:
3500 case e1000_media_type_unknown:
3501 break;
3502 }
3503
3504 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
3505 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
3506 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
44defeb3 3507 e_info("Gigabit has been disabled, downgrading speed\n");
3508 }
3509
3510 return link_active;
3511}
3512
3513static void e1000e_enable_receives(struct e1000_adapter *adapter)
3514{
3515 /* make sure the receive unit is started */
3516 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
3517 (adapter->flags & FLAG_RX_RESTART_NOW)) {
3518 struct e1000_hw *hw = &adapter->hw;
3519 u32 rctl = er32(RCTL);
3520 ew32(RCTL, rctl | E1000_RCTL_EN);
3521 adapter->flags &= ~FLAG_RX_RESTART_NOW;
3522 }
3523}
3524
3525/**
3526 * e1000_watchdog - Timer Call-back
3527 * @data: pointer to adapter cast into an unsigned long
3528 **/
3529static void e1000_watchdog(unsigned long data)
3530{
3531 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
3532
3533 /* Do the rest outside of interrupt context */
3534 schedule_work(&adapter->watchdog_task);
3535
3536 /* TODO: make this use queue_delayed_work() */
3537}

static void e1000_watchdog_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
					struct e1000_adapter, watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct e1000_phy_info *phy = &adapter->hw.phy;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_hw *hw = &adapter->hw;
	u32 link, tctl;
	int tx_pending = 0;

	link = e1000e_has_link(adapter);
	if ((netif_carrier_ok(netdev)) && link) {
		e1000e_enable_receives(adapter);
		goto link_up;
	}

	if ((e1000e_enable_tx_pkt_filtering(hw)) &&
	    (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
		e1000_update_mng_vlan(adapter);

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			bool txb2b = true;
			/* update snapshot of PHY registers on LSC */
			e1000_phy_read_status(adapter);
			mac->ops.get_link_up_info(&adapter->hw,
						  &adapter->link_speed,
						  &adapter->link_duplex);
			e1000_print_link_info(adapter);
			/*
			 * On supported PHYs, check for duplex mismatch only
			 * if link has autonegotiated at 10/100 half
			 */
			if ((hw->phy.type == e1000_phy_igp_3 ||
			     hw->phy.type == e1000_phy_bm) &&
			    (hw->mac.autoneg == true) &&
			    (adapter->link_speed == SPEED_10 ||
			     adapter->link_speed == SPEED_100) &&
			    (adapter->link_duplex == HALF_DUPLEX)) {
				u16 autoneg_exp;

				e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);

				if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
					e_info("Autonegotiated half duplex but"
					       " link partner cannot autoneg."
					       " Try forcing full duplex if"
					       " link gets many collisions.\n");
			}

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				txb2b = false;
				adapter->tx_timeout_factor = 16;
				break;
			case SPEED_100:
				txb2b = false;
				adapter->tx_timeout_factor = 10;
				break;
			}

			/*
			 * workaround: re-program speed mode bit after
			 * link-up event
			 */
			if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
			    !txb2b) {
				u32 tarc0;
				tarc0 = er32(TARC(0));
				tarc0 &= ~SPEED_MODE_BIT;
				ew32(TARC(0), tarc0);
			}

			/*
			 * disable TSO for PCIe and 10/100 speeds, to avoid
			 * some hardware issues
			 */
			if (!(adapter->flags & FLAG_TSO_FORCE)) {
				switch (adapter->link_speed) {
				case SPEED_10:
				case SPEED_100:
					e_info("10/100 speed: disabling TSO\n");
					netdev->features &= ~NETIF_F_TSO;
					netdev->features &= ~NETIF_F_TSO6;
					break;
				case SPEED_1000:
					netdev->features |= NETIF_F_TSO;
					netdev->features |= NETIF_F_TSO6;
					break;
				default:
					/* oops */
					break;
				}
			}

			/*
			 * enable transmits in the hardware, need to do this
			 * after setting TARC(0)
			 */
			tctl = er32(TCTL);
			tctl |= E1000_TCTL_EN;
			ew32(TCTL, tctl);

			/*
			 * Perform any post-link-up configuration before
			 * reporting link up.
			 */
			if (phy->ops.cfg_on_link_up)
				phy->ops.cfg_on_link_up(hw);

			netif_carrier_on(netdev);

			if (!test_bit(__E1000_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			/* Link status message must follow this format */
			printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
			       adapter->netdev->name);
			netif_carrier_off(netdev);
			if (!test_bit(__E1000_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));

			if (adapter->flags & FLAG_RX_NEEDS_RESTART)
				schedule_work(&adapter->reset_task);
		}
	}

link_up:
	e1000e_update_stats(adapter);

	mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	mac->collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
	adapter->gorc_old = adapter->stats.gorc;
	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
	adapter->gotc_old = adapter->stats.gotc;

	e1000e_update_adaptive(&adapter->hw);

	if (!netif_carrier_ok(netdev)) {
		tx_pending = (e1000_desc_unused(tx_ring) + 1 <
			      tx_ring->count);
		if (tx_pending) {
			/*
			 * We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset the controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
			/* return immediately since reset is imminent */
			return;
		}
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	if (adapter->msix_entries)
		ew32(ICS, adapter->rx_ring->ims_val);
	else
		ew32(ICS, E1000_ICS_RXDMT0);

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = 1;

	/*
	 * With 82571 controllers, LAA may be overwritten due to controller
	 * reset from the other port.  Set the appropriate LAA in RAR[0]
	 */
	if (e1000e_get_laa_state_82571(hw))
		e1000e_rar_set(hw, adapter->hw.mac.addr, 0);

	/* Reset the timer */
	if (!test_bit(__E1000_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}

#define E1000_TX_FLAGS_CSUM		0x00000001
#define E1000_TX_FLAGS_VLAN		0x00000002
#define E1000_TX_FLAGS_TSO		0x00000004
#define E1000_TX_FLAGS_IPV4		0x00000008
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT	16
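
/*
 * Example of how these flags combine: a VLAN-tagged TSO frame with tag
 * 0x0123 is queued with
 *
 *	tx_flags = E1000_TX_FLAGS_TSO | E1000_TX_FLAGS_VLAN |
 *		   (0x0123 << E1000_TX_FLAGS_VLAN_SHIFT);
 *
 * i.e. the 802.1Q tag lives in the upper 16 bits covered by
 * E1000_TX_FLAGS_VLAN_MASK, while the low bits select the offloads.
 */
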
static int e1000_tso(struct e1000_adapter *adapter,
		     struct sk_buff *skb)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u32 cmd_length = 0;
	u16 ipcse = 0, tucse, mss;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;
	int err;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	mss = skb_shinfo(skb)->gso_size;
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 0, IPPROTO_TCP, 0);
		cmd_length = E1000_TXD_CMD_IP;
		ipcse = skb_transport_offset(skb) - 1;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
		ipcse = 0;
	}
	ipcss = skb_network_offset(skb);
	ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
	tucss = skb_transport_offset(skb);
	tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
	tucse = 0;

	cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
		       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));

	i = tx_ring->next_to_use;
	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
	buffer_info = &tx_ring->buffer_info[i];

	context_desc->lower_setup.ip_fields.ipcss = ipcss;
	context_desc->lower_setup.ip_fields.ipcso = ipcso;
	context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
	context_desc->upper_setup.tcp_fields.tucss = tucss;
	context_desc->upper_setup.tcp_fields.tucso = tucso;
	context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
	context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
	context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
	context_desc->cmd_and_length = cpu_to_le32(cmd_length);

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;

	i++;
	if (i == tx_ring->count)
		i = 0;
	tx_ring->next_to_use = i;

	return 1;
}
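
/*
 * Worked example for the TSO context descriptor built above: for a
 * standard IPv4/TCP frame (14-byte Ethernet, 20-byte IP and 20-byte TCP
 * headers), skb_transport_offset() is 34, so hdr_len = 34 + 20 = 54,
 * ipcss = 14, ipcse = 33, tucss = 34 and tucso = 50 (the TCP checksum
 * field); cmd_length then carries skb->len - 54, the TSO payload the
 * hardware will slice into mss-sized segments.
 */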

static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u8 css;
	u32 cmd_len = E1000_TXD_CMD_DEXT;
	__be16 protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
		protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	else
		protocol = skb->protocol;

	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		/* XXX not handling all IPV6 headers */
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	default:
		if (unlikely(net_ratelimit()))
			e_warn("checksum_partial proto=%x!\n",
			       be16_to_cpu(protocol));
		break;
	}

	css = skb_transport_offset(skb);

	i = tx_ring->next_to_use;
	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);

	context_desc->lower_setup.ip_config = 0;
	context_desc->upper_setup.tcp_fields.tucss = css;
	context_desc->upper_setup.tcp_fields.tucso =
					css + skb->csum_offset;
	context_desc->upper_setup.tcp_fields.tucse = 0;
	context_desc->tcp_seg_setup.data = 0;
	context_desc->cmd_and_length = cpu_to_le32(cmd_len);

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;

	i++;
	if (i == tx_ring->count)
		i = 0;
	tx_ring->next_to_use = i;

	return 1;
}
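
/*
 * Example of the checksum-offload fields above: for a CHECKSUM_PARTIAL
 * TCP-over-IPv4 skb with a 14-byte Ethernet header, css =
 * skb_transport_offset() = 34 and tucso = css + skb->csum_offset =
 * 34 + 16 = 50, so the hardware writes the finished checksum directly
 * into the TCP header's checksum field.
 */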

#define E1000_MAX_PER_TXD	8192
#define E1000_MAX_TXD_PWR	12

static int e1000_tx_map(struct e1000_adapter *adapter,
			struct sk_buff *skb, unsigned int first,
			unsigned int max_per_txd, unsigned int nr_frags,
			unsigned int mss)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f;

	i = tx_ring->next_to_use;

	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);

		buffer_info->length = size;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = pci_map_single(pdev, skb->data + offset,
						  size, PCI_DMA_TODEVICE);
		buffer_info->mapped_as_page = false;
		if (pci_dma_mapping_error(pdev, buffer_info->dma))
			goto dma_error;

		len -= size;
		offset += size;
		count++;

		if (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->next_to_watch = i;
			buffer_info->dma = pci_map_page(pdev, frag->page,
							offset, size,
							PCI_DMA_TODEVICE);
			buffer_info->mapped_as_page = true;
			if (pci_dma_mapping_error(pdev, buffer_info->dma))
				goto dma_error;

			len -= size;
			offset += size;
			count++;
		}
	}

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");
	buffer_info->dma = 0;
	if (count)
		count--;

	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		e1000_put_txbuf(adapter, buffer_info);
	}

	return 0;
}
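
/*
 * Ring arithmetic note for the dma_error unwind above: the pair
 *
 *	if (i == 0)
 *		i += tx_ring->count;
 *	i--;
 *
 * is the unsigned-safe way of stepping i backwards modulo the ring
 * size, e.g. on a 256-entry ring i = 0 steps to 255, so the unwind can
 * release previously mapped buffers even across the wrap point.
 */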

static void e1000_tx_queue(struct e1000_adapter *adapter,
			   int tx_flags, int count)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_buffer *buffer_info;
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (tx_flags & E1000_TX_FLAGS_TSO) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		if (tx_flags & E1000_TX_FLAGS_IPV4)
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (tx_flags & E1000_TX_FLAGS_CSUM) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (tx_flags & E1000_TX_FLAGS_VLAN) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);

		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
	/*
	 * we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems
	 */
	mmiowb();
}
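
/*
 * Descriptor-word example for e1000_tx_queue(): a checksummed (non-TSO)
 * 1514-byte buffer is written as lower.data = E1000_TXD_CMD_IFCS |
 * E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 1514 (length in the low bits)
 * and upper.data = E1000_TXD_POPTS_TXSM << 8; the packet's last
 * descriptor additionally ORs in adapter->txd_cmd (the end-of-packet
 * and reporting bits configured elsewhere in the driver).
 */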

#define MINIMUM_DHCP_PACKET_SIZE 282
static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
				    struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 length, offset;

	if (vlan_tx_tag_present(skb)) {
		if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
		      (adapter->hw.mng_cookie.status &
		       E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
			return 0;
	}

	if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
		return 0;

	if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
		return 0;

	{
		const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);
		struct udphdr *udp;

		if (ip->protocol != IPPROTO_UDP)
			return 0;

		udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
		if (ntohs(udp->dest) != 67)
			return 0;

		offset = (u8 *)udp + 8 - skb->data;
		length = skb->len - offset;
		return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
	}

	return 0;
}
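
/*
 * Offset arithmetic for the DHCP snoop above: with a 14-byte Ethernet
 * header and ihl = 5 (a 20-byte IP header), the UDP header starts at
 * byte 34, so the payload handed to e1000e_mng_write_dhcp_info() begins
 * at offset 34 + 8 = 42 and runs for skb->len - 42 bytes.
 */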

static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_stop_queue(netdev);
	/*
	 * Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/*
	 * We need to check again in case another CPU has just
	 * made room available.
	 */
	if (e1000_desc_unused(adapter->tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (e1000_desc_unused(adapter->tx_ring) >= size)
		return 0;
	return __e1000_maybe_stop_tx(netdev, size);
}

#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
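
/*
 * TXD_USE_COUNT() is a conservative ceiling on how many descriptors a
 * buffer of S bytes may consume when each descriptor covers at most
 * 2^X bytes: e.g. TXD_USE_COUNT(9000, 12) = (9000 >> 12) + 1 = 3.
 */
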
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int first;
	unsigned int max_per_txd = E1000_MAX_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb->len - skb->data_len;
	unsigned int nr_frags;
	unsigned int mss;
	int count = 0;
	int tso;
	unsigned int f;

	if (test_bit(__E1000_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	mss = skb_shinfo(skb)->gso_size;
	/*
	 * The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer.  The calc is:
	 * 4 = ceil(buffer len/mss).  To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops.
	 */
	if (mss) {
		u8 hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;

		/*
		 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
		 * points to just header, pull a few bytes of payload from
		 * frags into skb->data
		 */
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		/*
		 * we do this workaround for ES2LAN, but it is unnecessary,
		 * avoiding it could save a lot of cycles
		 */
		if (skb->data_len && (hdr_len == len)) {
			unsigned int pull_size;

			pull_size = min((unsigned int)4, skb->data_len);
			if (!__pskb_pull_tail(skb, pull_size)) {
				e_err("__pskb_pull_tail failed.\n");
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
			len = skb->len - skb->data_len;
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
		count++;
	count++;

	count += TXD_USE_COUNT(len, max_txd_pwr);

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
				       max_txd_pwr);

	if (adapter->hw.mac.tx_pkt_filtering)
		e1000_transfer_dhcp_info(adapter, skb);

	/*
	 * need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time
	 */
	if (e1000_maybe_stop_tx(netdev, count + 2))
		return NETDEV_TX_BUSY;

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
	}

	first = tx_ring->next_to_use;

	tso = e1000_tso(adapter, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= E1000_TX_FLAGS_TSO;
	else if (e1000_tx_csum(adapter, skb))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	/*
	 * Old method was to assume IPv4 packet by default if TSO was enabled.
	 * 82571 hardware supports TSO capabilities for IPv6 as well...
	 * no longer assume, we must.
	 */
	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	/* if count is 0 then a mapping error has occurred */
	count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
	if (count) {
		e1000_tx_queue(adapter, tx_flags, count);
		/* Make sure there is space in the ring for the next send. */
		e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
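
/*
 * Descriptor budgeting example for e1000_xmit_frame(): a TSO skb with
 * mss 1460 (so max_per_txd = 5840 and max_txd_pwr = 12), a 4000-byte
 * linear area and two 1500-byte frags reserves 1 context descriptor +
 * 1 spare + TXD_USE_COUNT(4000, 12) + 2 * TXD_USE_COUNT(1500, 12) = 5,
 * and the queue is stopped unless 5 + 2 descriptors are free.
 */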

/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void e1000_tx_timeout(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

static void e1000_reset_task(struct work_struct *work)
{
	struct e1000_adapter *adapter;
	adapter = container_of(work, struct e1000_adapter, reset_task);

	e1000e_reinit_locked(adapter);
}

/**
 * e1000_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
{
	/* only return the current stats */
	return &netdev->stats;
}

/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* Jumbo frame support */
	if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
		e_err("Jumbo Frames not supported.\n");
		return -EINVAL;
	}

	/* Supported frame sizes */
	if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
	    (max_frame > adapter->max_hw_frame_size)) {
		e_err("Unsupported MTU setting\n");
		return -EINVAL;
	}

	/* 82573 Errata 17 */
	if (((adapter->hw.mac.type == e1000_82573) ||
	     (adapter->hw.mac.type == e1000_82574)) &&
	    (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
		adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
		e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
	}

	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		msleep(1);
	/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
	adapter->max_frame_size = max_frame;
	e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		e1000e_down(adapter);

	/*
	 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * However with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs
	 */

	if (max_frame <= 2048)
		adapter->rx_buffer_len = 2048;
	else
		adapter->rx_buffer_len = 4096;

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
					 + ETH_FCS_LEN;

	if (netif_running(netdev))
		e1000e_up(adapter);
	else
		e1000e_reset(adapter);

	clear_bit(__E1000_RESETTING, &adapter->state);

	return 0;
}
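
/*
 * Sizing example for e1000_change_mtu(): an MTU of 1500 gives
 * max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518, which
 * selects the 2048-byte receive buffer; a 9000-byte jumbo MTU gives
 * max_frame = 9018 and the 4096-byte buffer, with the jumbo receive
 * routines assembling larger frames from fragments.
 */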

static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		e1000_phy_read_status(adapter);

		switch (data->reg_num & 0x1F) {
		case MII_BMCR:
			data->val_out = adapter->phy_regs.bmcr;
			break;
		case MII_BMSR:
			data->val_out = adapter->phy_regs.bmsr;
			break;
		case MII_PHYSID1:
			data->val_out = (adapter->hw.phy.id >> 16);
			break;
		case MII_PHYSID2:
			data->val_out = (adapter->hw.phy.id & 0xFFFF);
			break;
		case MII_ADVERTISE:
			data->val_out = adapter->phy_regs.advertise;
			break;
		case MII_LPA:
			data->val_out = adapter->phy_regs.lpa;
			break;
		case MII_EXPANSION:
			data->val_out = adapter->phy_regs.expansion;
			break;
		case MII_CTRL1000:
			data->val_out = adapter->phy_regs.ctrl1000;
			break;
		case MII_STAT1000:
			data->val_out = adapter->phy_regs.stat1000;
			break;
		case MII_ESTATUS:
			data->val_out = adapter->phy_regs.estatus;
			break;
		default:
			return -EIO;
		}
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 i, mac_reg;
	u16 phy_reg;
	int retval = 0;

	/* copy MAC RARs to PHY RARs */
	for (i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
		mac_reg = er32(RAL(i));
		e1e_wphy(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
		e1e_wphy(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
		mac_reg = er32(RAH(i));
		e1e_wphy(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
		e1e_wphy(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0xFFFF));
	}

	/* copy MAC MTA to PHY MTA */
	for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
		mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
		e1e_wphy(hw, BM_MTA(i), (u16)(mac_reg & 0xFFFF));
		e1e_wphy(hw, BM_MTA(i) + 1, (u16)((mac_reg >> 16) & 0xFFFF));
	}

	/* configure PHY Rx Control register */
	e1e_rphy(&adapter->hw, BM_RCTL, &phy_reg);
	mac_reg = er32(RCTL);
	if (mac_reg & E1000_RCTL_UPE)
		phy_reg |= BM_RCTL_UPE;
	if (mac_reg & E1000_RCTL_MPE)
		phy_reg |= BM_RCTL_MPE;
	phy_reg &= ~(BM_RCTL_MO_MASK);
	if (mac_reg & E1000_RCTL_MO_3)
		phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
				<< BM_RCTL_MO_SHIFT);
	if (mac_reg & E1000_RCTL_BAM)
		phy_reg |= BM_RCTL_BAM;
	if (mac_reg & E1000_RCTL_PMCF)
		phy_reg |= BM_RCTL_PMCF;
	mac_reg = er32(CTRL);
	if (mac_reg & E1000_CTRL_RFCE)
		phy_reg |= BM_RCTL_RFCE;
	e1e_wphy(&adapter->hw, BM_RCTL, phy_reg);

	/* enable PHY wakeup in MAC register */
	ew32(WUFC, wufc);
	ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);

	/* configure and enable PHY wakeup in PHY registers */
	e1e_wphy(&adapter->hw, BM_WUFC, wufc);
	e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);

	/* activate PHY wakeup */
	retval = hw->phy.ops.acquire(hw);
	if (retval) {
		e_err("Could not acquire PHY\n");
		return retval;
	}
	e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
				  (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
	retval = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg);
	if (retval) {
		e_err("Could not read PHY page 769\n");
		goto out;
	}
	phy_reg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
	retval = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
	if (retval)
		e_err("Could not set PHY Host Wakeup bit\n");
out:
	hw->phy.ops.release(hw);

	return retval;
}
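
/*
 * Register-split example for the RAR copy above: each 32-bit MAC
 * register is written to the PHY as two 16-bit halves, so for
 * RAL(i) = 0x12345678 the code writes 0x5678 to BM_RAR_L(i) and 0x1234
 * to BM_RAR_M(i), giving the PHY its own copy of the address filters to
 * match wake packets against while the MAC is powered down.
 */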

static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
	int retval = 0;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
		e1000e_down(adapter);
		e1000_free_irq(adapter);
	}
	e1000e_reset_interrupt_capability(adapter);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_multi(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_MPE;
			ew32(RCTL, rctl);
		}

		ctrl = er32(CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
			ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
		ew32(CTRL, ctrl);

		if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
		    adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		if (adapter->flags & FLAG_IS_ICH)
			e1000e_disable_gig_wol_ich8lan(&adapter->hw);

		/* Allow time for pending master requests to run */
		e1000e_disable_pcie_master(&adapter->hw);

		if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
			/* enable wakeup by the PHY */
			retval = e1000_init_phy_wakeup(adapter, wufc);
			if (retval)
				return retval;
		} else {
			/* enable wakeup by the MAC */
			ew32(WUFC, wufc);
			ew32(WUC, E1000_WUC_PME_EN);
		}
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
	    (hw->mac.ops.check_mng_mode(hw)))
		*enable_wake = true;

	if (adapter->hw.phy.type == e1000_phy_igp_3)
		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);

	/*
	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
{
	if (sleep && wake) {
		pci_prepare_to_sleep(pdev);
		return;
	}

	pci_wake_from_d3(pdev, wake);
	pci_set_power_state(pdev, PCI_D3hot);
}

static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
				    bool wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/*
	 * The pci-e switch on some quad port adapters will report a
	 * correctable error when the MAC transitions from D0 to D3.  To
	 * prevent this we need to mask off the correctable errors on the
	 * downstream port of the pci-e switch.
	 */
	if (adapter->flags & FLAG_IS_QUAD_PORT) {
		struct pci_dev *us_dev = pdev->bus->self;
		int pos = pci_find_capability(us_dev, PCI_CAP_ID_EXP);
		u16 devctl;

		pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
				      (devctl & ~PCI_EXP_DEVCTL_CERE));

		e1000_power_off(pdev, sleep, wake);

		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
	} else {
		e1000_power_off(pdev, sleep, wake);
	}
}

#ifdef CONFIG_PCIEASPM
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
	pci_disable_link_state(pdev, state);
}
#else
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
	int pos;
	u16 reg16;

	/*
	 * Both device and parent should have the same ASPM setting.
	 * Disable ASPM in downstream component first and then upstream.
	 */
	pos = pci_pcie_cap(pdev);
	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
	reg16 &= ~state;
	pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);

	pos = pci_pcie_cap(pdev->bus->self);
	pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
	reg16 &= ~state;
	pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
}
#endif
void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
	dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
		 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
		 (state & PCIE_LINK_STATE_L1) ? "L1" : "");

	__e1000e_disable_aspm(pdev, state);
}
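
/*
 * The config-space fallback above depends on PCIE_LINK_STATE_L0S and
 * PCIE_LINK_STATE_L1 lining up with the ASPM control bits in the Link
 * Control register, so "reg16 &= ~state" clears exactly the requested
 * states on both the device and its upstream parent.
 */
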
#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake);
	if (!retval)
		e1000_complete_shutdown(pdev, true, wake);

	return retval;
}

static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot enable PCI device from suspend\n");
		return err;
	}

	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000e_set_interrupt_capability(adapter);
	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000e_power_up_phy(adapter);

	/* report the system wakeup cause from S3/S4 */
	if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
		u16 phy_data;

		e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
		if (phy_data) {
			e_info("PHY Wakeup cause - %s\n",
			       phy_data & E1000_WUS_EX ? "Unicast Packet" :
			       phy_data & E1000_WUS_MC ? "Multicast Packet" :
			       phy_data & E1000_WUS_BC ? "Broadcast Packet" :
			       phy_data & E1000_WUS_MAG ? "Magic Packet" :
			       phy_data & E1000_WUS_LNKC ? "Link Status Change" :
			       "other");
		}
		e1e_wphy(&adapter->hw, BM_WUS, ~0);
	} else {
		u32 wus = er32(WUS);
		if (wus) {
			e_info("MAC Wakeup cause - %s\n",
			       wus & E1000_WUS_EX ? "Unicast Packet" :
			       wus & E1000_WUS_MC ? "Multicast Packet" :
			       wus & E1000_WUS_BC ? "Broadcast Packet" :
			       wus & E1000_WUS_MAG ? "Magic Packet" :
			       wus & E1000_WUS_LNKC ? "Link Status Change" :
			       "other");
		}
		ew32(WUS, ~0);
	}

	e1000e_reset(adapter);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000e_up(adapter);

	netif_device_attach(netdev);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_get_hw_control(adapter);

	return 0;
}
#endif

static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake = false;

	__e1000_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF)
		e1000_complete_shutdown(pdev, false, wake);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);

	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000e_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	pci_ers_result_t result;

	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		e1000e_reset(adapter);
		ew32(WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);

	return result;
}

/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.  Implementation resembles the
 * second-half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000e_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_get_hw_control(adapter);
}

static void e1000_print_device_info(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 pba_num;

	/* print bus type/speed/width info */
	e_info("(PCI Express:2.5GB/s:%s) %pM\n",
	       /* bus width */
	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
	        "Width x1"),
	       /* MAC address */
	       netdev->dev_addr);
	e_info("Intel(R) PRO/%s Network Connection\n",
	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
	e1000e_read_pba_num(hw, &pba_num);
	e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
	       hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff));
}

static void e1000_eeprom_checks(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int ret_val;
	u16 buf = 0;

	if (hw->mac.type != e1000_82573)
		return;

	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
	if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) {
		/* Deep Smart Power Down (DSPD) */
		dev_warn(&adapter->pdev->dev,
			 "Warning: detected DSPD enabled in EEPROM\n");
	}
}

static const struct net_device_ops e1000e_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_multicast_list	= e1000_set_multi,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,

	.ndo_vlan_rx_register	= e1000_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
};

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
	resource_size_t mmio_start, mmio_len;
	resource_size_t flash_start, flash_len;

	static int cards_found;
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;

	if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
		e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions_exclusive(pdev,
					pci_select_bars(pdev, IORESOURCE_MEM),
					e1000e_driver_name);
	if (err)
		goto err_pci_reg;

	/* AER (Advanced Error Reporting) hooks */
	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	/* PCI config space info */
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->flags2 = ei->flags2;
	adapter->hw.adapter = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->max_hw_frame_size = ei->max_hw_frame_size;
	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if ((adapter->flags & FLAG_HAS_FLASH) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)
			goto err_flashmap;
	}

	/* construct the net_device struct */
	netdev->netdev_ops = &e1000e_netdev_ops;
	e1000e_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	adapter->bd_number = cards_found++;

	e1000e_check_options(adapter);

	/* setup adapter struct */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;

	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	err = ei->get_variants(adapter);
	if (err)
		goto err_hw_init;

	if ((adapter->flags & FLAG_IS_ICH) &&
	    (adapter->flags & FLAG_READ_ONLY_NVM))
		e1000e_write_protect_nvm_ich8lan(&adapter->hw);

	hw->mac.ops.get_bus_info(&adapter->hw);

	adapter->hw.phy.autoneg_wait_to_complete = 0;

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = 0;
		adapter->hw.phy.ms_type = e1000_ms_hw_default;
	}

	if (e1000_check_reset_block(&adapter->hw))
		e_info("PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
		netdev->features |= NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	if (e1000e_enable_mng_pass_thru(&adapter->hw))
		adapter->flags |= FLAG_MNG_PT_ENABLED;

	/*
	 * before reading the NVM, reset the controller to
	 * put the device in a known good starting state
	 */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);

	/*
	 * systems with ASPM and others may see the checksum fail on the first
	 * attempt.  Let's give it a few tries
	 */
	for (i = 0;; i++) {
		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
			break;
		if (i == 2) {
			e_err("The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}

	e1000_eeprom_checks(adapter);

	/* copy the MAC address */
	if (e1000e_read_mac_addr(&adapter->hw))
		e_err("NVM Read Error while reading MAC address\n");

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &e1000_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = &e1000_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->reset_task, e1000_reset_task);
	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
	INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);

	/* Initialize link parameters. User can change them with ethtool */
	adapter->hw.mac.autoneg = 1;
	adapter->fc_autoneg = 1;
	adapter->hw.fc.requested_mode = e1000_fc_default;
	adapter->hw.fc.current_mode = e1000_fc_default;
	adapter->hw.phy.autoneg_advertised = 0x2f;

	/* ring size defaults */
	adapter->rx_ring->count = 256;
	adapter->tx_ring->count = 256;

	/*
	 * Initial Wake on LAN setting - If APM wake is enabled in
	 * the EEPROM, enable the ACPI Magic Packet filter
	 */
	if (adapter->flags & FLAG_APME_IN_WUC) {
		/* APME bit in EEPROM is mapped to WUC.APME */
		eeprom_data = er32(WUC);
		eeprom_apme_mask = E1000_WUC_APME;
		if (eeprom_data & E1000_WUC_PHY_WAKE)
			adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
		    (adapter->hw.bus.func == 1))
			e1000_read_nvm(&adapter->hw,
				NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		else
			e1000_read_nvm(&adapter->hw,
				NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	}

	/* fetch WoL from EEPROM */
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/*
	 * now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	if (!(adapter->flags & FLAG_HAS_WOL))
		adapter->eeprom_wol = 0;

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* save off EEPROM version number */
	e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);

	/* reset the hardware with the new settings */
	e1000e_reset(adapter);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e1000_print_device_info(adapter);

	return 0;

err_register:
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_release_hw_control(adapter);
err_eeprom:
	if (!e1000_check_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	e1000e_reset_interrupt_capability(adapter);
err_flashmap:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
5256
5257/**
5258 * e1000_remove - Device Removal Routine
5259 * @pdev: PCI device information struct
5260 *
5261 * e1000_remove is called by the PCI subsystem to alert the driver
5262 * that it should release a PCI device. The could be caused by a
5263 * Hot-Plug event, or because the driver is going to be removed from
5264 * memory.
5265 **/
static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/*
	 * flush_scheduled_work() may reschedule our watchdog task, so
	 * explicitly disable the watchdog tasks from being rescheduled
	 */
	set_bit(__E1000_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->downshift_task);
	cancel_work_sync(&adapter->update_phy_task);
	cancel_work_sync(&adapter->print_hang_task);
	flush_scheduled_work();

	if (!(netdev->flags & IFF_UP))
		e1000_power_down_phy(adapter);

	unregister_netdev(netdev);

	/*
	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000_release_hw_control(adapter);

	e1000e_reset_interrupt_capability(adapter);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	pci_release_selected_regions(pdev,
	                             pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	/* AER disable */
	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
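
/*
 * Note the ordering above: timers and work items are quiesced before
 * anything is freed so that no callback can run against stale state,
 * the netdev is unregistered before its memory is released, and the
 * PCI-level resources (BARs, AER, the device itself) go last, roughly
 * the mirror image of the corresponding steps in e1000_probe().
 */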

/* PCI Error Recovery (ERS) */
static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
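
/*
 * The three hooks implement the AER recovery handshake: error_detected()
 * quiesces the device and tells the core how to proceed, slot_reset()
 * re-initializes the device after the link reset, and resume() restarts
 * traffic.  A minimal sketch of the first hook's shape for a
 * hypothetical driver (the real e1000_io_* implementations appear
 * earlier in this file):
 *
 *	static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
 *						   pci_channel_state_t state)
 *	{
 *		struct net_device *netdev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(netdev);
 *		if (state == pci_channel_io_perm_failure)
 *			return PCI_ERS_RESULT_DISCONNECT;
 *		return PCI_ERS_RESULT_NEED_RESET;
 *	}
 */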

static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
	  board_80003es2lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },

	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
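
/*
 * Each PCI_VDEVICE(INTEL, id) entry wildcards the subsystem IDs, so a
 * line such as
 *
 *	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }
 *
 * is shorthand for the full pci_device_id initializer
 *
 *	{ PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82571EB_COPPER,
 *	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_82571 }
 *
 * where the final field lands in driver_data and selects the entry in
 * e1000_info_tbl[].  MODULE_DEVICE_TABLE() exports the table in the
 * module image so userspace (depmod/udev) can autoload e1000e when a
 * matching device shows up.
 */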

/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
	.name     = e1000e_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = e1000_suspend,
	.resume   = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};
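
/*
 * This structure is the driver's whole contract with the PCI core:
 * probe/remove for bind and unbind, the legacy suspend/resume hooks
 * under CONFIG_PM, shutdown for reboot and poweroff, and the AER
 * callbacks declared above via err_handler.
 */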

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;

	printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
	       e1000e_driver_name, e1000e_driver_version);
	printk(KERN_INFO "%s: Copyright (c) 1999 - 2009 Intel Corporation.\n",
	       e1000e_driver_name);
	ret = pci_register_driver(&e1000_driver);

	return ret;
}
module_init(e1000_init_module);
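
/*
 * pci_register_driver() returns 0 on success or a negative errno, and
 * that value is handed straight back to the module loader, so a failed
 * registration fails the insmod/modprobe of e1000e as a whole.
 */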

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}
module_exit(e1000_exit_module);
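
/*
 * pci_unregister_driver() detaches the driver from any devices still
 * bound to it, invoking e1000_remove() for each, before the module
 * text is released.
 */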


MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* e1000_main.c */