]> bbs.cooldavid.org Git - net-next-2.6.git/blob - drivers/net/e1000/e1000_main.c
pci: use pci_ioremap_bar() in drivers/net
[net-next-2.6.git] / drivers / net / e1000 / e1000_main.c
1 /*******************************************************************************
2
3   Intel PRO/1000 Linux driver
4   Copyright(c) 1999 - 2006 Intel Corporation.
5
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21
22   Contact Information:
23   Linux NICS <linux.nics@intel.com>
24   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 #include "e1000.h"
30 #include <net/ip6_checksum.h>
31
32 char e1000_driver_name[] = "e1000";
33 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
34 #define DRV_VERSION "7.3.20-k3-NAPI"
35 const char e1000_driver_version[] = DRV_VERSION;
36 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
37
38 /* e1000_pci_tbl - PCI Device ID Table
39  *
40  * Last entry must be all 0s
41  *
42  * Macro expands to...
43  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
44  */
45 static struct pci_device_id e1000_pci_tbl[] = {
46         INTEL_E1000_ETHERNET_DEVICE(0x1000),
47         INTEL_E1000_ETHERNET_DEVICE(0x1001),
48         INTEL_E1000_ETHERNET_DEVICE(0x1004),
49         INTEL_E1000_ETHERNET_DEVICE(0x1008),
50         INTEL_E1000_ETHERNET_DEVICE(0x1009),
51         INTEL_E1000_ETHERNET_DEVICE(0x100C),
52         INTEL_E1000_ETHERNET_DEVICE(0x100D),
53         INTEL_E1000_ETHERNET_DEVICE(0x100E),
54         INTEL_E1000_ETHERNET_DEVICE(0x100F),
55         INTEL_E1000_ETHERNET_DEVICE(0x1010),
56         INTEL_E1000_ETHERNET_DEVICE(0x1011),
57         INTEL_E1000_ETHERNET_DEVICE(0x1012),
58         INTEL_E1000_ETHERNET_DEVICE(0x1013),
59         INTEL_E1000_ETHERNET_DEVICE(0x1014),
60         INTEL_E1000_ETHERNET_DEVICE(0x1015),
61         INTEL_E1000_ETHERNET_DEVICE(0x1016),
62         INTEL_E1000_ETHERNET_DEVICE(0x1017),
63         INTEL_E1000_ETHERNET_DEVICE(0x1018),
64         INTEL_E1000_ETHERNET_DEVICE(0x1019),
65         INTEL_E1000_ETHERNET_DEVICE(0x101A),
66         INTEL_E1000_ETHERNET_DEVICE(0x101D),
67         INTEL_E1000_ETHERNET_DEVICE(0x101E),
68         INTEL_E1000_ETHERNET_DEVICE(0x1026),
69         INTEL_E1000_ETHERNET_DEVICE(0x1027),
70         INTEL_E1000_ETHERNET_DEVICE(0x1028),
71         INTEL_E1000_ETHERNET_DEVICE(0x1075),
72         INTEL_E1000_ETHERNET_DEVICE(0x1076),
73         INTEL_E1000_ETHERNET_DEVICE(0x1077),
74         INTEL_E1000_ETHERNET_DEVICE(0x1078),
75         INTEL_E1000_ETHERNET_DEVICE(0x1079),
76         INTEL_E1000_ETHERNET_DEVICE(0x107A),
77         INTEL_E1000_ETHERNET_DEVICE(0x107B),
78         INTEL_E1000_ETHERNET_DEVICE(0x107C),
79         INTEL_E1000_ETHERNET_DEVICE(0x108A),
80         INTEL_E1000_ETHERNET_DEVICE(0x1099),
81         INTEL_E1000_ETHERNET_DEVICE(0x10B5),
82         /* required last entry */
83         {0,}
84 };
85
86 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
87
88 int e1000_up(struct e1000_adapter *adapter);
89 void e1000_down(struct e1000_adapter *adapter);
90 void e1000_reinit_locked(struct e1000_adapter *adapter);
91 void e1000_reset(struct e1000_adapter *adapter);
92 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
93 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
94 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
95 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
96 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
97 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
98                              struct e1000_tx_ring *txdr);
99 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
100                              struct e1000_rx_ring *rxdr);
101 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
102                              struct e1000_tx_ring *tx_ring);
103 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
104                              struct e1000_rx_ring *rx_ring);
105 void e1000_update_stats(struct e1000_adapter *adapter);
106
107 static int e1000_init_module(void);
108 static void e1000_exit_module(void);
109 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
110 static void __devexit e1000_remove(struct pci_dev *pdev);
111 static int e1000_alloc_queues(struct e1000_adapter *adapter);
112 static int e1000_sw_init(struct e1000_adapter *adapter);
113 static int e1000_open(struct net_device *netdev);
114 static int e1000_close(struct net_device *netdev);
115 static void e1000_configure_tx(struct e1000_adapter *adapter);
116 static void e1000_configure_rx(struct e1000_adapter *adapter);
117 static void e1000_setup_rctl(struct e1000_adapter *adapter);
118 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
119 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
120 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
121                                 struct e1000_tx_ring *tx_ring);
122 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
123                                 struct e1000_rx_ring *rx_ring);
124 static void e1000_set_rx_mode(struct net_device *netdev);
125 static void e1000_update_phy_info(unsigned long data);
126 static void e1000_watchdog(unsigned long data);
127 static void e1000_82547_tx_fifo_stall(unsigned long data);
128 static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
129 static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
130 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
131 static int e1000_set_mac(struct net_device *netdev, void *p);
132 static irqreturn_t e1000_intr(int irq, void *data);
133 static irqreturn_t e1000_intr_msi(int irq, void *data);
134 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
135                                struct e1000_tx_ring *tx_ring);
136 static int e1000_clean(struct napi_struct *napi, int budget);
137 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
138                                struct e1000_rx_ring *rx_ring,
139                                int *work_done, int work_to_do);
140 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
141                                    struct e1000_rx_ring *rx_ring,
142                                    int cleaned_count);
143 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
144 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
145                            int cmd);
146 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
147 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
148 static void e1000_tx_timeout(struct net_device *dev);
149 static void e1000_reset_task(struct work_struct *work);
150 static void e1000_smartspeed(struct e1000_adapter *adapter);
151 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
152                                        struct sk_buff *skb);
153
154 static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
155 static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
156 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
157 static void e1000_restore_vlan(struct e1000_adapter *adapter);
158
159 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
160 #ifdef CONFIG_PM
161 static int e1000_resume(struct pci_dev *pdev);
162 #endif
163 static void e1000_shutdown(struct pci_dev *pdev);
164
165 #ifdef CONFIG_NET_POLL_CONTROLLER
166 /* for netdump / net console */
167 static void e1000_netpoll (struct net_device *netdev);
168 #endif
169
170 #define COPYBREAK_DEFAULT 256
171 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
172 module_param(copybreak, uint, 0644);
173 MODULE_PARM_DESC(copybreak,
174         "Maximum size of packet that is copied to a new buffer on receive");
175
176 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
177                      pci_channel_state_t state);
178 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
179 static void e1000_io_resume(struct pci_dev *pdev);
180
181 static struct pci_error_handlers e1000_err_handler = {
182         .error_detected = e1000_io_error_detected,
183         .slot_reset = e1000_io_slot_reset,
184         .resume = e1000_io_resume,
185 };
186
187 static struct pci_driver e1000_driver = {
188         .name     = e1000_driver_name,
189         .id_table = e1000_pci_tbl,
190         .probe    = e1000_probe,
191         .remove   = __devexit_p(e1000_remove),
192 #ifdef CONFIG_PM
193         /* Power Managment Hooks */
194         .suspend  = e1000_suspend,
195         .resume   = e1000_resume,
196 #endif
197         .shutdown = e1000_shutdown,
198         .err_handler = &e1000_err_handler
199 };
200
201 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
202 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
203 MODULE_LICENSE("GPL");
204 MODULE_VERSION(DRV_VERSION);
205
206 static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
207 module_param(debug, int, 0);
208 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
209
210 /**
211  * e1000_init_module - Driver Registration Routine
212  *
213  * e1000_init_module is the first routine called when the driver is
214  * loaded. All it does is register with the PCI subsystem.
215  **/
216
217 static int __init e1000_init_module(void)
218 {
219         int ret;
220         printk(KERN_INFO "%s - version %s\n",
221                e1000_driver_string, e1000_driver_version);
222
223         printk(KERN_INFO "%s\n", e1000_copyright);
224
225         ret = pci_register_driver(&e1000_driver);
226         if (copybreak != COPYBREAK_DEFAULT) {
227                 if (copybreak == 0)
228                         printk(KERN_INFO "e1000: copybreak disabled\n");
229                 else
230                         printk(KERN_INFO "e1000: copybreak enabled for "
231                                "packets <= %u bytes\n", copybreak);
232         }
233         return ret;
234 }
235
236 module_init(e1000_init_module);
237
238 /**
239  * e1000_exit_module - Driver Exit Cleanup Routine
240  *
241  * e1000_exit_module is called just before the driver is removed
242  * from memory.
243  **/
244
245 static void __exit e1000_exit_module(void)
246 {
247         pci_unregister_driver(&e1000_driver);
248 }
249
250 module_exit(e1000_exit_module);
251
252 static int e1000_request_irq(struct e1000_adapter *adapter)
253 {
254         struct e1000_hw *hw = &adapter->hw;
255         struct net_device *netdev = adapter->netdev;
256         irq_handler_t handler = e1000_intr;
257         int irq_flags = IRQF_SHARED;
258         int err;
259
260         if (hw->mac_type >= e1000_82571) {
261                 adapter->have_msi = !pci_enable_msi(adapter->pdev);
262                 if (adapter->have_msi) {
263                         handler = e1000_intr_msi;
264                         irq_flags = 0;
265                 }
266         }
267
268         err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
269                           netdev);
270         if (err) {
271                 if (adapter->have_msi)
272                         pci_disable_msi(adapter->pdev);
273                 DPRINTK(PROBE, ERR,
274                         "Unable to allocate interrupt Error: %d\n", err);
275         }
276
277         return err;
278 }
279
280 static void e1000_free_irq(struct e1000_adapter *adapter)
281 {
282         struct net_device *netdev = adapter->netdev;
283
284         free_irq(adapter->pdev->irq, netdev);
285
286         if (adapter->have_msi)
287                 pci_disable_msi(adapter->pdev);
288 }
289
290 /**
291  * e1000_irq_disable - Mask off interrupt generation on the NIC
292  * @adapter: board private structure
293  **/
294
295 static void e1000_irq_disable(struct e1000_adapter *adapter)
296 {
297         struct e1000_hw *hw = &adapter->hw;
298
299         ew32(IMC, ~0);
300         E1000_WRITE_FLUSH();
301         synchronize_irq(adapter->pdev->irq);
302 }
303
304 /**
305  * e1000_irq_enable - Enable default interrupt generation settings
306  * @adapter: board private structure
307  **/
308
309 static void e1000_irq_enable(struct e1000_adapter *adapter)
310 {
311         struct e1000_hw *hw = &adapter->hw;
312
313         ew32(IMS, IMS_ENABLE_MASK);
314         E1000_WRITE_FLUSH();
315 }
316
317 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
318 {
319         struct e1000_hw *hw = &adapter->hw;
320         struct net_device *netdev = adapter->netdev;
321         u16 vid = hw->mng_cookie.vlan_id;
322         u16 old_vid = adapter->mng_vlan_id;
323         if (adapter->vlgrp) {
324                 if (!vlan_group_get_device(adapter->vlgrp, vid)) {
325                         if (hw->mng_cookie.status &
326                                 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
327                                 e1000_vlan_rx_add_vid(netdev, vid);
328                                 adapter->mng_vlan_id = vid;
329                         } else
330                                 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
331
332                         if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
333                                         (vid != old_vid) &&
334                             !vlan_group_get_device(adapter->vlgrp, old_vid))
335                                 e1000_vlan_rx_kill_vid(netdev, old_vid);
336                 } else
337                         adapter->mng_vlan_id = vid;
338         }
339 }
340
341 /**
342  * e1000_release_hw_control - release control of the h/w to f/w
343  * @adapter: address of board private structure
344  *
345  * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
346  * For ASF and Pass Through versions of f/w this means that the
347  * driver is no longer loaded. For AMT version (only with 82573) i
348  * of the f/w this means that the network i/f is closed.
349  *
350  **/
351
352 static void e1000_release_hw_control(struct e1000_adapter *adapter)
353 {
354         u32 ctrl_ext;
355         u32 swsm;
356         struct e1000_hw *hw = &adapter->hw;
357
358         /* Let firmware taken over control of h/w */
359         switch (hw->mac_type) {
360         case e1000_82573:
361                 swsm = er32(SWSM);
362                 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
363                 break;
364         case e1000_82571:
365         case e1000_82572:
366         case e1000_80003es2lan:
367         case e1000_ich8lan:
368                 ctrl_ext = er32(CTRL_EXT);
369                 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
370                 break;
371         default:
372                 break;
373         }
374 }
375
376 /**
377  * e1000_get_hw_control - get control of the h/w from f/w
378  * @adapter: address of board private structure
379  *
380  * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
381  * For ASF and Pass Through versions of f/w this means that
382  * the driver is loaded. For AMT version (only with 82573)
383  * of the f/w this means that the network i/f is open.
384  *
385  **/
386
387 static void e1000_get_hw_control(struct e1000_adapter *adapter)
388 {
389         u32 ctrl_ext;
390         u32 swsm;
391         struct e1000_hw *hw = &adapter->hw;
392
393         /* Let firmware know the driver has taken over */
394         switch (hw->mac_type) {
395         case e1000_82573:
396                 swsm = er32(SWSM);
397                 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
398                 break;
399         case e1000_82571:
400         case e1000_82572:
401         case e1000_80003es2lan:
402         case e1000_ich8lan:
403                 ctrl_ext = er32(CTRL_EXT);
404                 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
405                 break;
406         default:
407                 break;
408         }
409 }
410
411 static void e1000_init_manageability(struct e1000_adapter *adapter)
412 {
413         struct e1000_hw *hw = &adapter->hw;
414
415         if (adapter->en_mng_pt) {
416                 u32 manc = er32(MANC);
417
418                 /* disable hardware interception of ARP */
419                 manc &= ~(E1000_MANC_ARP_EN);
420
421                 /* enable receiving management packets to the host */
422                 /* this will probably generate destination unreachable messages
423                  * from the host OS, but the packets will be handled on SMBUS */
424                 if (hw->has_manc2h) {
425                         u32 manc2h = er32(MANC2H);
426
427                         manc |= E1000_MANC_EN_MNG2HOST;
428 #define E1000_MNG2HOST_PORT_623 (1 << 5)
429 #define E1000_MNG2HOST_PORT_664 (1 << 6)
430                         manc2h |= E1000_MNG2HOST_PORT_623;
431                         manc2h |= E1000_MNG2HOST_PORT_664;
432                         ew32(MANC2H, manc2h);
433                 }
434
435                 ew32(MANC, manc);
436         }
437 }
438
439 static void e1000_release_manageability(struct e1000_adapter *adapter)
440 {
441         struct e1000_hw *hw = &adapter->hw;
442
443         if (adapter->en_mng_pt) {
444                 u32 manc = er32(MANC);
445
446                 /* re-enable hardware interception of ARP */
447                 manc |= E1000_MANC_ARP_EN;
448
449                 if (hw->has_manc2h)
450                         manc &= ~E1000_MANC_EN_MNG2HOST;
451
452                 /* don't explicitly have to mess with MANC2H since
453                  * MANC has an enable disable that gates MANC2H */
454
455                 ew32(MANC, manc);
456         }
457 }
458
459 /**
460  * e1000_configure - configure the hardware for RX and TX
461  * @adapter = private board structure
462  **/
463 static void e1000_configure(struct e1000_adapter *adapter)
464 {
465         struct net_device *netdev = adapter->netdev;
466         int i;
467
468         e1000_set_rx_mode(netdev);
469
470         e1000_restore_vlan(adapter);
471         e1000_init_manageability(adapter);
472
473         e1000_configure_tx(adapter);
474         e1000_setup_rctl(adapter);
475         e1000_configure_rx(adapter);
476         /* call E1000_DESC_UNUSED which always leaves
477          * at least 1 descriptor unused to make sure
478          * next_to_use != next_to_clean */
479         for (i = 0; i < adapter->num_rx_queues; i++) {
480                 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
481                 adapter->alloc_rx_buf(adapter, ring,
482                                       E1000_DESC_UNUSED(ring));
483         }
484
485         adapter->tx_queue_len = netdev->tx_queue_len;
486 }
487
488 int e1000_up(struct e1000_adapter *adapter)
489 {
490         struct e1000_hw *hw = &adapter->hw;
491
492         /* hardware has been reset, we need to reload some things */
493         e1000_configure(adapter);
494
495         clear_bit(__E1000_DOWN, &adapter->flags);
496
497         napi_enable(&adapter->napi);
498
499         e1000_irq_enable(adapter);
500
501         /* fire a link change interrupt to start the watchdog */
502         ew32(ICS, E1000_ICS_LSC);
503         return 0;
504 }
505
506 /**
507  * e1000_power_up_phy - restore link in case the phy was powered down
508  * @adapter: address of board private structure
509  *
510  * The phy may be powered down to save power and turn off link when the
511  * driver is unloaded and wake on lan is not enabled (among others)
512  * *** this routine MUST be followed by a call to e1000_reset ***
513  *
514  **/
515
516 void e1000_power_up_phy(struct e1000_adapter *adapter)
517 {
518         struct e1000_hw *hw = &adapter->hw;
519         u16 mii_reg = 0;
520
521         /* Just clear the power down bit to wake the phy back up */
522         if (hw->media_type == e1000_media_type_copper) {
523                 /* according to the manual, the phy will retain its
524                  * settings across a power-down/up cycle */
525                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
526                 mii_reg &= ~MII_CR_POWER_DOWN;
527                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
528         }
529 }
530
531 static void e1000_power_down_phy(struct e1000_adapter *adapter)
532 {
533         struct e1000_hw *hw = &adapter->hw;
534
535         /* Power down the PHY so no link is implied when interface is down *
536          * The PHY cannot be powered down if any of the following is true *
537          * (a) WoL is enabled
538          * (b) AMT is active
539          * (c) SoL/IDER session is active */
540         if (!adapter->wol && hw->mac_type >= e1000_82540 &&
541            hw->media_type == e1000_media_type_copper) {
542                 u16 mii_reg = 0;
543
544                 switch (hw->mac_type) {
545                 case e1000_82540:
546                 case e1000_82545:
547                 case e1000_82545_rev_3:
548                 case e1000_82546:
549                 case e1000_82546_rev_3:
550                 case e1000_82541:
551                 case e1000_82541_rev_2:
552                 case e1000_82547:
553                 case e1000_82547_rev_2:
554                         if (er32(MANC) & E1000_MANC_SMBUS_EN)
555                                 goto out;
556                         break;
557                 case e1000_82571:
558                 case e1000_82572:
559                 case e1000_82573:
560                 case e1000_80003es2lan:
561                 case e1000_ich8lan:
562                         if (e1000_check_mng_mode(hw) ||
563                             e1000_check_phy_reset_block(hw))
564                                 goto out;
565                         break;
566                 default:
567                         goto out;
568                 }
569                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
570                 mii_reg |= MII_CR_POWER_DOWN;
571                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
572                 mdelay(1);
573         }
574 out:
575         return;
576 }
577
578 void e1000_down(struct e1000_adapter *adapter)
579 {
580         struct net_device *netdev = adapter->netdev;
581
582         /* signal that we're down so the interrupt handler does not
583          * reschedule our watchdog timer */
584         set_bit(__E1000_DOWN, &adapter->flags);
585
586         napi_disable(&adapter->napi);
587
588         e1000_irq_disable(adapter);
589
590         del_timer_sync(&adapter->tx_fifo_stall_timer);
591         del_timer_sync(&adapter->watchdog_timer);
592         del_timer_sync(&adapter->phy_info_timer);
593
594         netdev->tx_queue_len = adapter->tx_queue_len;
595         adapter->link_speed = 0;
596         adapter->link_duplex = 0;
597         netif_carrier_off(netdev);
598         netif_stop_queue(netdev);
599
600         e1000_reset(adapter);
601         e1000_clean_all_tx_rings(adapter);
602         e1000_clean_all_rx_rings(adapter);
603 }
604
605 void e1000_reinit_locked(struct e1000_adapter *adapter)
606 {
607         WARN_ON(in_interrupt());
608         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
609                 msleep(1);
610         e1000_down(adapter);
611         e1000_up(adapter);
612         clear_bit(__E1000_RESETTING, &adapter->flags);
613 }
614
615 void e1000_reset(struct e1000_adapter *adapter)
616 {
617         struct e1000_hw *hw = &adapter->hw;
618         u32 pba = 0, tx_space, min_tx_space, min_rx_space;
619         u16 fc_high_water_mark = E1000_FC_HIGH_DIFF;
620         bool legacy_pba_adjust = false;
621
622         /* Repartition Pba for greater than 9k mtu
623          * To take effect CTRL.RST is required.
624          */
625
626         switch (hw->mac_type) {
627         case e1000_82542_rev2_0:
628         case e1000_82542_rev2_1:
629         case e1000_82543:
630         case e1000_82544:
631         case e1000_82540:
632         case e1000_82541:
633         case e1000_82541_rev_2:
634                 legacy_pba_adjust = true;
635                 pba = E1000_PBA_48K;
636                 break;
637         case e1000_82545:
638         case e1000_82545_rev_3:
639         case e1000_82546:
640         case e1000_82546_rev_3:
641                 pba = E1000_PBA_48K;
642                 break;
643         case e1000_82547:
644         case e1000_82547_rev_2:
645                 legacy_pba_adjust = true;
646                 pba = E1000_PBA_30K;
647                 break;
648         case e1000_82571:
649         case e1000_82572:
650         case e1000_80003es2lan:
651                 pba = E1000_PBA_38K;
652                 break;
653         case e1000_82573:
654                 pba = E1000_PBA_20K;
655                 break;
656         case e1000_ich8lan:
657                 pba = E1000_PBA_8K;
658         case e1000_undefined:
659         case e1000_num_macs:
660                 break;
661         }
662
663         if (legacy_pba_adjust) {
664                 if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
665                         pba -= 8; /* allocate more FIFO for Tx */
666
667                 if (hw->mac_type == e1000_82547) {
668                         adapter->tx_fifo_head = 0;
669                         adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
670                         adapter->tx_fifo_size =
671                                 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
672                         atomic_set(&adapter->tx_fifo_stall, 0);
673                 }
674         } else if (hw->max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) {
675                 /* adjust PBA for jumbo frames */
676                 ew32(PBA, pba);
677
678                 /* To maintain wire speed transmits, the Tx FIFO should be
679                  * large enough to accomodate two full transmit packets,
680                  * rounded up to the next 1KB and expressed in KB.  Likewise,
681                  * the Rx FIFO should be large enough to accomodate at least
682                  * one full receive packet and is similarly rounded up and
683                  * expressed in KB. */
684                 pba = er32(PBA);
685                 /* upper 16 bits has Tx packet buffer allocation size in KB */
686                 tx_space = pba >> 16;
687                 /* lower 16 bits has Rx packet buffer allocation size in KB */
688                 pba &= 0xffff;
689                 /* don't include ethernet FCS because hardware appends/strips */
690                 min_rx_space = adapter->netdev->mtu + ENET_HEADER_SIZE +
691                                VLAN_TAG_SIZE;
692                 min_tx_space = min_rx_space;
693                 min_tx_space *= 2;
694                 min_tx_space = ALIGN(min_tx_space, 1024);
695                 min_tx_space >>= 10;
696                 min_rx_space = ALIGN(min_rx_space, 1024);
697                 min_rx_space >>= 10;
698
699                 /* If current Tx allocation is less than the min Tx FIFO size,
700                  * and the min Tx FIFO size is less than the current Rx FIFO
701                  * allocation, take space away from current Rx allocation */
702                 if (tx_space < min_tx_space &&
703                     ((min_tx_space - tx_space) < pba)) {
704                         pba = pba - (min_tx_space - tx_space);
705
706                         /* PCI/PCIx hardware has PBA alignment constraints */
707                         switch (hw->mac_type) {
708                         case e1000_82545 ... e1000_82546_rev_3:
709                                 pba &= ~(E1000_PBA_8K - 1);
710                                 break;
711                         default:
712                                 break;
713                         }
714
715                         /* if short on rx space, rx wins and must trump tx
716                          * adjustment or use Early Receive if available */
717                         if (pba < min_rx_space) {
718                                 switch (hw->mac_type) {
719                                 case e1000_82573:
720                                         /* ERT enabled in e1000_configure_rx */
721                                         break;
722                                 default:
723                                         pba = min_rx_space;
724                                         break;
725                                 }
726                         }
727                 }
728         }
729
730         ew32(PBA, pba);
731
732         /* flow control settings */
733         /* Set the FC high water mark to 90% of the FIFO size.
734          * Required to clear last 3 LSB */
735         fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
736         /* We can't use 90% on small FIFOs because the remainder
737          * would be less than 1 full frame.  In this case, we size
738          * it to allow at least a full frame above the high water
739          *  mark. */
740         if (pba < E1000_PBA_16K)
741                 fc_high_water_mark = (pba * 1024) - 1600;
742
743         hw->fc_high_water = fc_high_water_mark;
744         hw->fc_low_water = fc_high_water_mark - 8;
745         if (hw->mac_type == e1000_80003es2lan)
746                 hw->fc_pause_time = 0xFFFF;
747         else
748                 hw->fc_pause_time = E1000_FC_PAUSE_TIME;
749         hw->fc_send_xon = 1;
750         hw->fc = hw->original_fc;
751
752         /* Allow time for pending master requests to run */
753         e1000_reset_hw(hw);
754         if (hw->mac_type >= e1000_82544)
755                 ew32(WUC, 0);
756
757         if (e1000_init_hw(hw))
758                 DPRINTK(PROBE, ERR, "Hardware Error\n");
759         e1000_update_mng_vlan(adapter);
760
761         /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
762         if (hw->mac_type >= e1000_82544 &&
763             hw->mac_type <= e1000_82547_rev_2 &&
764             hw->autoneg == 1 &&
765             hw->autoneg_advertised == ADVERTISE_1000_FULL) {
766                 u32 ctrl = er32(CTRL);
767                 /* clear phy power management bit if we are in gig only mode,
768                  * which if enabled will attempt negotiation to 100Mb, which
769                  * can cause a loss of link at power off or driver unload */
770                 ctrl &= ~E1000_CTRL_SWDPIN3;
771                 ew32(CTRL, ctrl);
772         }
773
774         /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
775         ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
776
777         e1000_reset_adaptive(hw);
778         e1000_phy_get_info(hw, &adapter->phy_info);
779
780         if (!adapter->smart_power_down &&
781             (hw->mac_type == e1000_82571 ||
782              hw->mac_type == e1000_82572)) {
783                 u16 phy_data = 0;
784                 /* speed up time to link by disabling smart power down, ignore
785                  * the return value of this function because there is nothing
786                  * different we would do if it failed */
787                 e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
788                                    &phy_data);
789                 phy_data &= ~IGP02E1000_PM_SPD;
790                 e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
791                                     phy_data);
792         }
793
794         e1000_release_manageability(adapter);
795 }
796
797 /**
798  *  Dump the eeprom for users having checksum issues
799  **/
800 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
801 {
802         struct net_device *netdev = adapter->netdev;
803         struct ethtool_eeprom eeprom;
804         const struct ethtool_ops *ops = netdev->ethtool_ops;
805         u8 *data;
806         int i;
807         u16 csum_old, csum_new = 0;
808
809         eeprom.len = ops->get_eeprom_len(netdev);
810         eeprom.offset = 0;
811
812         data = kmalloc(eeprom.len, GFP_KERNEL);
813         if (!data) {
814                 printk(KERN_ERR "Unable to allocate memory to dump EEPROM"
815                        " data\n");
816                 return;
817         }
818
819         ops->get_eeprom(netdev, &eeprom, data);
820
821         csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
822                    (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
823         for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
824                 csum_new += data[i] + (data[i + 1] << 8);
825         csum_new = EEPROM_SUM - csum_new;
826
827         printk(KERN_ERR "/*********************/\n");
828         printk(KERN_ERR "Current EEPROM Checksum : 0x%04x\n", csum_old);
829         printk(KERN_ERR "Calculated              : 0x%04x\n", csum_new);
830
831         printk(KERN_ERR "Offset    Values\n");
832         printk(KERN_ERR "========  ======\n");
833         print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
834
835         printk(KERN_ERR "Include this output when contacting your support "
836                "provider.\n");
837         printk(KERN_ERR "This is not a software error! Something bad "
838                "happened to your hardware or\n");
839         printk(KERN_ERR "EEPROM image. Ignoring this "
840                "problem could result in further problems,\n");
841         printk(KERN_ERR "possibly loss of data, corruption or system hangs!\n");
842         printk(KERN_ERR "The MAC Address will be reset to 00:00:00:00:00:00, "
843                "which is invalid\n");
844         printk(KERN_ERR "and requires you to set the proper MAC "
845                "address manually before continuing\n");
846         printk(KERN_ERR "to enable this network device.\n");
847         printk(KERN_ERR "Please inspect the EEPROM dump and report the issue "
848                "to your hardware vendor\n");
849         printk(KERN_ERR "or Intel Customer Support.\n");
850         printk(KERN_ERR "/*********************/\n");
851
852         kfree(data);
853 }
854
855 /**
856  * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
857  * @pdev: PCI device information struct
858  *
859  * Return true if an adapter needs ioport resources
860  **/
861 static int e1000_is_need_ioport(struct pci_dev *pdev)
862 {
863         switch (pdev->device) {
864         case E1000_DEV_ID_82540EM:
865         case E1000_DEV_ID_82540EM_LOM:
866         case E1000_DEV_ID_82540EP:
867         case E1000_DEV_ID_82540EP_LOM:
868         case E1000_DEV_ID_82540EP_LP:
869         case E1000_DEV_ID_82541EI:
870         case E1000_DEV_ID_82541EI_MOBILE:
871         case E1000_DEV_ID_82541ER:
872         case E1000_DEV_ID_82541ER_LOM:
873         case E1000_DEV_ID_82541GI:
874         case E1000_DEV_ID_82541GI_LF:
875         case E1000_DEV_ID_82541GI_MOBILE:
876         case E1000_DEV_ID_82544EI_COPPER:
877         case E1000_DEV_ID_82544EI_FIBER:
878         case E1000_DEV_ID_82544GC_COPPER:
879         case E1000_DEV_ID_82544GC_LOM:
880         case E1000_DEV_ID_82545EM_COPPER:
881         case E1000_DEV_ID_82545EM_FIBER:
882         case E1000_DEV_ID_82546EB_COPPER:
883         case E1000_DEV_ID_82546EB_FIBER:
884         case E1000_DEV_ID_82546EB_QUAD_COPPER:
885                 return true;
886         default:
887                 return false;
888         }
889 }
890
891 /**
892  * e1000_probe - Device Initialization Routine
893  * @pdev: PCI device information struct
894  * @ent: entry in e1000_pci_tbl
895  *
896  * Returns 0 on success, negative on failure
897  *
898  * e1000_probe initializes an adapter identified by a pci_dev structure.
899  * The OS initialization, configuring of the adapter private structure,
900  * and a hardware reset occur.
901  **/
902 static int __devinit e1000_probe(struct pci_dev *pdev,
903                                  const struct pci_device_id *ent)
904 {
905         struct net_device *netdev;
906         struct e1000_adapter *adapter;
907         struct e1000_hw *hw;
908
909         static int cards_found = 0;
910         static int global_quad_port_a = 0; /* global ksp3 port a indication */
911         int i, err, pci_using_dac;
912         u16 eeprom_data = 0;
913         u16 eeprom_apme_mask = E1000_EEPROM_APME;
914         int bars, need_ioport;
915
916         /* do not allocate ioport bars when not needed */
917         need_ioport = e1000_is_need_ioport(pdev);
918         if (need_ioport) {
919                 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
920                 err = pci_enable_device(pdev);
921         } else {
922                 bars = pci_select_bars(pdev, IORESOURCE_MEM);
923                 err = pci_enable_device(pdev);
924         }
925         if (err)
926                 return err;
927
928         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
929             !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
930                 pci_using_dac = 1;
931         } else {
932                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
933                 if (err) {
934                         err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
935                         if (err) {
936                                 E1000_ERR("No usable DMA configuration, "
937                                           "aborting\n");
938                                 goto err_dma;
939                         }
940                 }
941                 pci_using_dac = 0;
942         }
943
944         err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
945         if (err)
946                 goto err_pci_reg;
947
948         pci_set_master(pdev);
949
950         err = -ENOMEM;
951         netdev = alloc_etherdev(sizeof(struct e1000_adapter));
952         if (!netdev)
953                 goto err_alloc_etherdev;
954
955         SET_NETDEV_DEV(netdev, &pdev->dev);
956
957         pci_set_drvdata(pdev, netdev);
958         adapter = netdev_priv(netdev);
959         adapter->netdev = netdev;
960         adapter->pdev = pdev;
961         adapter->msg_enable = (1 << debug) - 1;
962         adapter->bars = bars;
963         adapter->need_ioport = need_ioport;
964
965         hw = &adapter->hw;
966         hw->back = adapter;
967
968         err = -EIO;
969         hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
970         if (!hw->hw_addr)
971                 goto err_ioremap;
972
973         if (adapter->need_ioport) {
974                 for (i = BAR_1; i <= BAR_5; i++) {
975                         if (pci_resource_len(pdev, i) == 0)
976                                 continue;
977                         if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
978                                 hw->io_base = pci_resource_start(pdev, i);
979                                 break;
980                         }
981                 }
982         }
983
984         netdev->open = &e1000_open;
985         netdev->stop = &e1000_close;
986         netdev->hard_start_xmit = &e1000_xmit_frame;
987         netdev->get_stats = &e1000_get_stats;
988         netdev->set_rx_mode = &e1000_set_rx_mode;
989         netdev->set_mac_address = &e1000_set_mac;
990         netdev->change_mtu = &e1000_change_mtu;
991         netdev->do_ioctl = &e1000_ioctl;
992         e1000_set_ethtool_ops(netdev);
993         netdev->tx_timeout = &e1000_tx_timeout;
994         netdev->watchdog_timeo = 5 * HZ;
995         netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
996         netdev->vlan_rx_register = e1000_vlan_rx_register;
997         netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
998         netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
999 #ifdef CONFIG_NET_POLL_CONTROLLER
1000         netdev->poll_controller = e1000_netpoll;
1001 #endif
1002         strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1003
1004         adapter->bd_number = cards_found;
1005
1006         /* setup the private structure */
1007
1008         err = e1000_sw_init(adapter);
1009         if (err)
1010                 goto err_sw_init;
1011
1012         err = -EIO;
1013         /* Flash BAR mapping must happen after e1000_sw_init
1014          * because it depends on mac_type */
1015         if ((hw->mac_type == e1000_ich8lan) &&
1016            (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
1017                 hw->flash_address = pci_ioremap_bar(pdev, 1);
1018                 if (!hw->flash_address)
1019                         goto err_flashmap;
1020         }
1021
1022         if (e1000_check_phy_reset_block(hw))
1023                 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
1024
1025         if (hw->mac_type >= e1000_82543) {
1026                 netdev->features = NETIF_F_SG |
1027                                    NETIF_F_HW_CSUM |
1028                                    NETIF_F_HW_VLAN_TX |
1029                                    NETIF_F_HW_VLAN_RX |
1030                                    NETIF_F_HW_VLAN_FILTER;
1031                 if (hw->mac_type == e1000_ich8lan)
1032                         netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
1033         }
1034
1035         if ((hw->mac_type >= e1000_82544) &&
1036            (hw->mac_type != e1000_82547))
1037                 netdev->features |= NETIF_F_TSO;
1038
1039         if (hw->mac_type > e1000_82547_rev_2)
1040                 netdev->features |= NETIF_F_TSO6;
1041         if (pci_using_dac)
1042                 netdev->features |= NETIF_F_HIGHDMA;
1043
1044         netdev->features |= NETIF_F_LLTX;
1045
1046         netdev->vlan_features |= NETIF_F_TSO;
1047         netdev->vlan_features |= NETIF_F_TSO6;
1048         netdev->vlan_features |= NETIF_F_HW_CSUM;
1049         netdev->vlan_features |= NETIF_F_SG;
1050
1051         adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1052
1053         /* initialize eeprom parameters */
1054         if (e1000_init_eeprom_params(hw)) {
1055                 E1000_ERR("EEPROM initialization failed\n");
1056                 goto err_eeprom;
1057         }
1058
1059         /* before reading the EEPROM, reset the controller to
1060          * put the device in a known good starting state */
1061
1062         e1000_reset_hw(hw);
1063
1064         /* make sure the EEPROM is good */
1065         if (e1000_validate_eeprom_checksum(hw) < 0) {
1066                 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
1067                 e1000_dump_eeprom(adapter);
1068                 /*
1069                  * set MAC address to all zeroes to invalidate and temporary
1070                  * disable this device for the user. This blocks regular
1071                  * traffic while still permitting ethtool ioctls from reaching
1072                  * the hardware as well as allowing the user to run the
1073                  * interface after manually setting a hw addr using
1074                  * `ip set address`
1075                  */
1076                 memset(hw->mac_addr, 0, netdev->addr_len);
1077         } else {
1078                 /* copy the MAC address out of the EEPROM */
1079                 if (e1000_read_mac_addr(hw))
1080                         DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
1081         }
1082         /* don't block initalization here due to bad MAC address */
1083         memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1084         memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
1085
1086         if (!is_valid_ether_addr(netdev->perm_addr))
1087                 DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
1088
1089         e1000_get_bus_info(hw);
1090
1091         init_timer(&adapter->tx_fifo_stall_timer);
1092         adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
1093         adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;
1094
1095         init_timer(&adapter->watchdog_timer);
1096         adapter->watchdog_timer.function = &e1000_watchdog;
1097         adapter->watchdog_timer.data = (unsigned long) adapter;
1098
1099         init_timer(&adapter->phy_info_timer);
1100         adapter->phy_info_timer.function = &e1000_update_phy_info;
1101         adapter->phy_info_timer.data = (unsigned long)adapter;
1102
1103         INIT_WORK(&adapter->reset_task, e1000_reset_task);
1104
1105         e1000_check_options(adapter);
1106
1107         /* Initial Wake on LAN setting
1108          * If APM wake is enabled in the EEPROM,
1109          * enable the ACPI Magic Packet filter
1110          */
1111
1112         switch (hw->mac_type) {
1113         case e1000_82542_rev2_0:
1114         case e1000_82542_rev2_1:
1115         case e1000_82543:
1116                 break;
1117         case e1000_82544:
1118                 e1000_read_eeprom(hw,
1119                         EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1120                 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1121                 break;
1122         case e1000_ich8lan:
1123                 e1000_read_eeprom(hw,
1124                         EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
1125                 eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
1126                 break;
1127         case e1000_82546:
1128         case e1000_82546_rev_3:
1129         case e1000_82571:
1130         case e1000_80003es2lan:
1131                 if (er32(STATUS) & E1000_STATUS_FUNC_1){
1132                         e1000_read_eeprom(hw,
1133                                 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1134                         break;
1135                 }
1136                 /* Fall Through */
1137         default:
1138                 e1000_read_eeprom(hw,
1139                         EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1140                 break;
1141         }
1142         if (eeprom_data & eeprom_apme_mask)
1143                 adapter->eeprom_wol |= E1000_WUFC_MAG;
1144
1145         /* now that we have the eeprom settings, apply the special cases
1146          * where the eeprom may be wrong or the board simply won't support
1147          * wake on lan on a particular port */
1148         switch (pdev->device) {
1149         case E1000_DEV_ID_82546GB_PCIE:
1150                 adapter->eeprom_wol = 0;
1151                 break;
1152         case E1000_DEV_ID_82546EB_FIBER:
1153         case E1000_DEV_ID_82546GB_FIBER:
1154         case E1000_DEV_ID_82571EB_FIBER:
1155                 /* Wake events only supported on port A for dual fiber
1156                  * regardless of eeprom setting */
1157                 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1158                         adapter->eeprom_wol = 0;
1159                 break;
1160         case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1161         case E1000_DEV_ID_82571EB_QUAD_COPPER:
1162         case E1000_DEV_ID_82571EB_QUAD_FIBER:
1163         case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
1164         case E1000_DEV_ID_82571PT_QUAD_COPPER:
1165                 /* if quad port adapter, disable WoL on all but port A */
1166                 if (global_quad_port_a != 0)
1167                         adapter->eeprom_wol = 0;
1168                 else
1169                         adapter->quad_port_a = 1;
1170                 /* Reset for multiple quad port adapters */
1171                 if (++global_quad_port_a == 4)
1172                         global_quad_port_a = 0;
1173                 break;
1174         }
1175
1176         /* initialize the wol settings based on the eeprom settings */
1177         adapter->wol = adapter->eeprom_wol;
1178
1179         /* print bus type/speed/width info */
1180         DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
1181                 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
1182                  (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
1183                 ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
1184                  (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
1185                  (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
1186                  (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
1187                  (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
1188                 ((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
1189                  (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
1190                  (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
1191                  "32-bit"));
1192
1193         printk("%pM\n", netdev->dev_addr);
1194
1195         if (hw->bus_type == e1000_bus_type_pci_express) {
1196                 DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no "
1197                         "longer be supported by this driver in the future.\n",
1198                         pdev->vendor, pdev->device);
1199                 DPRINTK(PROBE, WARNING, "please use the \"e1000e\" "
1200                         "driver instead.\n");
1201         }
1202
1203         /* reset the hardware with the new settings */
1204         e1000_reset(adapter);
1205
1206         /* If the controller is 82573 and f/w is AMT, do not set
1207          * DRV_LOAD until the interface is up.  For all other cases,
1208          * let the f/w know that the h/w is now under the control
1209          * of the driver. */
1210         if (hw->mac_type != e1000_82573 ||
1211             !e1000_check_mng_mode(hw))
1212                 e1000_get_hw_control(adapter);
1213
1214         /* tell the stack to leave us alone until e1000_open() is called */
1215         netif_carrier_off(netdev);
1216         netif_stop_queue(netdev);
1217
1218         strcpy(netdev->name, "eth%d");
1219         err = register_netdev(netdev);
1220         if (err)
1221                 goto err_register;
1222
1223         DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
1224
1225         cards_found++;
1226         return 0;
1227
1228 err_register:
1229         e1000_release_hw_control(adapter);
1230 err_eeprom:
1231         if (!e1000_check_phy_reset_block(hw))
1232                 e1000_phy_hw_reset(hw);
1233
1234         if (hw->flash_address)
1235                 iounmap(hw->flash_address);
1236 err_flashmap:
1237         for (i = 0; i < adapter->num_rx_queues; i++)
1238                 dev_put(&adapter->polling_netdev[i]);
1239
1240         kfree(adapter->tx_ring);
1241         kfree(adapter->rx_ring);
1242         kfree(adapter->polling_netdev);
1243 err_sw_init:
1244         iounmap(hw->hw_addr);
1245 err_ioremap:
1246         free_netdev(netdev);
1247 err_alloc_etherdev:
1248         pci_release_selected_regions(pdev, bars);
1249 err_pci_reg:
1250 err_dma:
1251         pci_disable_device(pdev);
1252         return err;
1253 }
1254
1255 /**
1256  * e1000_remove - Device Removal Routine
1257  * @pdev: PCI device information struct
1258  *
1259  * e1000_remove is called by the PCI subsystem to alert the driver
1260  * that it should release a PCI device.  The could be caused by a
1261  * Hot-Plug event, or because the driver is going to be removed from
1262  * memory.
1263  **/
1264
1265 static void __devexit e1000_remove(struct pci_dev *pdev)
1266 {
1267         struct net_device *netdev = pci_get_drvdata(pdev);
1268         struct e1000_adapter *adapter = netdev_priv(netdev);
1269         struct e1000_hw *hw = &adapter->hw;
1270         int i;
1271
1272         cancel_work_sync(&adapter->reset_task);
1273
1274         e1000_release_manageability(adapter);
1275
1276         /* Release control of h/w to f/w.  If f/w is AMT enabled, this
1277          * would have already happened in close and is redundant. */
1278         e1000_release_hw_control(adapter);
1279
1280         for (i = 0; i < adapter->num_rx_queues; i++)
1281                 dev_put(&adapter->polling_netdev[i]);
1282
1283         unregister_netdev(netdev);
1284
1285         if (!e1000_check_phy_reset_block(hw))
1286                 e1000_phy_hw_reset(hw);
1287
1288         kfree(adapter->tx_ring);
1289         kfree(adapter->rx_ring);
1290         kfree(adapter->polling_netdev);
1291
1292         iounmap(hw->hw_addr);
1293         if (hw->flash_address)
1294                 iounmap(hw->flash_address);
1295         pci_release_selected_regions(pdev, adapter->bars);
1296
1297         free_netdev(netdev);
1298
1299         pci_disable_device(pdev);
1300 }
1301
1302 /**
1303  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1304  * @adapter: board private structure to initialize
1305  *
1306  * e1000_sw_init initializes the Adapter private data structure.
1307  * Fields are initialized based on PCI device information and
1308  * OS network device settings (MTU size).
1309  **/
1310
1311 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1312 {
1313         struct e1000_hw *hw = &adapter->hw;
1314         struct net_device *netdev = adapter->netdev;
1315         struct pci_dev *pdev = adapter->pdev;
1316         int i;
1317
1318         /* PCI config space info */
1319
1320         hw->vendor_id = pdev->vendor;
1321         hw->device_id = pdev->device;
1322         hw->subsystem_vendor_id = pdev->subsystem_vendor;
1323         hw->subsystem_id = pdev->subsystem_device;
1324         hw->revision_id = pdev->revision;
1325
1326         pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
1327
1328         adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1329         hw->max_frame_size = netdev->mtu +
1330                              ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
1331         hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
1332
1333         /* identify the MAC */
1334
1335         if (e1000_set_mac_type(hw)) {
1336                 DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
1337                 return -EIO;
1338         }
1339
1340         switch (hw->mac_type) {
1341         default:
1342                 break;
1343         case e1000_82541:
1344         case e1000_82547:
1345         case e1000_82541_rev_2:
1346         case e1000_82547_rev_2:
1347                 hw->phy_init_script = 1;
1348                 break;
1349         }
1350
1351         e1000_set_media_type(hw);
1352
1353         hw->wait_autoneg_complete = false;
1354         hw->tbi_compatibility_en = true;
1355         hw->adaptive_ifs = true;
1356
1357         /* Copper options */
1358
1359         if (hw->media_type == e1000_media_type_copper) {
1360                 hw->mdix = AUTO_ALL_MODES;
1361                 hw->disable_polarity_correction = false;
1362                 hw->master_slave = E1000_MASTER_SLAVE;
1363         }
1364
1365         adapter->num_tx_queues = 1;
1366         adapter->num_rx_queues = 1;
1367
1368         if (e1000_alloc_queues(adapter)) {
1369                 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
1370                 return -ENOMEM;
1371         }
1372
1373         for (i = 0; i < adapter->num_rx_queues; i++) {
1374                 adapter->polling_netdev[i].priv = adapter;
1375                 dev_hold(&adapter->polling_netdev[i]);
1376                 set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
1377         }
1378         spin_lock_init(&adapter->tx_queue_lock);
1379
1380         /* Explicitly disable IRQ since the NIC can be in any state. */
1381         e1000_irq_disable(adapter);
1382
1383         spin_lock_init(&adapter->stats_lock);
1384
1385         set_bit(__E1000_DOWN, &adapter->flags);
1386
1387         return 0;
1388 }
1389
1390 /**
1391  * e1000_alloc_queues - Allocate memory for all rings
1392  * @adapter: board private structure to initialize
1393  *
1394  * We allocate one ring per queue at run-time since we don't know the
1395  * number of queues at compile-time.  The polling_netdev array is
1396  * intended for Multiqueue, but should work fine with a single queue.
1397  **/
1398
1399 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
1400 {
1401         adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1402                                    sizeof(struct e1000_tx_ring), GFP_KERNEL);
1403         if (!adapter->tx_ring)
1404                 return -ENOMEM;
1405
1406         adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1407                                    sizeof(struct e1000_rx_ring), GFP_KERNEL);
1408         if (!adapter->rx_ring) {
1409                 kfree(adapter->tx_ring);
1410                 return -ENOMEM;
1411         }
1412
1413         adapter->polling_netdev = kcalloc(adapter->num_rx_queues,
1414                                           sizeof(struct net_device),
1415                                           GFP_KERNEL);
1416         if (!adapter->polling_netdev) {
1417                 kfree(adapter->tx_ring);
1418                 kfree(adapter->rx_ring);
1419                 return -ENOMEM;
1420         }
1421
1422         return E1000_SUCCESS;
1423 }
1424
1425 /**
1426  * e1000_open - Called when a network interface is made active
1427  * @netdev: network interface device structure
1428  *
1429  * Returns 0 on success, negative value on failure
1430  *
1431  * The open entry point is called when a network interface is made
1432  * active by the system (IFF_UP).  At this point all resources needed
1433  * for transmit and receive operations are allocated, the interrupt
1434  * handler is registered with the OS, the watchdog timer is started,
1435  * and the stack is notified that the interface is ready.
1436  **/
1437
1438 static int e1000_open(struct net_device *netdev)
1439 {
1440         struct e1000_adapter *adapter = netdev_priv(netdev);
1441         struct e1000_hw *hw = &adapter->hw;
1442         int err;
1443
1444         /* disallow open during test */
1445         if (test_bit(__E1000_TESTING, &adapter->flags))
1446                 return -EBUSY;
1447
1448         /* allocate transmit descriptors */
1449         err = e1000_setup_all_tx_resources(adapter);
1450         if (err)
1451                 goto err_setup_tx;
1452
1453         /* allocate receive descriptors */
1454         err = e1000_setup_all_rx_resources(adapter);
1455         if (err)
1456                 goto err_setup_rx;
1457
1458         e1000_power_up_phy(adapter);
1459
1460         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1461         if (hw->mng_cookie.status &
1462             E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
1463                 e1000_update_mng_vlan(adapter);
1464         }
1465
1466         /* If AMT is enabled, let the firmware know that the network
1467          * interface is now open */
1468         if (hw->mac_type == e1000_82573 &&
1469             e1000_check_mng_mode(hw))
1470                 e1000_get_hw_control(adapter);
1471
1472         /* before we allocate an interrupt, we must be ready to handle it.
1473          * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1474          * as soon as we call request_irq, so we have to set up our
1475          * clean_rx handler before we do so. */
1476         e1000_configure(adapter);
1477
1478         err = e1000_request_irq(adapter);
1479         if (err)
1480                 goto err_req_irq;
1481
1482         /* From here on the code is the same as e1000_up() */
1483         clear_bit(__E1000_DOWN, &adapter->flags);
1484
1485         napi_enable(&adapter->napi);
1486
1487         e1000_irq_enable(adapter);
1488
1489         netif_start_queue(netdev);
1490
1491         /* fire a link status change interrupt to start the watchdog */
1492         ew32(ICS, E1000_ICS_LSC);
1493
1494         return E1000_SUCCESS;
1495
1496 err_req_irq:
1497         e1000_release_hw_control(adapter);
1498         e1000_power_down_phy(adapter);
1499         e1000_free_all_rx_resources(adapter);
1500 err_setup_rx:
1501         e1000_free_all_tx_resources(adapter);
1502 err_setup_tx:
1503         e1000_reset(adapter);
1504
1505         return err;
1506 }
1507
1508 /**
1509  * e1000_close - Disables a network interface
1510  * @netdev: network interface device structure
1511  *
1512  * Returns 0, this is not allowed to fail
1513  *
1514  * The close entry point is called when an interface is de-activated
1515  * by the OS.  The hardware is still under the driver's control, but
1516  * needs to be disabled.  A global MAC reset is issued to stop the
1517  * hardware, and all transmit and receive resources are freed.
1518  **/
1519
1520 static int e1000_close(struct net_device *netdev)
1521 {
1522         struct e1000_adapter *adapter = netdev_priv(netdev);
1523         struct e1000_hw *hw = &adapter->hw;
1524
1525         WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1526         e1000_down(adapter);
1527         e1000_power_down_phy(adapter);
1528         e1000_free_irq(adapter);
1529
1530         e1000_free_all_tx_resources(adapter);
1531         e1000_free_all_rx_resources(adapter);
1532
1533         /* kill manageability vlan ID if supported, but not if a vlan with
1534          * the same ID is registered on the host OS (let 8021q kill it) */
1535         if ((hw->mng_cookie.status &
1536                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1537              !(adapter->vlgrp &&
1538                vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
1539                 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1540         }
1541
1542         /* If AMT is enabled, let the firmware know that the network
1543          * interface is now closed */
1544         if (hw->mac_type == e1000_82573 &&
1545             e1000_check_mng_mode(hw))
1546                 e1000_release_hw_control(adapter);
1547
1548         return 0;
1549 }
1550
1551 /**
1552  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1553  * @adapter: address of board private structure
1554  * @start: address of beginning of memory
1555  * @len: length of memory
1556  **/
1557 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1558                                   unsigned long len)
1559 {
1560         struct e1000_hw *hw = &adapter->hw;
1561         unsigned long begin = (unsigned long)start;
1562         unsigned long end = begin + len;
1563
1564         /* First revisions of the 82545 and 82546 must not allow any memory
1565          * write location to cross a 64 KB boundary, per errata 23 */
1566         if (hw->mac_type == e1000_82545 ||
1567             hw->mac_type == e1000_82546) {
1568                 return ((begin ^ (end - 1)) >> 16) == 0;
1569         }
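        /* Illustrative example of the XOR test above: with begin = 0x1fff0
         * and len = 0x20, end - 1 = 0x2000f and (begin ^ (end - 1)) >> 16
         * is 0x3, so the buffer straddles a 64 KB boundary and the check
         * fails; with begin = 0x10000 and the same length the shifted XOR
         * is 0 and the check passes. */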
1570
1571         return true;
1572 }
1573
1574 /**
1575  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1576  * @adapter: board private structure
1577  * @txdr:    tx descriptor ring (for a specific queue) to setup
1578  *
1579  * Return 0 on success, negative on failure
1580  **/
1581
1582 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1583                                     struct e1000_tx_ring *txdr)
1584 {
1585         struct pci_dev *pdev = adapter->pdev;
1586         int size;
1587
1588         size = sizeof(struct e1000_buffer) * txdr->count;
1589         txdr->buffer_info = vmalloc(size);
1590         if (!txdr->buffer_info) {
1591                 DPRINTK(PROBE, ERR,
1592                 "Unable to allocate memory for the transmit descriptor ring\n");
1593                 return -ENOMEM;
1594         }
1595         memset(txdr->buffer_info, 0, size);
1596
1597         /* round up to nearest 4K */
1598
1599         txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1600         txdr->size = ALIGN(txdr->size, 4096);
1601
1602         txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
1603         if (!txdr->desc) {
1604 setup_tx_desc_die:
1605                 vfree(txdr->buffer_info);
1606                 DPRINTK(PROBE, ERR,
1607                 "Unable to allocate memory for the transmit descriptor ring\n");
1608                 return -ENOMEM;
1609         }
1610
1611         /* Fix for errata 23, can't cross 64kB boundary */
1612         if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1613                 void *olddesc = txdr->desc;
1614                 dma_addr_t olddma = txdr->dma;
1615                 DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
1616                                      "at %p\n", txdr->size, txdr->desc);
1617                 /* Try again, without freeing the previous */
1618                 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
1619                 /* Failed allocation, critical failure */
1620                 if (!txdr->desc) {
1621                         pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1622                         goto setup_tx_desc_die;
1623                 }
1624
1625                 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1626                         /* give up */
1627                         pci_free_consistent(pdev, txdr->size, txdr->desc,
1628                                             txdr->dma);
1629                         pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1630                         DPRINTK(PROBE, ERR,
1631                                 "Unable to allocate aligned memory "
1632                                 "for the transmit descriptor ring\n");
1633                         vfree(txdr->buffer_info);
1634                         return -ENOMEM;
1635                 } else {
1636                         /* Free old allocation, new allocation was successful */
1637                         pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1638                 }
1639         }
1640         memset(txdr->desc, 0, txdr->size);
1641
1642         txdr->next_to_use = 0;
1643         txdr->next_to_clean = 0;
1644         spin_lock_init(&txdr->tx_lock);
1645
1646         return 0;
1647 }
1648
1649 /**
1650  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1651  *                                (Descriptors) for all queues
1652  * @adapter: board private structure
1653  *
1654  * Return 0 on success, negative on failure
1655  **/
1656
1657 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1658 {
1659         int i, err = 0;
1660
1661         for (i = 0; i < adapter->num_tx_queues; i++) {
1662                 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1663                 if (err) {
1664                         DPRINTK(PROBE, ERR,
1665                                 "Allocation for Tx Queue %u failed\n", i);
1666                         for (i--; i >= 0; i--)
1667                                 e1000_free_tx_resources(adapter,
1668                                                         &adapter->tx_ring[i]);
1669                         break;
1670                 }
1671         }
1672
1673         return err;
1674 }
1675
1676 /**
1677  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1678  * @adapter: board private structure
1679  *
1680  * Configure the Tx unit of the MAC after a reset.
1681  **/
1682
1683 static void e1000_configure_tx(struct e1000_adapter *adapter)
1684 {
1685         u64 tdba;
1686         struct e1000_hw *hw = &adapter->hw;
1687         u32 tdlen, tctl, tipg, tarc;
1688         u32 ipgr1, ipgr2;
1689
1690         /* Setup the HW Tx Head and Tail descriptor pointers */
1691
1692         switch (adapter->num_tx_queues) {
1693         case 1:
1694         default:
1695                 tdba = adapter->tx_ring[0].dma;
1696                 tdlen = adapter->tx_ring[0].count *
1697                         sizeof(struct e1000_tx_desc);
1698                 ew32(TDLEN, tdlen);
1699                 ew32(TDBAH, (tdba >> 32));
1700                 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1701                 ew32(TDT, 0);
1702                 ew32(TDH, 0);
1703                 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
1704                 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
1705                 break;
1706         }
1707
1708         /* Set the default values for the Tx Inter Packet Gap timer */
1709         if (hw->mac_type <= e1000_82547_rev_2 &&
1710             (hw->media_type == e1000_media_type_fiber ||
1711              hw->media_type == e1000_media_type_internal_serdes))
1712                 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1713         else
1714                 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1715
1716         switch (hw->mac_type) {
1717         case e1000_82542_rev2_0:
1718         case e1000_82542_rev2_1:
1719                 tipg = DEFAULT_82542_TIPG_IPGT;
1720                 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1721                 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1722                 break;
1723         case e1000_80003es2lan:
1724                 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1725                 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
1726                 break;
1727         default:
1728                 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1729                 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1730                 break;
1731         }
1732         tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1733         tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1734         ew32(TIPG, tipg);
1735
1736         /* Set the Tx Interrupt Delay register */
1737
1738         ew32(TIDV, adapter->tx_int_delay);
1739         if (hw->mac_type >= e1000_82540)
1740                 ew32(TADV, adapter->tx_abs_int_delay);
1741
1742         /* Program the Transmit Control Register */
1743
1744         tctl = er32(TCTL);
1745         tctl &= ~E1000_TCTL_CT;
1746         tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1747                 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1748
1749         if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
1750                 tarc = er32(TARC0);
1751                 /* set the speed mode bit; we'll clear it later if we're not
1752                  * at gigabit link */
1753                 tarc |= (1 << 21);
1754                 ew32(TARC0, tarc);
1755         } else if (hw->mac_type == e1000_80003es2lan) {
1756                 tarc = er32(TARC0);
1757                 tarc |= 1;
1758                 ew32(TARC0, tarc);
1759                 tarc = er32(TARC1);
1760                 tarc |= 1;
1761                 ew32(TARC1, tarc);
1762         }
1763
1764         e1000_config_collision_dist(hw);
1765
1766         /* Setup Transmit Descriptor Settings for eop descriptor */
1767         adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1768
1769         /* only set IDE if we are delaying interrupts using the timers */
1770         if (adapter->tx_int_delay)
1771                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1772
1773         if (hw->mac_type < e1000_82543)
1774                 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1775         else
1776                 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1777
1778         /* Cache if we're 82544 running in PCI-X because we'll
1779          * need this to apply a workaround later in the send path. */
1780         if (hw->mac_type == e1000_82544 &&
1781             hw->bus_type == e1000_bus_type_pcix)
1782                 adapter->pcix_82544 = 1;
1783
1784         ew32(TCTL, tctl);
1785
1786 }
1787
1788 /**
1789  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1790  * @adapter: board private structure
1791  * @rxdr:    rx descriptor ring (for a specific queue) to setup
1792  *
1793  * Returns 0 on success, negative on failure
1794  **/
1795
1796 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1797                                     struct e1000_rx_ring *rxdr)
1798 {
1799         struct e1000_hw *hw = &adapter->hw;
1800         struct pci_dev *pdev = adapter->pdev;
1801         int size, desc_len;
1802
1803         size = sizeof(struct e1000_buffer) * rxdr->count;
1804         rxdr->buffer_info = vmalloc(size);
1805         if (!rxdr->buffer_info) {
1806                 DPRINTK(PROBE, ERR,
1807                 "Unable to allocate memory for the receive descriptor ring\n");
1808                 return -ENOMEM;
1809         }
1810         memset(rxdr->buffer_info, 0, size);
1811
1812         if (hw->mac_type <= e1000_82547_rev_2)
1813                 desc_len = sizeof(struct e1000_rx_desc);
1814         else
1815                 desc_len = sizeof(union e1000_rx_desc_packet_split);
1816
1817         /* Round up to nearest 4K */
1818
1819         rxdr->size = rxdr->count * desc_len;
1820         rxdr->size = ALIGN(rxdr->size, 4096);
1821
1822         rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
1823
1824         if (!rxdr->desc) {
1825                 DPRINTK(PROBE, ERR,
1826                 "Unable to allocate memory for the receive descriptor ring\n");
1827 setup_rx_desc_die:
1828                 vfree(rxdr->buffer_info);
1829                 return -ENOMEM;
1830         }
1831
1832         /* Fix for errata 23, can't cross 64kB boundary */
1833         if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1834                 void *olddesc = rxdr->desc;
1835                 dma_addr_t olddma = rxdr->dma;
1836                 DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
1837                                      "at %p\n", rxdr->size, rxdr->desc);
1838                 /* Try again, without freeing the previous */
1839                 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
1840                 /* Failed allocation, critical failure */
1841                 if (!rxdr->desc) {
1842                         pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1843                         DPRINTK(PROBE, ERR,
1844                                 "Unable to allocate memory "
1845                                 "for the receive descriptor ring\n");
1846                         goto setup_rx_desc_die;
1847                 }
1848
1849                 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1850                         /* give up */
1851                         pci_free_consistent(pdev, rxdr->size, rxdr->desc,
1852                                             rxdr->dma);
1853                         pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1854                         DPRINTK(PROBE, ERR,
1855                                 "Unable to allocate aligned memory "
1856                                 "for the receive descriptor ring\n");
1857                         goto setup_rx_desc_die;
1858                 } else {
1859                         /* Free old allocation, new allocation was successful */
1860                         pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1861                 }
1862         }
1863         memset(rxdr->desc, 0, rxdr->size);
1864
1865         rxdr->next_to_clean = 0;
1866         rxdr->next_to_use = 0;
1867
1868         return 0;
1869 }
1870
1871 /**
1872  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1873  *                                (Descriptors) for all queues
1874  * @adapter: board private structure
1875  *
1876  * Return 0 on success, negative on failure
1877  **/
1878
1879 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1880 {
1881         int i, err = 0;
1882
1883         for (i = 0; i < adapter->num_rx_queues; i++) {
1884                 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1885                 if (err) {
1886                         DPRINTK(PROBE, ERR,
1887                                 "Allocation for Rx Queue %u failed\n", i);
1888                         for (i--; i >= 0; i--)
1889                                 e1000_free_rx_resources(adapter,
1890                                                         &adapter->rx_ring[i]);
1891                         break;
1892                 }
1893         }
1894
1895         return err;
1896 }
1897
1898 /**
1899  * e1000_setup_rctl - configure the receive control registers
1900  * @adapter: Board private structure
1901  **/
1902 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
1903                         (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
1904 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1905 {
1906         struct e1000_hw *hw = &adapter->hw;
1907         u32 rctl;
1908
1909         rctl = er32(RCTL);
1910
1911         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1912
1913         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
1914                 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1915                 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1916
1917         if (hw->tbi_compatibility_on == 1)
1918                 rctl |= E1000_RCTL_SBP;
1919         else
1920                 rctl &= ~E1000_RCTL_SBP;
1921
1922         if (adapter->netdev->mtu <= ETH_DATA_LEN)
1923                 rctl &= ~E1000_RCTL_LPE;
1924         else
1925                 rctl |= E1000_RCTL_LPE;
1926
1927         /* Setup buffer sizes */
1928         rctl &= ~E1000_RCTL_SZ_4096;
1929         rctl |= E1000_RCTL_BSEX;
1930         switch (adapter->rx_buffer_len) {
1931                 case E1000_RXBUFFER_256:
1932                         rctl |= E1000_RCTL_SZ_256;
1933                         rctl &= ~E1000_RCTL_BSEX;
1934                         break;
1935                 case E1000_RXBUFFER_512:
1936                         rctl |= E1000_RCTL_SZ_512;
1937                         rctl &= ~E1000_RCTL_BSEX;
1938                         break;
1939                 case E1000_RXBUFFER_1024:
1940                         rctl |= E1000_RCTL_SZ_1024;
1941                         rctl &= ~E1000_RCTL_BSEX;
1942                         break;
1943                 case E1000_RXBUFFER_2048:
1944                 default:
1945                         rctl |= E1000_RCTL_SZ_2048;
1946                         rctl &= ~E1000_RCTL_BSEX;
1947                         break;
1948                 case E1000_RXBUFFER_4096:
1949                         rctl |= E1000_RCTL_SZ_4096;
1950                         break;
1951                 case E1000_RXBUFFER_8192:
1952                         rctl |= E1000_RCTL_SZ_8192;
1953                         break;
1954                 case E1000_RXBUFFER_16384:
1955                         rctl |= E1000_RCTL_SZ_16384;
1956                         break;
1957         }
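        /* The buffer-size extension (BSEX) bit scales the BSIZE encoding by
         * a factor of 16, which is why the 4096/8192/16384 cases above keep
         * BSEX set while the 2 KB-and-smaller cases clear it. */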
1958
1959         ew32(RCTL, rctl);
1960 }
1961
1962 /**
1963  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1964  * @adapter: board private structure
1965  *
1966  * Configure the Rx unit of the MAC after a reset.
1967  **/
1968
1969 static void e1000_configure_rx(struct e1000_adapter *adapter)
1970 {
1971         u64 rdba;
1972         struct e1000_hw *hw = &adapter->hw;
1973         u32 rdlen, rctl, rxcsum, ctrl_ext;
1974
1975         rdlen = adapter->rx_ring[0].count *
1976                 sizeof(struct e1000_rx_desc);
1977         adapter->clean_rx = e1000_clean_rx_irq;
1978         adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1979
1980         /* disable receives while setting up the descriptors */
1981         rctl = er32(RCTL);
1982         ew32(RCTL, rctl & ~E1000_RCTL_EN);
1983
1984         /* set the Receive Delay Timer Register */
1985         ew32(RDTR, adapter->rx_int_delay);
1986
1987         if (hw->mac_type >= e1000_82540) {
1988                 ew32(RADV, adapter->rx_abs_int_delay);
1989                 if (adapter->itr_setting != 0)
1990                         ew32(ITR, 1000000000 / (adapter->itr * 256));
1991         }
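        /* The ITR write above programs an interval in 256 ns units, so a
         * target of adapter->itr interrupts per second becomes
         * 10^9 / (itr * 256).  For example (illustrative), itr = 8000
         * yields a register value of about 488, i.e. one interrupt every
         * ~125 us. */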
1992
1993         if (hw->mac_type >= e1000_82571) {
1994                 ctrl_ext = er32(CTRL_EXT);
1995                 /* Reset delay timers after every interrupt */
1996                 ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
1997                 /* Auto-Mask interrupts upon ICR access */
1998                 ctrl_ext |= E1000_CTRL_EXT_IAME;
1999                 ew32(IAM, 0xffffffff);
2000                 ew32(CTRL_EXT, ctrl_ext);
2001                 E1000_WRITE_FLUSH();
2002         }
2003
2004         /* Setup the HW Rx Head and Tail Descriptor Pointers and
2005          * the Base and Length of the Rx Descriptor Ring */
2006         switch (adapter->num_rx_queues) {
2007         case 1:
2008         default:
2009                 rdba = adapter->rx_ring[0].dma;
2010                 ew32(RDLEN, rdlen);
2011                 ew32(RDBAH, (rdba >> 32));
2012                 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
2013                 ew32(RDT, 0);
2014                 ew32(RDH, 0);
2015                 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
2016                 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
2017                 break;
2018         }
2019
2020         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
2021         if (hw->mac_type >= e1000_82543) {
2022                 rxcsum = er32(RXCSUM);
2023                 if (adapter->rx_csum)
2024                         rxcsum |= E1000_RXCSUM_TUOFL;
2025                 else
2026                         /* don't need to clear IPPCSE as it defaults to 0 */
2027                         rxcsum &= ~E1000_RXCSUM_TUOFL;
2028                 ew32(RXCSUM, rxcsum);
2029         }
2030
2031         /* Enable Receives */
2032         ew32(RCTL, rctl);
2033 }
2034
2035 /**
2036  * e1000_free_tx_resources - Free Tx Resources per Queue
2037  * @adapter: board private structure
2038  * @tx_ring: Tx descriptor ring for a specific queue
2039  *
2040  * Free all transmit software resources
2041  **/
2042
2043 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
2044                                     struct e1000_tx_ring *tx_ring)
2045 {
2046         struct pci_dev *pdev = adapter->pdev;
2047
2048         e1000_clean_tx_ring(adapter, tx_ring);
2049
2050         vfree(tx_ring->buffer_info);
2051         tx_ring->buffer_info = NULL;
2052
2053         pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2054
2055         tx_ring->desc = NULL;
2056 }
2057
2058 /**
2059  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
2060  * @adapter: board private structure
2061  *
2062  * Free all transmit software resources
2063  **/
2064
2065 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
2066 {
2067         int i;
2068
2069         for (i = 0; i < adapter->num_tx_queues; i++)
2070                 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
2071 }
2072
2073 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
2074                                              struct e1000_buffer *buffer_info)
2075 {
2076         if (buffer_info->dma) {
2077                 pci_unmap_page(adapter->pdev,
2078                                 buffer_info->dma,
2079                                 buffer_info->length,
2080                                 PCI_DMA_TODEVICE);
2081                 buffer_info->dma = 0;
2082         }
2083         if (buffer_info->skb) {
2084                 dev_kfree_skb_any(buffer_info->skb);
2085                 buffer_info->skb = NULL;
2086         }
2087         /* buffer_info must be completely set up in the transmit path */
2088 }
2089
2090 /**
2091  * e1000_clean_tx_ring - Free Tx Buffers
2092  * @adapter: board private structure
2093  * @tx_ring: ring to be cleaned
2094  **/
2095
2096 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
2097                                 struct e1000_tx_ring *tx_ring)
2098 {
2099         struct e1000_hw *hw = &adapter->hw;
2100         struct e1000_buffer *buffer_info;
2101         unsigned long size;
2102         unsigned int i;
2103
2104         /* Free all the Tx ring sk_buffs */
2105
2106         for (i = 0; i < tx_ring->count; i++) {
2107                 buffer_info = &tx_ring->buffer_info[i];
2108                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2109         }
2110
2111         size = sizeof(struct e1000_buffer) * tx_ring->count;
2112         memset(tx_ring->buffer_info, 0, size);
2113
2114         /* Zero out the descriptor ring */
2115
2116         memset(tx_ring->desc, 0, tx_ring->size);
2117
2118         tx_ring->next_to_use = 0;
2119         tx_ring->next_to_clean = 0;
2120         tx_ring->last_tx_tso = 0;
2121
2122         writel(0, hw->hw_addr + tx_ring->tdh);
2123         writel(0, hw->hw_addr + tx_ring->tdt);
2124 }
2125
2126 /**
2127  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2128  * @adapter: board private structure
2129  **/
2130
2131 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2132 {
2133         int i;
2134
2135         for (i = 0; i < adapter->num_tx_queues; i++)
2136                 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2137 }
2138
2139 /**
2140  * e1000_free_rx_resources - Free Rx Resources
2141  * @adapter: board private structure
2142  * @rx_ring: ring to clean the resources from
2143  *
2144  * Free all receive software resources
2145  **/
2146
2147 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2148                                     struct e1000_rx_ring *rx_ring)
2149 {
2150         struct pci_dev *pdev = adapter->pdev;
2151
2152         e1000_clean_rx_ring(adapter, rx_ring);
2153
2154         vfree(rx_ring->buffer_info);
2155         rx_ring->buffer_info = NULL;
2156
2157         pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2158
2159         rx_ring->desc = NULL;
2160 }
2161
2162 /**
2163  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2164  * @adapter: board private structure
2165  *
2166  * Free all receive software resources
2167  **/
2168
2169 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2170 {
2171         int i;
2172
2173         for (i = 0; i < adapter->num_rx_queues; i++)
2174                 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2175 }
2176
2177 /**
2178  * e1000_clean_rx_ring - Free Rx Buffers per Queue
2179  * @adapter: board private structure
2180  * @rx_ring: ring to free buffers from
2181  **/
2182
2183 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2184                                 struct e1000_rx_ring *rx_ring)
2185 {
2186         struct e1000_hw *hw = &adapter->hw;
2187         struct e1000_buffer *buffer_info;
2188         struct pci_dev *pdev = adapter->pdev;
2189         unsigned long size;
2190         unsigned int i;
2191
2192         /* Free all the Rx ring sk_buffs */
2193         for (i = 0; i < rx_ring->count; i++) {
2194                 buffer_info = &rx_ring->buffer_info[i];
2195                 if (buffer_info->skb) {
2196                         pci_unmap_single(pdev,
2197                                          buffer_info->dma,
2198                                          buffer_info->length,
2199                                          PCI_DMA_FROMDEVICE);
2200
2201                         dev_kfree_skb(buffer_info->skb);
2202                         buffer_info->skb = NULL;
2203                 }
2204         }
2205
2206         size = sizeof(struct e1000_buffer) * rx_ring->count;
2207         memset(rx_ring->buffer_info, 0, size);
2208
2209         /* Zero out the descriptor ring */
2210
2211         memset(rx_ring->desc, 0, rx_ring->size);
2212
2213         rx_ring->next_to_clean = 0;
2214         rx_ring->next_to_use = 0;
2215
2216         writel(0, hw->hw_addr + rx_ring->rdh);
2217         writel(0, hw->hw_addr + rx_ring->rdt);
2218 }
2219
2220 /**
2221  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2222  * @adapter: board private structure
2223  **/
2224
2225 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2226 {
2227         int i;
2228
2229         for (i = 0; i < adapter->num_rx_queues; i++)
2230                 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2231 }
2232
2233 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset and
2234  * PCI Memory Write and Invalidate (MWI) disabled for certain operations.
2235  */
2236 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2237 {
2238         struct e1000_hw *hw = &adapter->hw;
2239         struct net_device *netdev = adapter->netdev;
2240         u32 rctl;
2241
2242         e1000_pci_clear_mwi(hw);
2243
2244         rctl = er32(RCTL);
2245         rctl |= E1000_RCTL_RST;
2246         ew32(RCTL, rctl);
2247         E1000_WRITE_FLUSH();
2248         mdelay(5);
2249
2250         if (netif_running(netdev))
2251                 e1000_clean_all_rx_rings(adapter);
2252 }
2253
2254 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2255 {
2256         struct e1000_hw *hw = &adapter->hw;
2257         struct net_device *netdev = adapter->netdev;
2258         u32 rctl;
2259
2260         rctl = er32(RCTL);
2261         rctl &= ~E1000_RCTL_RST;
2262         ew32(RCTL, rctl);
2263         E1000_WRITE_FLUSH();
2264         mdelay(5);
2265
2266         if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2267                 e1000_pci_set_mwi(hw);
2268
2269         if (netif_running(netdev)) {
2270                 /* No need to loop, because 82542 supports only 1 queue */
2271                 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2272                 e1000_configure_rx(adapter);
2273                 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2274         }
2275 }
2276
2277 /**
2278  * e1000_set_mac - Change the Ethernet Address of the NIC
2279  * @netdev: network interface device structure
2280  * @p: pointer to an address structure
2281  *
2282  * Returns 0 on success, negative on failure
2283  **/
2284
2285 static int e1000_set_mac(struct net_device *netdev, void *p)
2286 {
2287         struct e1000_adapter *adapter = netdev_priv(netdev);
2288         struct e1000_hw *hw = &adapter->hw;
2289         struct sockaddr *addr = p;
2290
2291         if (!is_valid_ether_addr(addr->sa_data))
2292                 return -EADDRNOTAVAIL;
2293
2294         /* 82542 2.0 needs to be in reset to write receive address registers */
2295
2296         if (hw->mac_type == e1000_82542_rev2_0)
2297                 e1000_enter_82542_rst(adapter);
2298
2299         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2300         memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2301
2302         e1000_rar_set(hw, hw->mac_addr, 0);
2303
2304         /* With 82571 controllers, LAA may be overwritten (with the default)
2305          * due to controller reset from the other port. */
2306         if (hw->mac_type == e1000_82571) {
2307                 /* activate the work around */
2308                 hw->laa_is_present = 1;
2309
2310                 /* Hold a copy of the LAA in RAR[14].  This is done so that
2311                  * between the time RAR[0] gets clobbered and the time it
2312                  * gets fixed (in e1000_watchdog), the actual LAA is in one
2313                  * of the RARs and no incoming packets directed to this port
2314                  * are dropped.  Eventually the LAA will be in both RAR[0]
2315                  * and RAR[14]. */
2316                 e1000_rar_set(hw, hw->mac_addr,
2317                                         E1000_RAR_ENTRIES - 1);
2318         }
2319
2320         if (hw->mac_type == e1000_82542_rev2_0)
2321                 e1000_leave_82542_rst(adapter);
2322
2323         return 0;
2324 }
2325
2326 /**
2327  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2328  * @netdev: network interface device structure
2329  *
2330  * The set_rx_mode entry point is called whenever the unicast or multicast
2331  * address lists or the network interface flags are updated. This routine is
2332  * responsible for configuring the hardware for proper unicast, multicast,
2333  * promiscuous mode, and all-multi behavior.
2334  **/
2335
2336 static void e1000_set_rx_mode(struct net_device *netdev)
2337 {
2338         struct e1000_adapter *adapter = netdev_priv(netdev);
2339         struct e1000_hw *hw = &adapter->hw;
2340         struct dev_addr_list *uc_ptr;
2341         struct dev_addr_list *mc_ptr;
2342         u32 rctl;
2343         u32 hash_value;
2344         int i, rar_entries = E1000_RAR_ENTRIES;
2345         int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
2346                                 E1000_NUM_MTA_REGISTERS_ICH8LAN :
2347                                 E1000_NUM_MTA_REGISTERS;
2348
2349         if (hw->mac_type == e1000_ich8lan)
2350                 rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
2351
2352         /* reserve RAR[14] for LAA over-write work-around */
2353         if (hw->mac_type == e1000_82571)
2354                 rar_entries--;
2355
2356         /* Check for Promiscuous and All Multicast modes */
2357
2358         rctl = er32(RCTL);
2359
2360         if (netdev->flags & IFF_PROMISC) {
2361                 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2362                 rctl &= ~E1000_RCTL_VFE;
2363         } else {
2364                 if (netdev->flags & IFF_ALLMULTI) {
2365                         rctl |= E1000_RCTL_MPE;
2366                 } else {
2367                         rctl &= ~E1000_RCTL_MPE;
2368                 }
2369                 if (adapter->hw.mac_type != e1000_ich8lan)
2370                         rctl |= E1000_RCTL_VFE;
2371         }
2372
2373         uc_ptr = NULL;
2374         if (netdev->uc_count > rar_entries - 1) {
2375                 rctl |= E1000_RCTL_UPE;
2376         } else if (!(netdev->flags & IFF_PROMISC)) {
2377                 rctl &= ~E1000_RCTL_UPE;
2378                 uc_ptr = netdev->uc_list;
2379         }
2380
2381         ew32(RCTL, rctl);
2382
2383         /* 82542 2.0 needs to be in reset to write receive address registers */
2384
2385         if (hw->mac_type == e1000_82542_rev2_0)
2386                 e1000_enter_82542_rst(adapter);
2387
2388         /* load the first 14 addresses into the exact filters 1-14. Unicast
2389          * addresses take precedence to avoid disabling unicast filtering
2390          * when possible.
2391          *
2392          * RAR 0 is used for the station MAC address.  If there are fewer
2393          * than 14 addresses, go ahead and clear the remaining filters
2394          * -- with 82571 controllers only entries 0-13 are filled here.
2395          */
2396         mc_ptr = netdev->mc_list;
2397
2398         for (i = 1; i < rar_entries; i++) {
2399                 if (uc_ptr) {
2400                         e1000_rar_set(hw, uc_ptr->da_addr, i);
2401                         uc_ptr = uc_ptr->next;
2402                 } else if (mc_ptr) {
2403                         e1000_rar_set(hw, mc_ptr->da_addr, i);
2404                         mc_ptr = mc_ptr->next;
2405                 } else {
2406                         E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2407                         E1000_WRITE_FLUSH();
2408                         E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2409                         E1000_WRITE_FLUSH();
2410                 }
2411         }
2412         WARN_ON(uc_ptr != NULL);
2413
2414         /* clear the old settings from the multicast hash table */
2415
2416         for (i = 0; i < mta_reg_count; i++) {
2417                 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
2418                 E1000_WRITE_FLUSH();
2419         }
2420
2421         /* load any remaining addresses into the hash table */
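        /* e1000_hash_mc_addr() reduces each remaining address to a hash that
         * selects a single bit in the multicast table array (mta_reg_count
         * 32-bit MTA registers); e1000_mta_set() then sets that bit so
         * matching multicast frames are accepted. */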
2422
2423         for (; mc_ptr; mc_ptr = mc_ptr->next) {
2424                 hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
2425                 e1000_mta_set(hw, hash_value);
2426         }
2427
2428         if (hw->mac_type == e1000_82542_rev2_0)
2429                 e1000_leave_82542_rst(adapter);
2430 }
2431
2432 /* Need to wait a few seconds after link up to get diagnostic information from
2433  * the phy */
2434
2435 static void e1000_update_phy_info(unsigned long data)
2436 {
2437         struct e1000_adapter *adapter = (struct e1000_adapter *)data;
2438         struct e1000_hw *hw = &adapter->hw;
2439         e1000_phy_get_info(hw, &adapter->phy_info);
2440 }
2441
2442 /**
2443  * e1000_82547_tx_fifo_stall - Timer Call-back
2444  * @data: pointer to adapter cast into an unsigned long
2445  **/
2446
2447 static void e1000_82547_tx_fifo_stall(unsigned long data)
2448 {
2449         struct e1000_adapter *adapter = (struct e1000_adapter *)data;
2450         struct e1000_hw *hw = &adapter->hw;
2451         struct net_device *netdev = adapter->netdev;
2452         u32 tctl;
2453
2454         if (atomic_read(&adapter->tx_fifo_stall)) {
2455                 if ((er32(TDT) == er32(TDH)) &&
2456                    (er32(TDFT) == er32(TDFH)) &&
2457                    (er32(TDFTS) == er32(TDFHS))) {
2458                         tctl = er32(TCTL);
2459                         ew32(TCTL, tctl & ~E1000_TCTL_EN);
2460                         ew32(TDFT, adapter->tx_head_addr);
2461                         ew32(TDFH, adapter->tx_head_addr);
2462                         ew32(TDFTS, adapter->tx_head_addr);
2463                         ew32(TDFHS, adapter->tx_head_addr);
2464                         ew32(TCTL, tctl);
2465                         E1000_WRITE_FLUSH();
2466
2467                         adapter->tx_fifo_head = 0;
2468                         atomic_set(&adapter->tx_fifo_stall, 0);
2469                         netif_wake_queue(netdev);
2470                 } else {
2471                         mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
2472                 }
2473         }
2474 }
2475
2476 /**
2477  * e1000_watchdog - Timer Call-back
2478  * @data: pointer to adapter cast into an unsigned long
2479  **/
2480 static void e1000_watchdog(unsigned long data)
2481 {
2482         struct e1000_adapter *adapter = (struct e1000_adapter *)data;
2483         struct e1000_hw *hw = &adapter->hw;
2484         struct net_device *netdev = adapter->netdev;
2485         struct e1000_tx_ring *txdr = adapter->tx_ring;
2486         u32 link, tctl;
2487         s32 ret_val;
2488
2489         ret_val = e1000_check_for_link(hw);
2490         if ((ret_val == E1000_ERR_PHY) &&
2491             (hw->phy_type == e1000_phy_igp_3) &&
2492             (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
2493                 /* See e1000_kumeran_lock_loss_workaround() */
2494                 DPRINTK(LINK, INFO,
2495                         "Gigabit has been disabled, downgrading speed\n");
2496         }
2497
2498         if (hw->mac_type == e1000_82573) {
2499                 e1000_enable_tx_pkt_filtering(hw);
2500                 if (adapter->mng_vlan_id != hw->mng_cookie.vlan_id)
2501                         e1000_update_mng_vlan(adapter);
2502         }
2503
2504         if ((hw->media_type == e1000_media_type_internal_serdes) &&
2505            !(er32(TXCW) & E1000_TXCW_ANE))
2506                 link = !hw->serdes_link_down;
2507         else
2508                 link = er32(STATUS) & E1000_STATUS_LU;
2509
2510         if (link) {
2511                 if (!netif_carrier_ok(netdev)) {
2512                         u32 ctrl;
2513                         bool txb2b = true;
2514                         e1000_get_speed_and_duplex(hw,
2515                                                    &adapter->link_speed,
2516                                                    &adapter->link_duplex);
2517
2518                         ctrl = er32(CTRL);
2519                         DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
2520                                 "Flow Control: %s\n",
2521                                 adapter->link_speed,
2522                                 adapter->link_duplex == FULL_DUPLEX ?
2523                                 "Full Duplex" : "Half Duplex",
2524                                 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2525                                 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2526                                 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2527                                 E1000_CTRL_TFCE) ? "TX" : "None" )));
2528
2529                         /* tweak tx_queue_len according to speed/duplex
2530                          * and adjust the timeout factor */
2531                         netdev->tx_queue_len = adapter->tx_queue_len;
2532                         adapter->tx_timeout_factor = 1;
2533                         switch (adapter->link_speed) {
2534                         case SPEED_10:
2535                                 txb2b = false;
2536                                 netdev->tx_queue_len = 10;
2537                                 adapter->tx_timeout_factor = 8;
2538                                 break;
2539                         case SPEED_100:
2540                                 txb2b = false;
2541                                 netdev->tx_queue_len = 100;
2542                                 /* maybe add some timeout factor ? */
2543                                 break;
2544                         }
2545
2546                         if ((hw->mac_type == e1000_82571 ||
2547                              hw->mac_type == e1000_82572) &&
2548                             !txb2b) {
2549                                 u32 tarc0;
2550                                 tarc0 = er32(TARC0);
2551                                 tarc0 &= ~(1 << 21);
2552                                 ew32(TARC0, tarc0);
2553                         }
2554
2555                         /* disable TSO for pcie and 10/100 speeds, to avoid
2556                          * some hardware issues */
2557                         if (!adapter->tso_force &&
2558                             hw->bus_type == e1000_bus_type_pci_express){
2559                                 switch (adapter->link_speed) {
2560                                 case SPEED_10:
2561                                 case SPEED_100:
2562                                         DPRINTK(PROBE, INFO,
2563                                         "10/100 speed: disabling TSO\n");
2564                                         netdev->features &= ~NETIF_F_TSO;
2565                                         netdev->features &= ~NETIF_F_TSO6;
2566                                         break;
2567                                 case SPEED_1000:
2568                                         netdev->features |= NETIF_F_TSO;
2569                                         netdev->features |= NETIF_F_TSO6;
2570                                         break;
2571                                 default:
2572                                         /* oops */
2573                                         break;
2574                                 }
2575                         }
2576
2577                         /* enable transmits in the hardware, need to do this
2578                          * after setting TARC0 */
2579                         tctl = er32(TCTL);
2580                         tctl |= E1000_TCTL_EN;
2581                         ew32(TCTL, tctl);
2582
2583                         netif_carrier_on(netdev);
2584                         netif_wake_queue(netdev);
2585                         mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
2586                         adapter->smartspeed = 0;
2587                 } else {
2588                         /* make sure the receive unit is started */
2589                         if (hw->rx_needs_kicking) {
2590                                 u32 rctl = er32(RCTL);
2591                                 ew32(RCTL, rctl | E1000_RCTL_EN);
2592                         }
2593                 }
2594         } else {
2595                 if (netif_carrier_ok(netdev)) {
2596                         adapter->link_speed = 0;
2597                         adapter->link_duplex = 0;
2598                         DPRINTK(LINK, INFO, "NIC Link is Down\n");
2599                         netif_carrier_off(netdev);
2600                         netif_stop_queue(netdev);
2601                         mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
2602
2603                         /* 80003ES2LAN workaround --
2604                          * for the packet buffer work-around on a link-down
2605                          * event, disable receives in the ISR and reset the
2606                          * device here in the watchdog
2607                          */
2608                         if (hw->mac_type == e1000_80003es2lan)
2609                                 /* reset device */
2610                                 schedule_work(&adapter->reset_task);
2611                 }
2612
2613                 e1000_smartspeed(adapter);
2614         }
2615
2616         e1000_update_stats(adapter);
2617
2618         hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2619         adapter->tpt_old = adapter->stats.tpt;
2620         hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2621         adapter->colc_old = adapter->stats.colc;
2622
2623         adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2624         adapter->gorcl_old = adapter->stats.gorcl;
2625         adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2626         adapter->gotcl_old = adapter->stats.gotcl;
2627
2628         e1000_update_adaptive(hw);
2629
2630         if (!netif_carrier_ok(netdev)) {
2631                 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2632                         /* We've lost link, so the controller stops DMA,
2633                          * but we've got queued Tx work that's never going
2634                          * to get done, so reset controller to flush Tx.
2635                          * (Do the reset outside of interrupt context). */
2636                         adapter->tx_timeout_count++;
2637                         schedule_work(&adapter->reset_task);
2638                 }
2639         }
2640
2641         /* Cause software interrupt to ensure rx ring is cleaned */
2642         ew32(ICS, E1000_ICS_RXDMT0);
2643
2644         /* Force detection of hung controller every watchdog period */
2645         adapter->detect_tx_hung = true;
2646
2647         /* With 82571 controllers, LAA may be overwritten due to controller
2648          * reset from the other port. Set the appropriate LAA in RAR[0] */
2649         if (hw->mac_type == e1000_82571 && hw->laa_is_present)
2650                 e1000_rar_set(hw, hw->mac_addr, 0);
2651
2652         /* Reset the timer */
2653         mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
2654 }
2655
2656 enum latency_range {
2657         lowest_latency = 0,
2658         low_latency = 1,
2659         bulk_latency = 2,
2660         latency_invalid = 255
2661 };
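/*
 * e1000_set_itr() below maps these ranges onto target interrupt rates:
 * lowest_latency -> 70000, low_latency -> 20000 and bulk_latency -> 4000
 * interrupts per second.
 */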
2662
2663 /**
2664  * e1000_update_itr - update the dynamic ITR value based on statistics
2665  *      Stores a new ITR value based on packets and byte
2666  *      counts during the last interrupt.  The advantage of per interrupt
2667  *      computation is faster updates and more accurate ITR for the current
2668  *      traffic pattern.  Constants in this function were computed
2669  *      based on theoretical maximum wire speed and thresholds were set based
2670  *      on testing data as well as attempting to minimize response time
2671  *      while increasing bulk throughput.
2672  *      This functionality is controlled by the InterruptThrottleRate module
2673  *      parameter (see e1000_param.c).
2674  * @adapter: pointer to adapter
2675  * @itr_setting: current adapter->itr
2676  * @packets: the number of packets during this measurement interval
2677  * @bytes: the number of bytes during this measurement interval
2678  **/
2679 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2680                                      u16 itr_setting, int packets, int bytes)
2681 {
2682         unsigned int retval = itr_setting;
2683         struct e1000_hw *hw = &adapter->hw;
2684
2685         if (unlikely(hw->mac_type < e1000_82540))
2686                 goto update_itr_done;
2687
2688         if (packets == 0)
2689                 goto update_itr_done;
2690
2691         switch (itr_setting) {
2692         case lowest_latency:
2693                 /* jumbo frames get bulk treatment */
2694                 if (bytes/packets > 8000)
2695                         retval = bulk_latency;
2696                 else if ((packets < 5) && (bytes > 512))
2697                         retval = low_latency;
2698                 break;
2699         case low_latency:  /* 50 usec aka 20000 ints/s */
2700                 if (bytes > 10000) {
2701                         /* jumbo frames need bulk latency setting */
2702                         if (bytes/packets > 8000)
2703                                 retval = bulk_latency;
2704                         else if ((packets < 10) || ((bytes/packets) > 1200))
2705                                 retval = bulk_latency;
2706                         else if (packets > 35)
2707                                 retval = lowest_latency;
2708                 } else if (bytes/packets > 2000)
2709                         retval = bulk_latency;
2710                 else if (packets <= 2 && bytes < 512)
2711                         retval = lowest_latency;
2712                 break;
2713         case bulk_latency: /* 250 usec aka 4000 ints/s */
2714                 if (bytes > 25000) {
2715                         if (packets > 35)
2716                                 retval = low_latency;
2717                 } else if (bytes < 6000) {
2718                         retval = low_latency;
2719                 }
2720                 break;
2721         }
2722
2723 update_itr_done:
2724         return retval;
2725 }
2726
2727 static void e1000_set_itr(struct e1000_adapter *adapter)
2728 {
2729         struct e1000_hw *hw = &adapter->hw;
2730         u16 current_itr;
2731         u32 new_itr = adapter->itr;
2732
2733         if (unlikely(hw->mac_type < e1000_82540))
2734                 return;
2735
2736         /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2737         if (unlikely(adapter->link_speed != SPEED_1000)) {
2738                 current_itr = 0;
2739                 new_itr = 4000;
2740                 goto set_itr_now;
2741         }
2742
2743         adapter->tx_itr = e1000_update_itr(adapter,
2744                                     adapter->tx_itr,
2745                                     adapter->total_tx_packets,
2746                                     adapter->total_tx_bytes);
2747         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2748         if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2749                 adapter->tx_itr = low_latency;
2750
2751         adapter->rx_itr = e1000_update_itr(adapter,
2752                                     adapter->rx_itr,
2753                                     adapter->total_rx_packets,
2754                                     adapter->total_rx_bytes);
2755         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2756         if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2757                 adapter->rx_itr = low_latency;
2758
2759         current_itr = max(adapter->rx_itr, adapter->tx_itr);
2760
2761         switch (current_itr) {
2762         /* counts and packets in update_itr are dependent on these numbers */
2763         case lowest_latency:
2764                 new_itr = 70000;
2765                 break;
2766         case low_latency:
2767                 new_itr = 20000; /* aka hwitr = ~200 */
2768                 break;
2769         case bulk_latency:
2770                 new_itr = 4000;
2771                 break;
2772         default:
2773                 break;
2774         }
2775
2776 set_itr_now:
2777         if (new_itr != adapter->itr) {
2778                 /* This attempts to bias the interrupt rate towards bulk
2779                  * by adding intermediate steps when the interrupt rate is
2780                  * increasing */
2781                 new_itr = new_itr > adapter->itr ?
2782                              min(adapter->itr + (new_itr >> 2), new_itr) :
2783                              new_itr;
2784                 adapter->itr = new_itr;
2785                 ew32(ITR, 1000000000 / (new_itr * 256));
2786         }
2789 }
2790
2791 #define E1000_TX_FLAGS_CSUM             0x00000001
2792 #define E1000_TX_FLAGS_VLAN             0x00000002
2793 #define E1000_TX_FLAGS_TSO              0x00000004
2794 #define E1000_TX_FLAGS_IPV4             0x00000008
2795 #define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
2796 #define E1000_TX_FLAGS_VLAN_SHIFT       16
2797
2798 static int e1000_tso(struct e1000_adapter *adapter,
2799                      struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2800 {
2801         struct e1000_context_desc *context_desc;
2802         struct e1000_buffer *buffer_info;
2803         unsigned int i;
2804         u32 cmd_length = 0;
2805         u16 ipcse = 0, tucse, mss;
2806         u8 ipcss, ipcso, tucss, tucso, hdr_len;
2807         int err;
2808
2809         if (skb_is_gso(skb)) {
2810                 if (skb_header_cloned(skb)) {
2811                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2812                         if (err)
2813                                 return err;
2814                 }
2815
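                /* hdr_len spans from the start of the frame up to and including
                 * the TCP header (MAC + IP + TCP); skb->len - hdr_len below is
                 * the TSO payload length programmed into the descriptor. */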
2816                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2817                 mss = skb_shinfo(skb)->gso_size;
2818                 if (skb->protocol == htons(ETH_P_IP)) {
2819                         struct iphdr *iph = ip_hdr(skb);
2820                         iph->tot_len = 0;
2821                         iph->check = 0;
2822                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2823                                                                  iph->daddr, 0,
2824                                                                  IPPROTO_TCP,
2825                                                                  0);
2826                         cmd_length = E1000_TXD_CMD_IP;
2827                         ipcse = skb_transport_offset(skb) - 1;
2828                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2829                         ipv6_hdr(skb)->payload_len = 0;
2830                         tcp_hdr(skb)->check =
2831                                 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2832                                                  &ipv6_hdr(skb)->daddr,
2833                                                  0, IPPROTO_TCP, 0);
2834                         ipcse = 0;
2835                 }
2836                 ipcss = skb_network_offset(skb);
2837                 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2838                 tucss = skb_transport_offset(skb);
2839                 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2840                 tucse = 0;
2841
2842                 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2843                                E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2844
2845                 i = tx_ring->next_to_use;
2846                 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2847                 buffer_info = &tx_ring->buffer_info[i];
2848
2849                 context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2850                 context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2851                 context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2852                 context_desc->upper_setup.tcp_fields.tucss = tucss;
2853                 context_desc->upper_setup.tcp_fields.tucso = tucso;
2854                 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2855                 context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2856                 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2857                 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2858
2859                 buffer_info->time_stamp = jiffies;
2860                 buffer_info->next_to_watch = i;
2861
2862                 if (++i == tx_ring->count) i = 0;
2863                 tx_ring->next_to_use = i;
2864
2865                 return true;
2866         }
2867         return false;
2868 }
2869
2870 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2871                           struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2872 {
2873         struct e1000_context_desc *context_desc;
2874         struct e1000_buffer *buffer_info;
2875         unsigned int i;
2876         u8 css;
2877         u32 cmd_len = E1000_TXD_CMD_DEXT;
2878
2879         if (skb->ip_summed != CHECKSUM_PARTIAL)
2880                 return false;
2881
2882         switch (skb->protocol) {
2883         case __constant_htons(ETH_P_IP):
2884                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2885                         cmd_len |= E1000_TXD_CMD_TCP;
2886                 break;
2887         case __constant_htons(ETH_P_IPV6):
2888                 /* XXX not handling all IPV6 headers */
2889                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2890                         cmd_len |= E1000_TXD_CMD_TCP;
2891                 break;
2892         default:
2893                 if (unlikely(net_ratelimit()))
2894                         DPRINTK(DRV, WARNING,
2895                                 "checksum_partial proto=%x!\n", skb->protocol);
2896                 break;
2897         }
2898
2899         css = skb_transport_offset(skb);
2900
2901         i = tx_ring->next_to_use;
2902         buffer_info = &tx_ring->buffer_info[i];
2903         context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2904
2905         context_desc->lower_setup.ip_config = 0;
2906         context_desc->upper_setup.tcp_fields.tucss = css;
2907         context_desc->upper_setup.tcp_fields.tucso =
2908                 css + skb->csum_offset;
2909         context_desc->upper_setup.tcp_fields.tucse = 0;
2910         context_desc->tcp_seg_setup.data = 0;
2911         context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2912
2913         buffer_info->time_stamp = jiffies;
2914         buffer_info->next_to_watch = i;
2915
2916         if (unlikely(++i == tx_ring->count)) i = 0;
2917         tx_ring->next_to_use = i;
2918
2919         return true;
2920 }
2921
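/* Each Tx data descriptor carries at most 2^E1000_MAX_TXD_PWR = 4096 bytes. */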
2922 #define E1000_MAX_TXD_PWR       12
2923 #define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
2924
2925 static int e1000_tx_map(struct e1000_adapter *adapter,
2926                         struct e1000_tx_ring *tx_ring,
2927                         struct sk_buff *skb, unsigned int first,
2928                         unsigned int max_per_txd, unsigned int nr_frags,
2929                         unsigned int mss)
2930 {
2931         struct e1000_hw *hw = &adapter->hw;
2932         struct e1000_buffer *buffer_info;
2933         unsigned int len = skb->len;
2934         unsigned int offset = 0, size, count = 0, i;
2935         unsigned int f;
2936         len -= skb->data_len;
2937
2938         i = tx_ring->next_to_use;
2939
2940         while (len) {
2941                 buffer_info = &tx_ring->buffer_info[i];
2942                 size = min(len, max_per_txd);
2943                 /* Workaround for controller erratum --
2944                  * the descriptor for a non-TSO packet in a linear skb that
2945                  * follows a TSO packet gets written back prematurely, before
2946                  * the data is fully DMA'd to the controller */
2947                 if (!skb->data_len && tx_ring->last_tx_tso &&
2948                     !skb_is_gso(skb)) {
2949                         tx_ring->last_tx_tso = 0;
2950                         size -= 4;
2951                 }
2952
2953                 /* Workaround for premature desc write-backs
2954                  * in TSO mode.  Append 4-byte sentinel desc */
2955                 if (unlikely(mss && !nr_frags && size == len && size > 8))
2956                         size -= 4;
2957                 /* Workaround for erratum 10, which applies
2958                  * to all controllers in PCI-X mode.
2959                  * The fix is to make sure that the first descriptor of a
2960                  * packet is smaller than 2048 - 16 - 16 (i.e. 2016) bytes
2961                  */
2962                 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2963                                 (size > 2015) && count == 0))
2964                         size = 2015;
2965
2966                 /* Workaround for potential 82544 hang in PCI-X.  Avoid
2967                  * terminating buffers within evenly-aligned dwords. */
2968                 if (unlikely(adapter->pcix_82544 &&
2969                    !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2970                    size > 4))
2971                         size -= 4;
2972
2973                 buffer_info->length = size;
2974                 buffer_info->dma =
2975                         pci_map_single(adapter->pdev,
2976                                 skb->data + offset,
2977                                 size,
2978                                 PCI_DMA_TODEVICE);
2979                 buffer_info->time_stamp = jiffies;
2980                 buffer_info->next_to_watch = i;
2981
2982                 len -= size;
2983                 offset += size;
2984                 count++;
2985                 if (unlikely(++i == tx_ring->count)) i = 0;
2986         }
2987
2988         for (f = 0; f < nr_frags; f++) {
2989                 struct skb_frag_struct *frag;
2990
2991                 frag = &skb_shinfo(skb)->frags[f];
2992                 len = frag->size;
2993                 offset = frag->page_offset;
2994
2995                 while (len) {
2996                         buffer_info = &tx_ring->buffer_info[i];
2997                         size = min(len, max_per_txd);
2998                         /* Workaround for premature desc write-backs
2999                          * in TSO mode.  Append 4-byte sentinel desc */
3000                         if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
3001                                 size -= 4;
3002                         /* Workaround for potential 82544 hang in PCI-X.
3003                          * Avoid terminating buffers within evenly-aligned
3004                          * dwords. */
3005                         if (unlikely(adapter->pcix_82544 &&
3006                            !((unsigned long)(frag->page+offset+size-1) & 4) &&
3007                            size > 4))
3008                                 size -= 4;
3009
3010                         buffer_info->length = size;
3011                         buffer_info->dma =
3012                                 pci_map_page(adapter->pdev,
3013                                         frag->page,
3014                                         offset,
3015                                         size,
3016                                         PCI_DMA_TODEVICE);
3017                         buffer_info->time_stamp = jiffies;
3018                         buffer_info->next_to_watch = i;
3019
3020                         len -= size;
3021                         offset += size;
3022                         count++;
3023                         if (unlikely(++i == tx_ring->count)) i = 0;
3024                 }
3025         }
3026
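        /* Step back to the last descriptor actually used: it keeps the skb for
         * later freeing, and the first descriptor's next_to_watch points at it
         * so cleanup knows where this packet ends. */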
3027         i = (i == 0) ? tx_ring->count - 1 : i - 1;
3028         tx_ring->buffer_info[i].skb = skb;
3029         tx_ring->buffer_info[first].next_to_watch = i;
3030
3031         return count;
3032 }
3033
3034 static void e1000_tx_queue(struct e1000_adapter *adapter,
3035                            struct e1000_tx_ring *tx_ring, int tx_flags,
3036                            int count)
3037 {
3038         struct e1000_hw *hw = &adapter->hw;
3039         struct e1000_tx_desc *tx_desc = NULL;
3040         struct e1000_buffer *buffer_info;
3041         u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
3042         unsigned int i;
3043
3044         if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
3045                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
3046                              E1000_TXD_CMD_TSE;
3047                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3048
3049                 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
3050                         txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3051         }
3052
3053         if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
3054                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3055                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3056         }
3057
3058         if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3059                 txd_lower |= E1000_TXD_CMD_VLE;
3060                 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3061         }
3062
3063         i = tx_ring->next_to_use;
3064
3065         while (count--) {
3066                 buffer_info = &tx_ring->buffer_info[i];
3067                 tx_desc = E1000_TX_DESC(*tx_ring, i);
3068                 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3069                 tx_desc->lower.data =
3070                         cpu_to_le32(txd_lower | buffer_info->length);
3071                 tx_desc->upper.data = cpu_to_le32(txd_upper);
3072                 if (unlikely(++i == tx_ring->count)) i = 0;
3073         }
3074
3075         tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3076
3077         /* Force memory writes to complete before letting h/w
3078          * know there are new descriptors to fetch.  (Only
3079          * applicable for weak-ordered memory model archs,
3080          * such as IA-64). */
3081         wmb();
3082
3083         tx_ring->next_to_use = i;
3084         writel(i, hw->hw_addr + tx_ring->tdt);
3085         /* we need this if more than one processor can write to our tail
3086          * at a time; it synchronizes IO on IA64/Altix systems */
3087         mmiowb();
3088 }
3089
3090 /**
3091  * 82547 workaround to avoid controller hang in half-duplex environment.
3092  * The workaround is to avoid queuing a large packet that would span
3093  * the internal Tx FIFO ring boundary by notifying the stack to resend
3094  * the packet at a later time.  This gives the Tx FIFO an opportunity to
3095  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3096  * to the beginning of the Tx FIFO.
3097  **/
3098
3099 #define E1000_FIFO_HDR                  0x10
3100 #define E1000_82547_PAD_LEN             0x3E0
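/* E1000_FIFO_HDR (0x10 = 16 bytes) is the per-packet header the controller
 * stores with each packet in its Tx FIFO; E1000_82547_PAD_LEN (0x3E0 = 992
 * bytes) is the margin used below: a stall is requested when a packet would
 * extend past the FIFO wrap point by at least this much. */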
3101
3102 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3103                                        struct sk_buff *skb)
3104 {
3105         u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3106         u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3107
3108         skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3109
3110         if (adapter->link_duplex != HALF_DUPLEX)
3111                 goto no_fifo_stall_required;
3112
3113         if (atomic_read(&adapter->tx_fifo_stall))
3114                 return 1;
3115
3116         if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3117                 atomic_set(&adapter->tx_fifo_stall, 1);
3118                 return 1;
3119         }
3120
3121 no_fifo_stall_required:
3122         adapter->tx_fifo_head += skb_fifo_len;
3123         if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3124                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3125         return 0;
3126 }
3127
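/* 282 = 14 (Ethernet) + 20 (IPv4) + 8 (UDP) + 236 (BOOTP fixed fields)
 * + 4 (DHCP magic cookie): the smallest frame that can carry a DHCP message. */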
3128 #define MINIMUM_DHCP_PACKET_SIZE 282
3129 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
3130                                     struct sk_buff *skb)
3131 {
3132         struct e1000_hw *hw =  &adapter->hw;
3133         u16 length, offset;
3134         if (vlan_tx_tag_present(skb)) {
3135                 if (!((vlan_tx_tag_get(skb) == hw->mng_cookie.vlan_id) &&
3136                         ( hw->mng_cookie.status &
3137                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
3138                         return 0;
3139         }
3140         if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
3141                 struct ethhdr *eth = (struct ethhdr *)skb->data;
3142                 if ((htons(ETH_P_IP) == eth->h_proto)) {
3143                         const struct iphdr *ip =
3144                                 (struct iphdr *)((u8 *)skb->data+14);
3145                         if (IPPROTO_UDP == ip->protocol) {
3146                                 struct udphdr *udp =
3147                                         (struct udphdr *)((u8 *)ip +
3148                                                 (ip->ihl << 2));
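                                /* UDP destination port 67 = BOOTP/DHCP server */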
3149                                 if (ntohs(udp->dest) == 67) {
3150                                         offset = (u8 *)udp + 8 - skb->data;
3151                                         length = skb->len - offset;
3152
3153                                         return e1000_mng_write_dhcp_info(hw,
3154                                                         (u8 *)udp + 8,
3155                                                         length);
3156                                 }
3157                         }
3158                 }
3159         }
3160         return 0;
3161 }
3162
3163 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3164 {
3165         struct e1000_adapter *adapter = netdev_priv(netdev);
3166         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3167
3168         netif_stop_queue(netdev);
3169         /* Herbert's original patch had:
3170          *  smp_mb__after_netif_stop_queue();
3171          * but since that doesn't exist yet, just open code it. */
3172         smp_mb();
3173
3174         /* We need to check again in a case another CPU has just
3175          * made room available. */
3176         if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3177                 return -EBUSY;
3178
3179         /* A reprieve! */
3180         netif_start_queue(netdev);
3181         ++adapter->restart_queue;
3182         return 0;
3183 }
3184
3185 static int e1000_maybe_stop_tx(struct net_device *netdev,
3186                                struct e1000_tx_ring *tx_ring, int size)
3187 {
3188         if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3189                 return 0;
3190         return __e1000_maybe_stop_tx(netdev, size);
3191 }
3192
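/* Descriptors needed for S bytes when each descriptor holds at most 2^X bytes;
 * rounds up, and deliberately over-counts by one when S is an exact multiple. */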
3193 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
3194 static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3195 {
3196         struct e1000_adapter *adapter = netdev_priv(netdev);
3197         struct e1000_hw *hw = &adapter->hw;
3198         struct e1000_tx_ring *tx_ring;
3199         unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3200         unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3201         unsigned int tx_flags = 0;
3202         unsigned int len = skb->len - skb->data_len;
3203         unsigned long flags;
3204         unsigned int nr_frags;
3205         unsigned int mss;
3206         int count = 0;
3207         int tso;
3208         unsigned int f;
3209
3210         /* This goes back to the question of how to logically map a tx queue
3211          * to a flow.  Right now, performance is slightly worse when using
3212          * multiple tx queues.  If the stack breaks away from a
3213          * single qdisc implementation, we can look at this again. */
3214         tx_ring = adapter->tx_ring;
3215
3216         if (unlikely(skb->len <= 0)) {
3217                 dev_kfree_skb_any(skb);
3218                 return NETDEV_TX_OK;
3219         }
3220
3221         /* 82571 and newer don't need the workaround that limited descriptor
3222          * length to 4kB */
3223         if (hw->mac_type >= e1000_82571)
3224                 max_per_txd = 8192;
3225
3226         mss = skb_shinfo(skb)->gso_size;
3227         /* The controller does a simple calculation to
3228          * make sure there is enough room in the FIFO before
3229          * initiating the DMA for each buffer.  It assumes each
3230          * buffer spans at most 4 MSS, i.e. ceil(buffer len / mss) <= 4.
3231          * To make sure we don't overrun the FIFO, cap the max
3232          * buffer len accordingly if mss drops. */
3233         if (mss) {
3234                 u8 hdr_len;
3235                 max_per_txd = min(mss << 2, max_per_txd);
3236                 max_txd_pwr = fls(max_per_txd) - 1;
3237
3238                 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
3239                  * points to just header, pull a few bytes of payload from
3240                  * frags into skb->data */
3241                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3242                 if (skb->data_len && hdr_len == len) {
3243                         switch (hw->mac_type) {
3244                                 unsigned int pull_size;
3245                         case e1000_82544:
3246                                 /* Make sure we have room to chop off 4 bytes,
3247                                  * and that the end alignment will work out to
3248                                  * this hardware's requirements.
3249                                  * NOTE: this is a TSO-only workaround;
3250                                  * if the end byte alignment is not correct,
3251                                  * it moves us into the next dword */
3252                                 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
3253                                         break;
3254                                 /* fall through */
3255                         case e1000_82571:
3256                         case e1000_82572:
3257                         case e1000_82573:
3258                         case e1000_ich8lan:
3259                                 pull_size = min((unsigned int)4, skb->data_len);
3260                                 if (!__pskb_pull_tail(skb, pull_size)) {
3261                                         DPRINTK(DRV, ERR,
3262                                                 "__pskb_pull_tail failed.\n");
3263                                         dev_kfree_skb_any(skb);
3264                                         return NETDEV_TX_OK;
3265                                 }
3266                                 len = skb->len - skb->data_len;
3267                                 break;
3268                         default:
3269                                 /* do nothing */
3270                                 break;
3271                         }
3272                 }
3273         }
3274
3275         /* reserve a descriptor for the offload context */
3276         if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3277                 count++;
3278         count++;
3279
3280         /* Controller Erratum workaround */
3281         if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3282                 count++;
3283
3284         count += TXD_USE_COUNT(len, max_txd_pwr);
3285
3286         if (adapter->pcix_82544)
3287                 count++;
3288
3289         /* Work-around for erratum 10, which applies to all controllers
3290          * in PCI-X mode: add one more descriptor to the count
3291          */
3292         if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3293                         (len > 2015)))
3294                 count++;
3295
3296         nr_frags = skb_shinfo(skb)->nr_frags;
3297         for (f = 0; f < nr_frags; f++)
3298                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
3299                                        max_txd_pwr);
3300         if (adapter->pcix_82544)
3301                 count += nr_frags;
3302
3303
3304         if (hw->tx_pkt_filtering &&
3305             (hw->mac_type == e1000_82573))
3306                 e1000_transfer_dhcp_info(adapter, skb);
3307
3308         if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags))
3309                 /* Collision - tell upper layer to requeue */
3310                 return NETDEV_TX_LOCKED;
3311
3312         /* need: count + 2 desc gap to keep tail from touching
3313          * head, otherwise try next time */
3314         if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) {
3315                 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3316                 return NETDEV_TX_BUSY;
3317         }
3318
3319         if (unlikely(hw->mac_type == e1000_82547)) {
3320                 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
3321                         netif_stop_queue(netdev);
3322                         mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
3323                         spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3324                         return NETDEV_TX_BUSY;
3325                 }
3326         }
3327
3328         if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
3329                 tx_flags |= E1000_TX_FLAGS_VLAN;
3330                 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3331         }
3332
3333         first = tx_ring->next_to_use;
3334
3335         tso = e1000_tso(adapter, tx_ring, skb);
3336         if (tso < 0) {
3337                 dev_kfree_skb_any(skb);
3338                 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3339                 return NETDEV_TX_OK;
3340         }
3341
3342         if (likely(tso)) {
3343                 tx_ring->last_tx_tso = 1;
3344                 tx_flags |= E1000_TX_FLAGS_TSO;
3345         } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3346                 tx_flags |= E1000_TX_FLAGS_CSUM;
3347
3348         /* The old method was to assume an IPv4 packet by default if TSO was
3349          * enabled.  82571 hardware supports TSO for IPv6 as well, so we can
3350          * no longer assume IPv4 and must check the protocol. */
3351         if (likely(skb->protocol == htons(ETH_P_IP)))
3352                 tx_flags |= E1000_TX_FLAGS_IPV4;
3353
3354         e1000_tx_queue(adapter, tx_ring, tx_flags,
3355                        e1000_tx_map(adapter, tx_ring, skb, first,
3356                                     max_per_txd, nr_frags, mss));
3357
3358         netdev->trans_start = jiffies;
3359
3360         /* Make sure there is space in the ring for the next send. */
3361         e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3362
3363         spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3364         return NETDEV_TX_OK;
3365 }
3366
3367 /**
3368  * e1000_tx_timeout - Respond to a Tx Hang
3369  * @netdev: network interface device structure
3370  **/
3371
3372 static void e1000_tx_timeout(struct net_device *netdev)
3373 {
3374         struct e1000_adapter *adapter = netdev_priv(netdev);
3375
3376         /* Do the reset outside of interrupt context */
3377         adapter->tx_timeout_count++;
3378         schedule_work(&adapter->reset_task);
3379 }
3380
3381 static void e1000_reset_task(struct work_struct *work)
3382 {
3383         struct e1000_adapter *adapter =
3384                 container_of(work, struct e1000_adapter, reset_task);
3385
3386         e1000_reinit_locked(adapter);
3387 }
3388
3389 /**
3390  * e1000_get_stats - Get System Network Statistics
3391  * @netdev: network interface device structure
3392  *
3393  * Returns the address of the device statistics structure.
3394  * The statistics are actually updated from the timer callback.
3395  **/
3396
3397 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3398 {
3399         struct e1000_adapter *adapter = netdev_priv(netdev);
3400
3401         /* only return the current stats */
3402         return &adapter->net_stats;
3403 }
3404
3405 /**
3406  * e1000_change_mtu - Change the Maximum Transfer Unit
3407  * @netdev: network interface device structure
3408  * @new_mtu: new value for maximum frame size
3409  *
3410  * Returns 0 on success, negative on failure
3411  **/
3412
3413 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3414 {
3415         struct e1000_adapter *adapter = netdev_priv(netdev);
3416         struct e1000_hw *hw = &adapter->hw;
3417         int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3418         u16 eeprom_data = 0;
3419
3420         if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3421             (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3422                 DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
3423                 return -EINVAL;
3424         }
3425
3426         /* Adapter-specific max frame size limits. */
3427         switch (hw->mac_type) {
3428         case e1000_undefined ... e1000_82542_rev2_1:
3429         case e1000_ich8lan:
3430                 if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
3431                         DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
3432                         return -EINVAL;
3433                 }
3434                 break;
3435         case e1000_82573:
3436                 /* Jumbo Frames not supported if:
3437                  * - this is not an 82573L device
3438                  * - ASPM is enabled in any way (0x1A bits 3:2) */
3439                 e1000_read_eeprom(hw, EEPROM_INIT_3GIO_3, 1,
3440                                   &eeprom_data);
3441                 if ((hw->device_id != E1000_DEV_ID_82573L) ||
3442                     (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) {
3443                         if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
3444                                 DPRINTK(PROBE, ERR,
3445                                         "Jumbo Frames not supported.\n");
3446                                 return -EINVAL;
3447                         }
3448                         break;
3449                 }
3450                 /* ERT will be enabled later to enable wire speed receives */
3451
3452                 /* fall through to get support */
3453         case e1000_82571:
3454         case e1000_82572:
3455         case e1000_80003es2lan:
3456 #define MAX_STD_JUMBO_FRAME_SIZE 9234
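                /* 9234 = 9216-byte MTU + 14-byte Ethernet header + 4-byte FCS,
                 * matching the "MTU > 9216" message below. */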
3457                 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3458                         DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
3459                         return -EINVAL;
3460                 }
3461                 break;
3462         default:
3463                 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3464                 break;
3465         }
3466
3467         /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3468          * means we reserve 2 more; this pushes us to allocate from the next
3469          * larger slab size,
3470          * e.g. RXBUFFER_2048 --> size-4096 slab */
3471
3472         if (max_frame <= E1000_RXBUFFER_256)
3473                 adapter->rx_buffer_len = E1000_RXBUFFER_256;
3474         else if (max_frame <= E1000_RXBUFFER_512)
3475                 adapter->rx_buffer_len = E1000_RXBUFFER_512;
3476         else if (max_frame <= E1000_RXBUFFER_1024)
3477                 adapter->rx_buffer_len = E1000_RXBUFFER_1024;
3478         else if (max_frame <= E1000_RXBUFFER_2048)
3479                 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3480         else if (max_frame <= E1000_RXBUFFER_4096)
3481                 adapter->rx_buffer_len = E1000_RXBUFFER_4096;
3482         else if (max_frame <= E1000_RXBUFFER_8192)
3483                 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
3484         else if (max_frame <= E1000_RXBUFFER_16384)
3485                 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3486
3487         /* adjust allocation if LPE protects us, and we aren't using SBP */
3488         if (!hw->tbi_compatibility_on &&
3489             ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
3490              (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3491                 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3492
3493         netdev->mtu = new_mtu;
3494         hw->max_frame_size = max_frame;
3495
3496         if (netif_running(netdev))
3497                 e1000_reinit_locked(adapter);
3498
3499         return 0;
3500 }
3501
3502 /**
3503  * e1000_update_stats - Update the board statistics counters
3504  * @adapter: board private structure
3505  **/
3506
3507 void e1000_update_stats(struct e1000_adapter *adapter)
3508 {
3509         struct e1000_hw *hw = &adapter->hw;
3510         struct pci_dev *pdev = adapter->pdev;
3511         unsigned long flags;
3512         u16 phy_tmp;
3513
3514 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3515
3516         /*
3517          * Prevent stats update while adapter is being reset, or if the pci
3518          * connection is down.
3519          */
3520         if (adapter->link_speed == 0)
3521                 return;
3522         if (pci_channel_offline(pdev))
3523                 return;
3524
3525         spin_lock_irqsave(&adapter->stats_lock, flags);
3526
3527         /* these counters are modified from e1000_tbi_adjust_stats,
3528          * called from the interrupt context, so they must only
3529          * be written while holding adapter->stats_lock
3530          */
3531
3532         adapter->stats.crcerrs += er32(CRCERRS);
3533         adapter->stats.gprc += er32(GPRC);
3534         adapter->stats.gorcl += er32(GORCL);
3535         adapter->stats.gorch += er32(GORCH);
3536         adapter->stats.bprc += er32(BPRC);
3537         adapter->stats.mprc += er32(MPRC);
3538         adapter->stats.roc += er32(ROC);
3539
3540         if (hw->mac_type != e1000_ich8lan) {
3541                 adapter->stats.prc64 += er32(PRC64);
3542                 adapter->stats.prc127 += er32(PRC127);
3543                 adapter->stats.prc255 += er32(PRC255);
3544                 adapter->stats.prc511 += er32(PRC511);
3545                 adapter->stats.prc1023 += er32(PRC1023);
3546                 adapter->stats.prc1522 += er32(PRC1522);
3547         }
3548
3549         adapter->stats.symerrs += er32(SYMERRS);
3550         adapter->stats.mpc += er32(MPC);
3551         adapter->stats.scc += er32(SCC);
3552         adapter->stats.ecol += er32(ECOL);
3553         adapter->stats.mcc += er32(MCC);
3554         adapter->stats.latecol += er32(LATECOL);
3555         adapter->stats.dc += er32(DC);
3556         adapter->stats.sec += er32(SEC);
3557         adapter->stats.rlec += er32(RLEC);
3558         adapter->stats.xonrxc += er32(XONRXC);
3559         adapter->stats.xontxc += er32(XONTXC);
3560         adapter->stats.xoffrxc += er32(XOFFRXC);
3561         adapter->stats.xofftxc += er32(XOFFTXC);
3562         adapter->stats.fcruc += er32(FCRUC);
3563         adapter->stats.gptc += er32(GPTC);
3564         adapter->stats.gotcl += er32(GOTCL);
3565         adapter->stats.gotch += er32(GOTCH);
3566         adapter->stats.rnbc += er32(RNBC);
3567         adapter->stats.ruc += er32(RUC);
3568         adapter->stats.rfc += er32(RFC);
3569         adapter->stats.rjc += er32(RJC);
3570         adapter->stats.torl += er32(TORL);
3571         adapter->stats.torh += er32(TORH);
3572         adapter->stats.totl += er32(TOTL);
3573         adapter->stats.toth += er32(TOTH);
3574         adapter->stats.tpr += er32(TPR);
3575
3576         if (hw->mac_type != e1000_ich8lan) {
3577                 adapter->stats.ptc64 += er32(PTC64);
3578                 adapter->stats.ptc127 += er32(PTC127);
3579                 adapter->stats.ptc255 += er32(PTC255);
3580                 adapter->stats.ptc511 += er32(PTC511);
3581                 adapter->stats.ptc1023 += er32(PTC1023);
3582                 adapter->stats.ptc1522 += er32(PTC1522);
3583         }
3584
3585         adapter->stats.mptc += er32(MPTC);
3586         adapter->stats.bptc += er32(BPTC);
3587
3588         /* used for adaptive IFS */
3589
3590         hw->tx_packet_delta = er32(TPT);
3591         adapter->stats.tpt += hw->tx_packet_delta;
3592         hw->collision_delta = er32(COLC);
3593         adapter->stats.colc += hw->collision_delta;
3594
3595         if (hw->mac_type >= e1000_82543) {
3596                 adapter->stats.algnerrc += er32(ALGNERRC);
3597                 adapter->stats.rxerrc += er32(RXERRC);
3598                 adapter->stats.tncrs += er32(TNCRS);
3599                 adapter->stats.cexterr += er32(CEXTERR);
3600                 adapter->stats.tsctc += er32(TSCTC);
3601                 adapter->stats.tsctfc += er32(TSCTFC);
3602         }
3603         if (hw->mac_type > e1000_82547_rev_2) {
3604                 adapter->stats.iac += er32(IAC);
3605                 adapter->stats.icrxoc += er32(ICRXOC);
3606
3607                 if (hw->mac_type != e1000_ich8lan) {
3608                         adapter->stats.icrxptc += er32(ICRXPTC);
3609                         adapter->stats.icrxatc += er32(ICRXATC);
3610                         adapter->stats.ictxptc += er32(ICTXPTC);
3611                         adapter->stats.ictxatc += er32(ICTXATC);
3612                         adapter->stats.ictxqec += er32(ICTXQEC);
3613                         adapter->stats.ictxqmtc += er32(ICTXQMTC);
3614                         adapter->stats.icrxdmtc += er32(ICRXDMTC);
3615                 }
3616         }
3617
3618         /* Fill out the OS statistics structure */
3619         adapter->net_stats.multicast = adapter->stats.mprc;
3620         adapter->net_stats.collisions = adapter->stats.colc;
3621
3622         /* Rx Errors */
3623
3624         /* RLEC on some newer hardware can be incorrect, so build
3625          * our own version based on RUC and ROC */
3626         adapter->net_stats.rx_errors = adapter->stats.rxerrc +
3627                 adapter->stats.crcerrs + adapter->stats.algnerrc +
3628                 adapter->stats.ruc + adapter->stats.roc +
3629                 adapter->stats.cexterr;
3630         adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3631         adapter->net_stats.rx_length_errors = adapter->stats.rlerrc;
3632         adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
3633         adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
3634         adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
3635
3636         /* Tx Errors */
3637         adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3638         adapter->net_stats.tx_errors = adapter->stats.txerrc;
3639         adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
3640         adapter->net_stats.tx_window_errors = adapter->stats.latecol;
3641         adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
3642         if (hw->bad_tx_carr_stats_fd &&
3643             adapter->link_duplex == FULL_DUPLEX) {
3644                 adapter->net_stats.tx_carrier_errors = 0;
3645                 adapter->stats.tncrs = 0;
3646         }
3647
3648         /* Tx Dropped needs to be maintained elsewhere */
3649
3650         /* Phy Stats */
3651         if (hw->media_type == e1000_media_type_copper) {
3652                 if ((adapter->link_speed == SPEED_1000) &&
3653                    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3654                         phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3655                         adapter->phy_stats.idle_errors += phy_tmp;
3656                 }
3657
3658                 if ((hw->mac_type <= e1000_82546) &&
3659                    (hw->phy_type == e1000_phy_m88) &&
3660                    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3661                         adapter->phy_stats.receive_errors += phy_tmp;
3662         }
3663
3664         /* Management Stats */
3665         if (hw->has_smbus) {
3666                 adapter->stats.mgptc += er32(MGTPTC);
3667                 adapter->stats.mgprc += er32(MGTPRC);
3668                 adapter->stats.mgpdc += er32(MGTPDC);
3669         }
3670
3671         spin_unlock_irqrestore(&adapter->stats_lock, flags);
3672 }
3673
3674 /**
3675  * e1000_intr_msi - Interrupt Handler
3676  * @irq: interrupt number
3677  * @data: pointer to a network interface device structure
3678  **/
3679
3680 static irqreturn_t e1000_intr_msi(int irq, void *data)
3681 {
3682         struct net_device *netdev = data;
3683         struct e1000_adapter *adapter = netdev_priv(netdev);
3684         struct e1000_hw *hw = &adapter->hw;
3685         u32 icr = er32(ICR);
3686
3687         /* in NAPI mode read ICR disables interrupts using IAM */
3688
3689         if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3690                 hw->get_link_status = 1;
3691                 /* 80003ES2LAN packet buffer workaround for the link down
3692                  * event: disable receives here in the ISR and reset the
3693                  * adapter in the watchdog */
3694                 if (netif_carrier_ok(netdev) &&
3695                     (hw->mac_type == e1000_80003es2lan)) {
3696                         /* disable receives */
3697                         u32 rctl = er32(RCTL);
3698                         ew32(RCTL, rctl & ~E1000_RCTL_EN);
3699                 }
3700                 /* guard against interrupt when we're going down */
3701                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3702                         mod_timer(&adapter->watchdog_timer, jiffies + 1);
3703         }
3704
3705         if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
3706                 adapter->total_tx_bytes = 0;
3707                 adapter->total_tx_packets = 0;
3708                 adapter->total_rx_bytes = 0;
3709                 adapter->total_rx_packets = 0;
3710                 __netif_rx_schedule(netdev, &adapter->napi);
3711         } else
3712                 e1000_irq_enable(adapter);
3713
3714         return IRQ_HANDLED;
3715 }
3716
3717 /**
3718  * e1000_intr - Interrupt Handler
3719  * @irq: interrupt number
3720  * @data: pointer to a network interface device structure
3721  **/
3722
3723 static irqreturn_t e1000_intr(int irq, void *data)
3724 {
3725         struct net_device *netdev = data;
3726         struct e1000_adapter *adapter = netdev_priv(netdev);
3727         struct e1000_hw *hw = &adapter->hw;
3728         u32 rctl, icr = er32(ICR);
3729
3730         if (unlikely(!icr))
3731                 return IRQ_NONE;  /* Not our interrupt */
3732
3733         /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
3734          * not set, then the adapter didn't send an interrupt */
3735         if (unlikely(hw->mac_type >= e1000_82571 &&
3736                      !(icr & E1000_ICR_INT_ASSERTED)))
3737                 return IRQ_NONE;
3738
3739         /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
3740          * need for the IMC write */
3741
3742         if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3743                 hw->get_link_status = 1;
3744                 /* 80003ES2LAN packet buffer workaround
3745                  * for the link down event:
3746                  * disable receives here in the ISR and
3747                  * reset the adapter in the watchdog
3748                  */
3749                 if (netif_carrier_ok(netdev) &&
3750                     (hw->mac_type == e1000_80003es2lan)) {
3751                         /* disable receives */
3752                         rctl = er32(RCTL);
3753                         ew32(RCTL, rctl & ~E1000_RCTL_EN);
3754                 }
3755                 /* guard against interrupt when we're going down */
3756                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3757                         mod_timer(&adapter->watchdog_timer, jiffies + 1);
3758         }
3759
3760         if (unlikely(hw->mac_type < e1000_82571)) {
3761                 /* disable interrupts, without the synchronize_irq() step */
3762                 ew32(IMC, ~0);
3763                 E1000_WRITE_FLUSH();
3764         }
3765         if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
3766                 adapter->total_tx_bytes = 0;
3767                 adapter->total_tx_packets = 0;
3768                 adapter->total_rx_bytes = 0;
3769                 adapter->total_rx_packets = 0;
3770                 __netif_rx_schedule(netdev, &adapter->napi);
3771         } else
3772                 /* this really should not happen!  If it does, it is basically
3773                  * a bug, but not a hard error, so enable ints and continue */
3774                 e1000_irq_enable(adapter);
3775
3776         return IRQ_HANDLED;
3777 }
3778
3779 /**
3780  * e1000_clean - NAPI Rx polling callback
3781  * @adapter: board private structure
3782  **/
3783 static int e1000_clean(struct napi_struct *napi, int budget)
3784 {
3785         struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
3786         struct net_device *poll_dev = adapter->netdev;
3787         int tx_cleaned = 0, work_done = 0;
3788
3789         /* Must NOT use netdev_priv macro here. */
3790         adapter = poll_dev->priv;
3791
3792         /* e1000_clean is called per-cpu.  This lock protects
3793          * tx_ring[0] from being cleaned by multiple cpus
3794          * simultaneously.  A failure obtaining the lock means
3795          * tx_ring[0] is currently being cleaned anyway. */
3796         if (spin_trylock(&adapter->tx_queue_lock)) {
3797                 tx_cleaned = e1000_clean_tx_irq(adapter,
3798                                                 &adapter->tx_ring[0]);
3799                 spin_unlock(&adapter->tx_queue_lock);
3800         }
3801
3802         adapter->clean_rx(adapter, &adapter->rx_ring[0],
3803                           &work_done, budget);
3804
3805         if (tx_cleaned)
3806                 work_done = budget;
3807
3808         /* If budget not fully consumed, exit the polling mode */
3809         if (work_done < budget) {
3810                 if (likely(adapter->itr_setting & 3))
3811                         e1000_set_itr(adapter);
3812                 netif_rx_complete(poll_dev, napi);
3813                 e1000_irq_enable(adapter);
3814         }
3815
3816         return work_done;
3817 }
3818
3819 /**
3820  * e1000_clean_tx_irq - Reclaim resources after transmit completes
3821  * @adapter: board private structure
3822  **/
3823 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3824                                struct e1000_tx_ring *tx_ring)
3825 {
3826         struct e1000_hw *hw = &adapter->hw;
3827         struct net_device *netdev = adapter->netdev;
3828         struct e1000_tx_desc *tx_desc, *eop_desc;
3829         struct e1000_buffer *buffer_info;
3830         unsigned int i, eop;
3831         unsigned int count = 0;
3832         bool cleaned = false;
3833         unsigned int total_tx_bytes=0, total_tx_packets=0;
3834
3835         i = tx_ring->next_to_clean;
3836         eop = tx_ring->buffer_info[i].next_to_watch;
3837         eop_desc = E1000_TX_DESC(*tx_ring, eop);
3838
3839         while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
3840                 for (cleaned = false; !cleaned; ) {
3841                         tx_desc = E1000_TX_DESC(*tx_ring, i);
3842                         buffer_info = &tx_ring->buffer_info[i];
3843                         cleaned = (i == eop);
3844
3845                         if (cleaned) {
3846                                 struct sk_buff *skb = buffer_info->skb;
3847                                 unsigned int segs, bytecount;
3848                                 segs = skb_shinfo(skb)->gso_segs ?: 1;
3849                                 /* count the header bytes repeated by each extra TSO segment */
3850                                 bytecount = ((segs - 1) * skb_headlen(skb)) +
3851                                             skb->len;
3852                                 total_tx_packets += segs;
3853                                 total_tx_bytes += bytecount;
3854                         }
3855                         e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3856                         tx_desc->upper.data = 0;
3857
3858                         if (unlikely(++i == tx_ring->count)) i = 0;
3859                 }
3860
3861                 eop = tx_ring->buffer_info[i].next_to_watch;
3862                 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3863 #define E1000_TX_WEIGHT 64
3864                 /* weight of a sort for tx, to avoid endless transmit cleanup */
3865                 if (count++ == E1000_TX_WEIGHT)
3866                         break;
3867         }
3868
3869         tx_ring->next_to_clean = i;
3870
3871 #define TX_WAKE_THRESHOLD 32
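        /* Only wake the queue once at least 32 descriptors are free, so we do
         * not bounce between stopping and waking on every reclaimed packet. */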
3872         if (unlikely(cleaned && netif_carrier_ok(netdev) &&
3873                      E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3874                 /* Make sure that anybody stopping the queue after this
3875                  * sees the new next_to_clean.
3876                  */
3877                 smp_mb();
3878                 if (netif_queue_stopped(netdev)) {
3879                         netif_wake_queue(netdev);
3880                         ++adapter->restart_queue;
3881                 }
3882         }
3883
3884         if (adapter->detect_tx_hung) {
3885                 /* Detect a transmit hang in hardware; this serializes the
3886                  * check with the clearing of time_stamp and the movement of i */
3887                 adapter->detect_tx_hung = false;
3888                 if (tx_ring->buffer_info[eop].dma &&
3889                     time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3890                                (adapter->tx_timeout_factor * HZ))
3891                     && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3892
3893                         /* detected Tx unit hang */
3894                         DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
3895                                         "  Tx Queue             <%lu>\n"
3896                                         "  TDH                  <%x>\n"
3897                                         "  TDT                  <%x>\n"
3898                                         "  next_to_use          <%x>\n"
3899                                         "  next_to_clean        <%x>\n"
3900                                         "buffer_info[next_to_clean]\n"
3901                                         "  time_stamp           <%lx>\n"
3902                                         "  next_to_watch        <%x>\n"
3903                                         "  jiffies              <%lx>\n"
3904                                         "  next_to_watch.status <%x>\n",
3905                                 /* pointer subtraction yields the ring index */
3906                                 (unsigned long)(tx_ring - adapter->tx_ring),
3907                                 readl(hw->hw_addr + tx_ring->tdh),
3908                                 readl(hw->hw_addr + tx_ring->tdt),
3909                                 tx_ring->next_to_use,
3910                                 tx_ring->next_to_clean,
3911                                 tx_ring->buffer_info[eop].time_stamp,
3912                                 eop,
3913                                 jiffies,
3914                                 eop_desc->upper.fields.status);
3915                         netif_stop_queue(netdev);
3916                 }
3917         }
3918         adapter->total_tx_bytes += total_tx_bytes;
3919         adapter->total_tx_packets += total_tx_packets;
3920         adapter->net_stats.tx_bytes += total_tx_bytes;
3921         adapter->net_stats.tx_packets += total_tx_packets;
3922         return cleaned;
3923 }
3924
3925 /**
3926  * e1000_rx_checksum - Receive Checksum Offload for 82543
3927  * @adapter:     board private structure
3928  * @status_err:  receive descriptor status and error fields
3929  * @csum:        receive descriptor csum field
3930  * @sk_buff:     socket buffer with received data
3931  **/
3932
3933 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3934                               u32 csum, struct sk_buff *skb)
3935 {
3936         struct e1000_hw *hw = &adapter->hw;
3937         u16 status = (u16)status_err;
3938         u8 errors = (u8)(status_err >> 24);
3939         skb->ip_summed = CHECKSUM_NONE;
3940
3941         /* 82543 or newer only */
3942         if (unlikely(hw->mac_type < e1000_82543)) return;
3943         /* Ignore Checksum bit is set */
3944         if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
3945         /* TCP/UDP checksum error bit is set */
3946         if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3947                 /* let the stack verify checksum errors */
3948                 adapter->hw_csum_err++;
3949                 return;
3950         }
3951         /* TCP/UDP Checksum has not been calculated */
3952         if (hw->mac_type <= e1000_82547_rev_2) {
3953                 if (!(status & E1000_RXD_STAT_TCPCS))
3954                         return;
3955         } else {
3956                 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
3957                         return;
3958         }
3959         /* It must be a TCP or UDP packet with a valid checksum */
3960         if (likely(status & E1000_RXD_STAT_TCPCS)) {
3961                 /* TCP checksum is good */
3962                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3963         } else if (hw->mac_type > e1000_82547_rev_2) {
3964                 /* IP fragment with UDP payload */
3965                 /* Hardware complements the payload checksum, so we undo it
3966                  * and then put the value in host order for further stack use.
3967                  */
3968                 __sum16 sum = (__force __sum16)htons(csum);
3969                 skb->csum = csum_unfold(~sum);
3970                 skb->ip_summed = CHECKSUM_COMPLETE;
3971         }
3972         adapter->hw_csum_good++;
3973 }
3974
3975 /**
3976  * e1000_clean_rx_irq - Send received data up the network stack; legacy
3977  * @adapter: board private structure
3978  **/
3979 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
3980                                struct e1000_rx_ring *rx_ring,
3981                                int *work_done, int work_to_do)
3982 {
3983         struct e1000_hw *hw = &adapter->hw;
3984         struct net_device *netdev = adapter->netdev;
3985         struct pci_dev *pdev = adapter->pdev;
3986         struct e1000_rx_desc *rx_desc, *next_rxd;
3987         struct e1000_buffer *buffer_info, *next_buffer;
3988         unsigned long flags;
3989         u32 length;
3990         u8 last_byte;
3991         unsigned int i;
3992         int cleaned_count = 0;
3993         bool cleaned = false;
3994         unsigned int total_rx_bytes=0, total_rx_packets=0;
3995
3996         i = rx_ring->next_to_clean;
3997         rx_desc = E1000_RX_DESC(*rx_ring, i);
3998         buffer_info = &rx_ring->buffer_info[i];
3999
4000         while (rx_desc->status & E1000_RXD_STAT_DD) {
4001                 struct sk_buff *skb;
4002                 u8 status;
4003
4004                 if (*work_done >= work_to_do)
4005                         break;
4006                 (*work_done)++;
4007
4008                 status = rx_desc->status;
4009                 skb = buffer_info->skb;
4010                 buffer_info->skb = NULL;
4011
4012                 prefetch(skb->data - NET_IP_ALIGN);
4013
4014                 if (++i == rx_ring->count) i = 0;
4015                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4016                 prefetch(next_rxd);
4017
4018                 next_buffer = &rx_ring->buffer_info[i];
4019
4020                 cleaned = true;
4021                 cleaned_count++;
4022                 pci_unmap_single(pdev,
4023                                  buffer_info->dma,
4024                                  buffer_info->length,
4025                                  PCI_DMA_FROMDEVICE);
4026
4027                 length = le16_to_cpu(rx_desc->length);
4028
4029                 if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
4030                         /* All receives must fit into a single buffer */
4031                         E1000_DBG("%s: Receive packet consumed multiple"
4032                                   " buffers\n", netdev->name);
4033                         /* recycle */
4034                         buffer_info->skb = skb;
4035                         goto next_desc;
4036                 }
4037
4038                 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4039                         last_byte = *(skb->data + length - 1);
4040                         if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4041                                        last_byte)) {
4042                                 spin_lock_irqsave(&adapter->stats_lock, flags);
4043                                 e1000_tbi_adjust_stats(hw, &adapter->stats,
4044                                                        length, skb->data);
4045                                 spin_unlock_irqrestore(&adapter->stats_lock,
4046                                                        flags);
4047                                 length--;
4048                         } else {
4049                                 /* recycle */
4050                                 buffer_info->skb = skb;
4051                                 goto next_desc;
4052                         }
4053                 }
4054
4055                 /* adjust length to remove the Ethernet CRC; this must be
4056                  * done after the TBI_ACCEPT workaround above */
4057                 length -= 4;
4058
4059                 /* probably a little skewed due to removing CRC */
4060                 total_rx_bytes += length;
4061                 total_rx_packets++;
4062
4063                 /* code added for copybreak; this should improve
4064                  * performance for small packets with large amounts
4065                  * of reassembly being done in the stack */
4066                 if (length < copybreak) {
4067                         struct sk_buff *new_skb =
4068                             netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
4069                         if (new_skb) {
4070                                 skb_reserve(new_skb, NET_IP_ALIGN);
4071                                 skb_copy_to_linear_data_offset(new_skb,
4072                                                                -NET_IP_ALIGN,
4073                                                                (skb->data -
4074                                                                 NET_IP_ALIGN),
4075                                                                (length +
4076                                                                 NET_IP_ALIGN));
4077                                 /* save the skb in buffer_info as good */
4078                                 buffer_info->skb = skb;
4079                                 skb = new_skb;
4080                         }
4081                         /* else just continue with the old one */
4082                 }
4083                 /* end copybreak code */
4084                 skb_put(skb, length);
4085
4086                 /* Receive Checksum Offload */
4087                 e1000_rx_checksum(adapter,
4088                                   (u32)(status) |
4089                                   ((u32)(rx_desc->errors) << 24),
4090                                   le16_to_cpu(rx_desc->csum), skb);
4091
4092                 skb->protocol = eth_type_trans(skb, netdev);
4093
4094                 if (unlikely(adapter->vlgrp &&
4095                             (status & E1000_RXD_STAT_VP))) {
4096                         vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
4097                                                  le16_to_cpu(rx_desc->special));
4098                 } else {
4099                         netif_receive_skb(skb);
4100                 }
4101
4102                 netdev->last_rx = jiffies;
4103
4104 next_desc:
4105                 rx_desc->status = 0;
4106
4107                 /* return some buffers to hardware; one at a time is too slow */
4108                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4109                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4110                         cleaned_count = 0;
4111                 }
4112
4113                 /* use prefetched values */
4114                 rx_desc = next_rxd;
4115                 buffer_info = next_buffer;
4116         }
4117         rx_ring->next_to_clean = i;
4118
4119         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4120         if (cleaned_count)
4121                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4122
4123         adapter->total_rx_packets += total_rx_packets;
4124         adapter->total_rx_bytes += total_rx_bytes;
4125         adapter->net_stats.rx_bytes += total_rx_bytes;
4126         adapter->net_stats.rx_packets += total_rx_packets;
4127         return cleaned;
4128 }
4129
4130 /**
4131  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4132  * @adapter: address of board private structure
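 * @rx_ring: ring on which the newly allocated buffers are placed
 * @cleaned_count: number of buffers to allocate, map and hand back to hardware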
4133  **/
4134
4135 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4136                                    struct e1000_rx_ring *rx_ring,
4137                                    int cleaned_count)
4138 {
4139         struct e1000_hw *hw = &adapter->hw;
4140         struct net_device *netdev = adapter->netdev;
4141         struct pci_dev *pdev = adapter->pdev;
4142         struct e1000_rx_desc *rx_desc;
4143         struct e1000_buffer *buffer_info;
4144         struct sk_buff *skb;
4145         unsigned int i;
4146         unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
4147
4148         i = rx_ring->next_to_use;
4149         buffer_info = &rx_ring->buffer_info[i];
4150
4151         while (cleaned_count--) {
4152                 skb = buffer_info->skb;
4153                 if (skb) {
4154                         skb_trim(skb, 0);
4155                         goto map_skb;
4156                 }
4157
4158                 skb = netdev_alloc_skb(netdev, bufsz);
4159                 if (unlikely(!skb)) {
4160                         /* Better luck next round */
4161                         adapter->alloc_rx_buff_failed++;
4162                         break;
4163                 }
4164
4165                 /* Fix for errata 23, can't cross 64kB boundary */
4166                 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4167                         struct sk_buff *oldskb = skb;
4168                         DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
4169                                              "at %p\n", bufsz, skb->data);
4170                         /* Try again, without freeing the previous */
4171                         skb = netdev_alloc_skb(netdev, bufsz);
4172                         /* Failed allocation, critical failure */
4173                         if (!skb) {
4174                                 dev_kfree_skb(oldskb);
4175                                 break;
4176                         }
4177
4178                         if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4179                                 /* give up */
4180                                 dev_kfree_skb(skb);
4181                                 dev_kfree_skb(oldskb);
4182                                 break; /* while !buffer_info->skb */
4183                         }
4184
4185                         /* Use new allocation */
4186                         dev_kfree_skb(oldskb);
4187                 }
4188                 /* Make buffer alignment 2 beyond a 16 byte boundary;
4189                  * this will result in a 16 byte aligned IP header after
4190                  * the 14 byte MAC header is removed
4191                  */
4192                 skb_reserve(skb, NET_IP_ALIGN);
4193
4194                 buffer_info->skb = skb;
4195                 buffer_info->length = adapter->rx_buffer_len;
4196 map_skb:
4197                 buffer_info->dma = pci_map_single(pdev,
4198                                                   skb->data,
4199                                                   adapter->rx_buffer_len,
4200                                                   PCI_DMA_FROMDEVICE);
4201
4202                 /* Fix for errata 23, can't cross 64kB boundary */
4203                 if (!e1000_check_64k_bound(adapter,
4204                                         (void *)(unsigned long)buffer_info->dma,
4205                                         adapter->rx_buffer_len)) {
4206                         DPRINTK(RX_ERR, ERR,
4207                                 "dma align check failed: %u bytes at %p\n",
4208                                 adapter->rx_buffer_len,
4209                                 (void *)(unsigned long)buffer_info->dma);
4210                         dev_kfree_skb(skb);
4211                         buffer_info->skb = NULL;
4212
4213                         pci_unmap_single(pdev, buffer_info->dma,
4214                                          adapter->rx_buffer_len,
4215                                          PCI_DMA_FROMDEVICE);
4216
4217                         break; /* while !buffer_info->skb */
4218                 }
4219                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4220                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4221
4222                 if (unlikely(++i == rx_ring->count))
4223                         i = 0;
4224                 buffer_info = &rx_ring->buffer_info[i];
4225         }
4226
4227         if (likely(rx_ring->next_to_use != i)) {
4228                 rx_ring->next_to_use = i;
4229                 if (unlikely(i-- == 0))
4230                         i = (rx_ring->count - 1);
4231
4232                 /* Force memory writes to complete before letting h/w
4233                  * know there are new descriptors to fetch.  (Only
4234                  * applicable for weak-ordered memory model archs,
4235                  * such as IA-64). */
4236                 wmb();
4237                 writel(i, hw->hw_addr + rx_ring->rdt);
4238         }
4239 }
4240
4241 /**
4242  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4243  * @adapter: board private structure
4244  **/
4245
4246 static void e1000_smartspeed(struct e1000_adapter *adapter)
4247 {
4248         struct e1000_hw *hw = &adapter->hw;
4249         u16 phy_status;
4250         u16 phy_ctrl;
4251
4252         if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4253            !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4254                 return;
4255
4256         if (adapter->smartspeed == 0) {
4257                 /* If Master/Slave config fault is asserted twice,
4258                  * we assume back-to-back */
4259                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4260                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4261                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4262                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4263                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4264                 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4265                         phy_ctrl &= ~CR_1000T_MS_ENABLE;
4266                         e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4267                                             phy_ctrl);
4268                         adapter->smartspeed++;
4269                         if (!e1000_phy_setup_autoneg(hw) &&
4270                            !e1000_read_phy_reg(hw, PHY_CTRL,
4271                                                &phy_ctrl)) {
4272                                 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4273                                              MII_CR_RESTART_AUTO_NEG);
4274                                 e1000_write_phy_reg(hw, PHY_CTRL,
4275                                                     phy_ctrl);
4276                         }
4277                 }
4278                 return;
4279         } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4280                 /* If still no link, perhaps using 2/3 pair cable */
4281                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4282                 phy_ctrl |= CR_1000T_MS_ENABLE;
4283                 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4284                 if (!e1000_phy_setup_autoneg(hw) &&
4285                    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4286                         phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4287                                      MII_CR_RESTART_AUTO_NEG);
4288                         e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4289                 }
4290         }
4291         /* Restart process after E1000_SMARTSPEED_MAX iterations */
4292         if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4293                 adapter->smartspeed = 0;
4294 }
4295
4296 /**
4297  * e1000_ioctl - handle ioctl requests for the net device
4298  * @netdev: network interface device structure
4299  * @ifr: pointer to the ioctl request data
4300  * @cmd: ioctl command number
4301  **/
4302
4303 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4304 {
4305         switch (cmd) {
4306         case SIOCGMIIPHY:
4307         case SIOCGMIIREG:
4308         case SIOCSMIIREG:
4309                 return e1000_mii_ioctl(netdev, ifr, cmd);
4310         default:
4311                 return -EOPNOTSUPP;
4312         }
4313 }
4314
4315 /**
4316  * e1000_mii_ioctl - read or write MII PHY registers via ioctl
4317  * @netdev: network interface device structure
4318  * @ifr: pointer to the MII ioctl request data
4319  * @cmd: one of SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
4320  **/
4321
4322 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4323                            int cmd)
4324 {
4325         struct e1000_adapter *adapter = netdev_priv(netdev);
4326         struct e1000_hw *hw = &adapter->hw;
4327         struct mii_ioctl_data *data = if_mii(ifr);
4328         int retval;
4329         u16 mii_reg;
4330         u16 spddplx;
4331         unsigned long flags;
4332
4333         if (hw->media_type != e1000_media_type_copper)
4334                 return -EOPNOTSUPP;
4335
4336         switch (cmd) {
4337         case SIOCGMIIPHY:
4338                 data->phy_id = hw->phy_addr;
4339                 break;
4340         case SIOCGMIIREG:
4341                 if (!capable(CAP_NET_ADMIN))
4342                         return -EPERM;
4343                 spin_lock_irqsave(&adapter->stats_lock, flags);
4344                 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4345                                    &data->val_out)) {
4346                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4347                         return -EIO;
4348                 }
4349                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4350                 break;
4351         case SIOCSMIIREG:
4352                 if (!capable(CAP_NET_ADMIN))
4353                         return -EPERM;
4354                 if (data->reg_num & ~(0x1F))
4355                         return -EFAULT;
4356                 mii_reg = data->val_in;
4357                 spin_lock_irqsave(&adapter->stats_lock, flags);
4358                 if (e1000_write_phy_reg(hw, data->reg_num,
4359                                         mii_reg)) {
4360                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4361                         return -EIO;
4362                 }
4363                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4364                 if (hw->media_type == e1000_media_type_copper) {
4365                         switch (data->reg_num) {
4366                         case PHY_CTRL:
4367                                 if (mii_reg & MII_CR_POWER_DOWN)
4368                                         break;
4369                                 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4370                                         hw->autoneg = 1;
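                                        /* 0x2F = advertise all supported
                                         * speed/duplex combinations */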
4371                                         hw->autoneg_advertised = 0x2F;
4372                                 } else {
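                                        /* forced mode: decode speed from
                                         * BMCR bits 0x40/0x2000 and duplex
                                         * from bit 0x100 */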
4373                                         if (mii_reg & 0x40)
4374                                                 spddplx = SPEED_1000;
4375                                         else if (mii_reg & 0x2000)
4376                                                 spddplx = SPEED_100;
4377                                         else
4378                                                 spddplx = SPEED_10;
4379                                         spddplx += (mii_reg & 0x100)
4380                                                    ? DUPLEX_FULL :
4381                                                    DUPLEX_HALF;
4382                                         retval = e1000_set_spd_dplx(adapter,
4383                                                                     spddplx);
4384                                         if (retval)
4385                                                 return retval;
4386                                 }
4387                                 if (netif_running(adapter->netdev))
4388                                         e1000_reinit_locked(adapter);
4389                                 else
4390                                         e1000_reset(adapter);
4391                                 break;
4392                         case M88E1000_PHY_SPEC_CTRL:
4393                         case M88E1000_EXT_PHY_SPEC_CTRL:
4394                                 if (e1000_phy_reset(hw))
4395                                         return -EIO;
4396                                 break;
4397                         }
4398                 } else {
4399                         switch (data->reg_num) {
4400                         case PHY_CTRL:
4401                                 if (mii_reg & MII_CR_POWER_DOWN)
4402                                         break;
4403                                 if (netif_running(adapter->netdev))
4404                                         e1000_reinit_locked(adapter);
4405                                 else
4406                                         e1000_reset(adapter);
4407                                 break;
4408                         }
4409                 }
4410                 break;
4411         default:
4412                 return -EOPNOTSUPP;
4413         }
4414         return E1000_SUCCESS;
4415 }
4416
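/* PCI, PCI-X and PCI Express configuration helpers used by the shared
 * e1000_hw code.
 */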
4417 void e1000_pci_set_mwi(struct e1000_hw *hw)
4418 {
4419         struct e1000_adapter *adapter = hw->back;
4420         int ret_val = pci_set_mwi(adapter->pdev);
4421
4422         if (ret_val)
4423                 DPRINTK(PROBE, ERR, "Error in setting MWI\n");
4424 }
4425
4426 void e1000_pci_clear_mwi(struct e1000_hw *hw)
4427 {
4428         struct e1000_adapter *adapter = hw->back;
4429
4430         pci_clear_mwi(adapter->pdev);
4431 }
4432
4433 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4434 {
4435         struct e1000_adapter *adapter = hw->back;
4436         return pcix_get_mmrbc(adapter->pdev);
4437 }
4438
4439 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4440 {
4441         struct e1000_adapter *adapter = hw->back;
4442         pcix_set_mmrbc(adapter->pdev, mmrbc);
4443 }
4444
4445 s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
4446 {
4447         struct e1000_adapter *adapter = hw->back;
4448         u16 cap_offset;
4449
4450         cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
4451         if (!cap_offset)
4452                 return -E1000_ERR_CONFIG;
4453
4454         pci_read_config_word(adapter->pdev, cap_offset + reg, value);
4455
4456         return E1000_SUCCESS;
4457 }
4458
4459 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4460 {
4461         outl(value, port);
4462 }
4463
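/**
 * e1000_vlan_rx_register - enable or disable hardware VLAN offload
 * @netdev: network interface device structure
 * @grp: VLAN group from the 802.1q layer, or NULL to disable tag insert/strip
 **/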
4464 static void e1000_vlan_rx_register(struct net_device *netdev,
4465                                    struct vlan_group *grp)
4466 {
4467         struct e1000_adapter *adapter = netdev_priv(netdev);
4468         struct e1000_hw *hw = &adapter->hw;
4469         u32 ctrl, rctl;
4470
4471         if (!test_bit(__E1000_DOWN, &adapter->flags))
4472                 e1000_irq_disable(adapter);
4473         adapter->vlgrp = grp;
4474
4475         if (grp) {
4476                 /* enable VLAN tag insert/strip */
4477                 ctrl = er32(CTRL);
4478                 ctrl |= E1000_CTRL_VME;
4479                 ew32(CTRL, ctrl);
4480
4481                 if (adapter->hw.mac_type != e1000_ich8lan) {
4482                         /* enable VLAN receive filtering */
4483                         rctl = er32(RCTL);
4484                         rctl &= ~E1000_RCTL_CFIEN;
4485                         ew32(RCTL, rctl);
4486                         e1000_update_mng_vlan(adapter);
4487                 }
4488         } else {
4489                 /* disable VLAN tag insert/strip */
4490                 ctrl = er32(CTRL);
4491                 ctrl &= ~E1000_CTRL_VME;
4492                 ew32(CTRL, ctrl);
4493
4494                 if (adapter->hw.mac_type != e1000_ich8lan) {
4495                         if (adapter->mng_vlan_id !=
4496                             (u16)E1000_MNG_VLAN_NONE) {
4497                                 e1000_vlan_rx_kill_vid(netdev,
4498                                                        adapter->mng_vlan_id);
4499                                 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4500                         }
4501                 }
4502         }
4503
4504         if (!test_bit(__E1000_DOWN, &adapter->flags))
4505                 e1000_irq_enable(adapter);
4506 }
4507
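/**
 * e1000_vlan_rx_add_vid - add a VLAN ID to the hardware filter table
 * @netdev: network interface device structure
 * @vid: VLAN ID to be added to the VFTA
 **/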
4508 static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
4509 {
4510         struct e1000_adapter *adapter = netdev_priv(netdev);
4511         struct e1000_hw *hw = &adapter->hw;
4512         u32 vfta, index;
4513
4514         if ((hw->mng_cookie.status &
4515              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4516             (vid == adapter->mng_vlan_id))
4517                 return;
4518         /* add VID to filter table */
4519         index = (vid >> 5) & 0x7F;
4520         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4521         vfta |= (1 << (vid & 0x1F));
4522         e1000_write_vfta(hw, index, vfta);
4523 }
4524
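/**
 * e1000_vlan_rx_kill_vid - remove a VLAN ID from the hardware filter table
 * @netdev: network interface device structure
 * @vid: VLAN ID to be removed from the VFTA
 **/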
4525 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
4526 {
4527         struct e1000_adapter *adapter = netdev_priv(netdev);
4528         struct e1000_hw *hw = &adapter->hw;
4529         u32 vfta, index;
4530
4531         if (!test_bit(__E1000_DOWN, &adapter->flags))
4532                 e1000_irq_disable(adapter);
4533         vlan_group_set_device(adapter->vlgrp, vid, NULL);
4534         if (!test_bit(__E1000_DOWN, &adapter->flags))
4535                 e1000_irq_enable(adapter);
4536
4537         if ((hw->mng_cookie.status &
4538              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4539             (vid == adapter->mng_vlan_id)) {
4540                 /* release control to f/w */
4541                 e1000_release_hw_control(adapter);
4542                 return;
4543         }
4544
4545         /* remove VID from filter table */
4546         index = (vid >> 5) & 0x7F;
4547         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4548         vfta &= ~(1 << (vid & 0x1F));
4549         e1000_write_vfta(hw, index, vfta);
4550 }
4551
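/**
 * e1000_restore_vlan - re-register the VLAN group and re-add all VLAN IDs
 * @adapter: board private structure
 *
 * Used after a reset to restore the hardware VLAN filter state.
 **/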
4552 static void e1000_restore_vlan(struct e1000_adapter *adapter)
4553 {
4554         e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
4555
4556         if (adapter->vlgrp) {
4557                 u16 vid;
4558                 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
4559                         if (!vlan_group_get_device(adapter->vlgrp, vid))
4560                                 continue;
4561                         e1000_vlan_rx_add_vid(adapter->netdev, vid);
4562                 }
4563         }
4564 }
4565
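/**
 * e1000_set_spd_dplx - force a speed/duplex setting on the adapter
 * @adapter: board private structure
 * @spddplx: sum of a SPEED_* and a DUPLEX_* value describing the forced mode
 **/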
4566 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
4567 {
4568         struct e1000_hw *hw = &adapter->hw;
4569
4570         hw->autoneg = 0;
4571
4572         /* Fiber NICs only allow 1000 Mbps full duplex */
4573         if ((hw->media_type == e1000_media_type_fiber) &&
4574                 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
4575                 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
4576                 return -EINVAL;
4577         }
4578
4579         switch (spddplx) {
4580         case SPEED_10 + DUPLEX_HALF:
4581                 hw->forced_speed_duplex = e1000_10_half;
4582                 break;
4583         case SPEED_10 + DUPLEX_FULL:
4584                 hw->forced_speed_duplex = e1000_10_full;
4585                 break;
4586         case SPEED_100 + DUPLEX_HALF:
4587                 hw->forced_speed_duplex = e1000_100_half;
4588                 break;
4589         case SPEED_100 + DUPLEX_FULL:
4590                 hw->forced_speed_duplex = e1000_100_full;
4591                 break;
4592         case SPEED_1000 + DUPLEX_FULL:
4593                 hw->autoneg = 1;
4594                 hw->autoneg_advertised = ADVERTISE_1000_FULL;
4595                 break;
4596         case SPEED_1000 + DUPLEX_HALF: /* not supported */
4597         default:
4598                 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
4599                 return -EINVAL;
4600         }
4601         return 0;
4602 }
4603
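/**
 * e1000_suspend - prepare the adapter for a system sleep state
 * @pdev: PCI device information struct
 * @state: power state chosen by the PM core
 *
 * Programs the wake-up filters when Wake-on-LAN is requested, releases the
 * hardware to firmware control and places the device in a low power state.
 **/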
4604 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4605 {
4606         struct net_device *netdev = pci_get_drvdata(pdev);
4607         struct e1000_adapter *adapter = netdev_priv(netdev);
4608         struct e1000_hw *hw = &adapter->hw;
4609         u32 ctrl, ctrl_ext, rctl, status;
4610         u32 wufc = adapter->wol;
4611 #ifdef CONFIG_PM
4612         int retval = 0;
4613 #endif
4614
4615         netif_device_detach(netdev);
4616
4617         if (netif_running(netdev)) {
4618                 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
4619                 e1000_down(adapter);
4620         }
4621
4622 #ifdef CONFIG_PM
4623         retval = pci_save_state(pdev);
4624         if (retval)
4625                 return retval;
4626 #endif
4627
4628         status = er32(STATUS);
4629         if (status & E1000_STATUS_LU)
4630                 wufc &= ~E1000_WUFC_LNKC;
4631
4632         if (wufc) {
4633                 e1000_setup_rctl(adapter);
4634                 e1000_set_rx_mode(netdev);
4635
4636                 /* turn on all-multi mode if wake on multicast is enabled */
4637                 if (wufc & E1000_WUFC_MC) {
4638                         rctl = er32(RCTL);
4639                         rctl |= E1000_RCTL_MPE;
4640                         ew32(RCTL, rctl);
4641                 }
4642
4643                 if (hw->mac_type >= e1000_82540) {
4644                         ctrl = er32(CTRL);
4645                         /* advertise wake from D3Cold */
4646                         #define E1000_CTRL_ADVD3WUC 0x00100000
4647                         /* phy power management enable */
4648                         #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
4649                         ctrl |= E1000_CTRL_ADVD3WUC |
4650                                 E1000_CTRL_EN_PHY_PWR_MGMT;
4651                         ew32(CTRL, ctrl);
4652                 }
4653
4654                 if (hw->media_type == e1000_media_type_fiber ||
4655                    hw->media_type == e1000_media_type_internal_serdes) {
4656                         /* keep the laser running in D3 */
4657                         ctrl_ext = er32(CTRL_EXT);
4658                         ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
4659                         ew32(CTRL_EXT, ctrl_ext);
4660                 }
4661
4662                 /* Allow time for pending master requests to run */
4663                 e1000_disable_pciex_master(hw);
4664
4665                 ew32(WUC, E1000_WUC_PME_EN);
4666                 ew32(WUFC, wufc);
4667                 pci_enable_wake(pdev, PCI_D3hot, 1);
4668                 pci_enable_wake(pdev, PCI_D3cold, 1);
4669         } else {
4670                 ew32(WUC, 0);
4671                 ew32(WUFC, 0);
4672                 pci_enable_wake(pdev, PCI_D3hot, 0);
4673                 pci_enable_wake(pdev, PCI_D3cold, 0);
4674         }
4675
4676         e1000_release_manageability(adapter);
4677
4678         /* make sure adapter isn't asleep if manageability is enabled */
4679         if (adapter->en_mng_pt) {
4680                 pci_enable_wake(pdev, PCI_D3hot, 1);
4681                 pci_enable_wake(pdev, PCI_D3cold, 1);
4682         }
4683
4684         if (hw->phy_type == e1000_phy_igp_3)
4685                 e1000_phy_powerdown_workaround(hw);
4686
4687         if (netif_running(netdev))
4688                 e1000_free_irq(adapter);
4689
4690         /* Release control of h/w to f/w.  If f/w is AMT enabled, this
4691          * would have already happened in close and is redundant. */
4692         e1000_release_hw_control(adapter);
4693
4694         pci_disable_device(pdev);
4695
4696         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4697
4698         return 0;
4699 }
4700
4701 #ifdef CONFIG_PM
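/**
 * e1000_resume - restore the adapter after a system sleep state
 * @pdev: PCI device information struct
 *
 * Re-enables the PCI device, clears the wake-up status, resets the hardware
 * and brings the interface back up if it was running before suspend.
 **/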
4702 static int e1000_resume(struct pci_dev *pdev)
4703 {
4704         struct net_device *netdev = pci_get_drvdata(pdev);
4705         struct e1000_adapter *adapter = netdev_priv(netdev);
4706         struct e1000_hw *hw = &adapter->hw;
4707         int err;
4708
4709         pci_set_power_state(pdev, PCI_D0);
4710         pci_restore_state(pdev);
4711
4712         if (adapter->need_ioport)
4713                 err = pci_enable_device(pdev);
4714         else
4715                 err = pci_enable_device_mem(pdev);
4716         if (err) {
4717                 printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
4718                 return err;
4719         }
4720         pci_set_master(pdev);
4721
4722         pci_enable_wake(pdev, PCI_D3hot, 0);
4723         pci_enable_wake(pdev, PCI_D3cold, 0);
4724
4725         if (netif_running(netdev)) {
4726                 err = e1000_request_irq(adapter);
4727                 if (err)
4728                         return err;
4729         }
4730
4731         e1000_power_up_phy(adapter);
4732         e1000_reset(adapter);
4733         ew32(WUS, ~0);
4734
4735         e1000_init_manageability(adapter);
4736
4737         if (netif_running(netdev))
4738                 e1000_up(adapter);
4739
4740         netif_device_attach(netdev);
4741
4742         /* If the controller is 82573 and f/w is AMT, do not set
4743          * DRV_LOAD until the interface is up.  For all other cases,
4744          * let the f/w know that the h/w is now under the control
4745          * of the driver. */
4746         if (hw->mac_type != e1000_82573 ||
4747             !e1000_check_mng_mode(hw))
4748                 e1000_get_hw_control(adapter);
4749
4750         return 0;
4751 }
4752 #endif
4753
4754 static void e1000_shutdown(struct pci_dev *pdev)
4755 {
4756         e1000_suspend(pdev, PMSG_SUSPEND);
4757 }
4758
4759 #ifdef CONFIG_NET_POLL_CONTROLLER
4760 /*
4761  * Polling 'interrupt' - used by things like netconsole to send skbs
4762  * without having to re-enable interrupts. It's not called while
4763  * the interrupt routine is executing.
4764  */
4765 static void e1000_netpoll(struct net_device *netdev)
4766 {
4767         struct e1000_adapter *adapter = netdev_priv(netdev);
4768
4769         disable_irq(adapter->pdev->irq);
4770         e1000_intr(adapter->pdev->irq, netdev);
4771         enable_irq(adapter->pdev->irq);
4772 }
4773 #endif
4774
4775 /**
4776  * e1000_io_error_detected - called when PCI error is detected
4777  * @pdev: Pointer to PCI device
4778  * @state: The current PCI connection state
4779  *
4780  * This function is called after a PCI bus error affecting
4781  * this device has been detected.
4782  */
4783 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
4784                                                 pci_channel_state_t state)
4785 {
4786         struct net_device *netdev = pci_get_drvdata(pdev);
4787         struct e1000_adapter *adapter = netdev_priv(netdev);
4788
4789         netif_device_detach(netdev);
4790
4791         if (netif_running(netdev))
4792                 e1000_down(adapter);
4793         pci_disable_device(pdev);
4794
4795         /* Request a slot reset. */
4796         return PCI_ERS_RESULT_NEED_RESET;
4797 }
4798
4799 /**
4800  * e1000_io_slot_reset - called after the pci bus has been reset.
4801  * @pdev: Pointer to PCI device
4802  *
4803  * Restart the card from scratch, as if from a cold-boot. Implementation
4804  * resembles the first-half of the e1000_resume routine.
4805  */
4806 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
4807 {
4808         struct net_device *netdev = pci_get_drvdata(pdev);
4809         struct e1000_adapter *adapter = netdev_priv(netdev);
4810         struct e1000_hw *hw = &adapter->hw;
4811         int err;
4812
4813         if (adapter->need_ioport)
4814                 err = pci_enable_device(pdev);
4815         else
4816                 err = pci_enable_device_mem(pdev);
4817         if (err) {
4818                 printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
4819                 return PCI_ERS_RESULT_DISCONNECT;
4820         }
4821         pci_set_master(pdev);
4822
4823         pci_enable_wake(pdev, PCI_D3hot, 0);
4824         pci_enable_wake(pdev, PCI_D3cold, 0);
4825
4826         e1000_reset(adapter);
4827         ew32(WUS, ~0);
4828
4829         return PCI_ERS_RESULT_RECOVERED;
4830 }
4831
4832 /**
4833  * e1000_io_resume - called when traffic can start flowing again.
4834  * @pdev: Pointer to PCI device
4835  *
4836  * This callback is called when the error recovery driver tells us that
4837  * it's OK to resume normal operation. Implementation resembles the
4838  * second-half of the e1000_resume routine.
4839  */
4840 static void e1000_io_resume(struct pci_dev *pdev)
4841 {
4842         struct net_device *netdev = pci_get_drvdata(pdev);
4843         struct e1000_adapter *adapter = netdev_priv(netdev);
4844         struct e1000_hw *hw = &adapter->hw;
4845
4846         e1000_init_manageability(adapter);
4847
4848         if (netif_running(netdev)) {
4849                 if (e1000_up(adapter)) {
4850                         printk(KERN_ERR "e1000: can't bring device back up after reset\n");
4851                         return;
4852                 }
4853         }
4854
4855         netif_device_attach(netdev);
4856
4857         /* If the controller is 82573 and f/w is AMT, do not set
4858          * DRV_LOAD until the interface is up.  For all other cases,
4859          * let the f/w know that the h/w is now under the control
4860          * of the driver. */
4861         if (hw->mac_type != e1000_82573 ||
4862             !e1000_check_mng_mode(hw))
4863                 e1000_get_hw_control(adapter);
4864
4865 }
4866
4867 /* e1000_main.c */