]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/e100.c
[NET]: Make NAPI polling independent of struct net_device objects.
[net-next-2.6.git] / drivers / net / e100.c
CommitLineData
1da177e4
LT
1/*******************************************************************************
2
0abb6eb1
AK
3 Intel PRO/100 Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation.
05479938
JB
5
6 This program is free software; you can redistribute it and/or modify it
0abb6eb1
AK
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
05479938 9
0abb6eb1 10 This program is distributed in the hope it will be useful, but WITHOUT
05479938
JB
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
1da177e4 13 more details.
05479938 14
1da177e4 15 You should have received a copy of the GNU General Public License along with
0abb6eb1
AK
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
05479938 18
0abb6eb1
AK
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
05479938 21
1da177e4
LT
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
0abb6eb1 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
1da177e4
LT
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29/*
30 * e100.c: Intel(R) PRO/100 ethernet driver
31 *
32 * (Re)written 2003 by scott.feldman@intel.com. Based loosely on
33 * original e100 driver, but better described as a munging of
34 * e100, e1000, eepro100, tg3, 8139cp, and other drivers.
35 *
36 * References:
37 * Intel 8255x 10/100 Mbps Ethernet Controller Family,
38 * Open Source Software Developers Manual,
39 * http://sourceforge.net/projects/e1000
40 *
41 *
42 * Theory of Operation
43 *
44 * I. General
45 *
46 * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
47 * controller family, which includes the 82557, 82558, 82559, 82550,
48 * 82551, and 82562 devices. 82558 and greater controllers
49 * integrate the Intel 82555 PHY. The controllers are used in
50 * server and client network interface cards, as well as in
51 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
52 * configurations. 8255x supports a 32-bit linear addressing
53 * mode and operates at 33Mhz PCI clock rate.
54 *
55 * II. Driver Operation
56 *
57 * Memory-mapped mode is used exclusively to access the device's
58 * shared-memory structure, the Control/Status Registers (CSR). All
59 * setup, configuration, and control of the device, including queuing
60 * of Tx, Rx, and configuration commands is through the CSR.
61 * cmd_lock serializes accesses to the CSR command register. cb_lock
62 * protects the shared Command Block List (CBL).
63 *
64 * 8255x is highly MII-compliant and all access to the PHY go
65 * through the Management Data Interface (MDI). Consequently, the
66 * driver leverages the mii.c library shared with other MII-compliant
67 * devices.
68 *
69 * Big- and Little-Endian byte order as well as 32- and 64-bit
70 * archs are supported. Weak-ordered memory and non-cache-coherent
71 * archs are supported.
72 *
73 * III. Transmit
74 *
75 * A Tx skb is mapped and hangs off of a TCB. TCBs are linked
76 * together in a fixed-size ring (CBL) thus forming the flexible mode
77 * memory structure. A TCB marked with the suspend-bit indicates
78 * the end of the ring. The last TCB processed suspends the
79 * controller, and the controller can be restarted by issue a CU
80 * resume command to continue from the suspend point, or a CU start
81 * command to start at a given position in the ring.
82 *
83 * Non-Tx commands (config, multicast setup, etc) are linked
84 * into the CBL ring along with Tx commands. The common structure
85 * used for both Tx and non-Tx commands is the Command Block (CB).
86 *
87 * cb_to_use is the next CB to use for queuing a command; cb_to_clean
88 * is the next CB to check for completion; cb_to_send is the first
89 * CB to start on in case of a previous failure to resume. CB clean
90 * up happens in interrupt context in response to a CU interrupt.
91 * cbs_avail keeps track of number of free CB resources available.
92 *
93 * Hardware padding of short packets to minimum packet size is
94 * enabled. 82557 pads with 7Eh, while the later controllers pad
95 * with 00h.
96 *
 97 * IV. Receive
98 *
99 * The Receive Frame Area (RFA) comprises a ring of Receive Frame
100 * Descriptors (RFD) + data buffer, thus forming the simplified mode
101 * memory structure. Rx skbs are allocated to contain both the RFD
102 * and the data buffer, but the RFD is pulled off before the skb is
103 * indicated. The data buffer is aligned such that encapsulated
104 * protocol headers are u32-aligned. Since the RFD is part of the
105 * mapped shared memory, and completion status is contained within
106 * the RFD, the RFD must be dma_sync'ed to maintain a consistent
107 * view from software and hardware.
108 *
109 * Under typical operation, the receive unit (RU) is start once,
110 * and the controller happily fills RFDs as frames arrive. If
111 * replacement RFDs cannot be allocated, or the RU goes non-active,
112 * the RU must be restarted. Frame arrival generates an interrupt,
113 * and Rx indication and re-allocation happen in the same context,
114 * therefore no locking is required. A software-generated interrupt
115 * is generated from the watchdog to recover from a failed allocation
 116 * scenario where all Rx resources have been indicated and none re-
117 * placed.
118 *
119 * V. Miscellaneous
120 *
121 * VLAN offloading of tagging, stripping and filtering is not
122 * supported, but driver will accommodate the extra 4-byte VLAN tag
123 * for processing by upper layers. Tx/Rx Checksum offloading is not
124 * supported. Tx Scatter/Gather is not supported. Jumbo Frames is
125 * not supported (hardware limitation).
126 *
127 * MagicPacket(tm) WoL support is enabled/disabled via ethtool.
128 *
129 * Thanks to JC (jchapman@katalix.com) for helping with
130 * testing/troubleshooting the development driver.
131 *
132 * TODO:
133 * o several entry points race with dev->close
134 * o check for tx-no-resources/stop Q races with tx clean/wake Q
ac7c6669
OM
135 *
136 * FIXES:
137 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
138 * - Stratus87247: protect MDI control register manipulations
1da177e4
LT
139 */
140
1da177e4
LT
141#include <linux/module.h>
142#include <linux/moduleparam.h>
143#include <linux/kernel.h>
144#include <linux/types.h>
145#include <linux/slab.h>
146#include <linux/delay.h>
147#include <linux/init.h>
148#include <linux/pci.h>
1e7f0bd8 149#include <linux/dma-mapping.h>
1da177e4
LT
150#include <linux/netdevice.h>
151#include <linux/etherdevice.h>
152#include <linux/mii.h>
153#include <linux/if_vlan.h>
154#include <linux/skbuff.h>
155#include <linux/ethtool.h>
156#include <linux/string.h>
157#include <asm/unaligned.h>
158
159
160#define DRV_NAME "e100"
4e1dc97d 161#define DRV_EXT "-NAPI"
44e4925e 162#define DRV_VERSION "3.5.23-k4"DRV_EXT
1da177e4 163#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
4e1dc97d 164#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
1da177e4
LT
165#define PFX DRV_NAME ": "
166
167#define E100_WATCHDOG_PERIOD (2 * HZ)
168#define E100_NAPI_WEIGHT 16
169
170MODULE_DESCRIPTION(DRV_DESCRIPTION);
171MODULE_AUTHOR(DRV_COPYRIGHT);
172MODULE_LICENSE("GPL");
173MODULE_VERSION(DRV_VERSION);
174
175static int debug = 3;
8fb6f732 176static int eeprom_bad_csum_allow = 0;
27345bb6 177static int use_io = 0;
1da177e4 178module_param(debug, int, 0);
8fb6f732 179module_param(eeprom_bad_csum_allow, int, 0);
27345bb6 180module_param(use_io, int, 0);
1da177e4 181MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
8fb6f732 182MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
27345bb6 183MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
1da177e4
LT
184#define DPRINTK(nlevel, klevel, fmt, args...) \
185 (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
186 printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
187 __FUNCTION__ , ## args))
188
189#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
190 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
191 PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
192static struct pci_device_id e100_id_table[] = {
193 INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
194 INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
195 INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
196 INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
197 INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
198 INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
199 INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
200 INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
201 INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
202 INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
203 INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
204 INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
205 INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
206 INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
207 INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
208 INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
209 INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
210 INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
211 INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
212 INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
213 INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
214 INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
215 INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
216 INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
217 INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
218 INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
219 INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
220 INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
221 INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
222 INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
042e2fb7
MC
223 INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
224 INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
225 INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
226 INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
227 INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
1da177e4
LT
228 INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
229 INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
230 INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
231 INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
232 INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
042e2fb7 233 INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
1da177e4
LT
234 { 0, }
235};
236MODULE_DEVICE_TABLE(pci, e100_id_table);
237
238enum mac {
239 mac_82557_D100_A = 0,
240 mac_82557_D100_B = 1,
241 mac_82557_D100_C = 2,
242 mac_82558_D101_A4 = 4,
243 mac_82558_D101_B0 = 5,
244 mac_82559_D101M = 8,
245 mac_82559_D101S = 9,
246 mac_82550_D102 = 12,
247 mac_82550_D102_C = 13,
248 mac_82551_E = 14,
249 mac_82551_F = 15,
250 mac_82551_10 = 16,
251 mac_unknown = 0xFF,
252};
253
254enum phy {
255 phy_100a = 0x000003E0,
256 phy_100c = 0x035002A8,
257 phy_82555_tx = 0x015002A8,
258 phy_nsc_tx = 0x5C002000,
259 phy_82562_et = 0x033002A8,
260 phy_82562_em = 0x032002A8,
261 phy_82562_ek = 0x031002A8,
262 phy_82562_eh = 0x017002A8,
263 phy_unknown = 0xFFFFFFFF,
264};
265
/* CSR (Control/Status Registers) — memory-mapped register layout of the
 * 8255x.  All device setup, configuration and command queuing goes
 * through this structure (accessed via ioread/iowrite only). */
struct csr {
	struct {
		u8 status;	/* RU/CU status */
		u8 stat_ack;	/* interrupt cause bits; write 1 to ack */
		u8 cmd_lo;	/* CU/RU command opcode (see scb_cmd_lo) */
		u8 cmd_hi;	/* interrupt mask control (see scb_cmd_hi) */
		u32 gen_ptr;	/* general pointer argument for commands */
	} scb;			/* System Control Block */
	u32 port;		/* reset/self-test control (see enum port) */
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;	/* bit-bang serial EEPROM interface */
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;		/* MDI (PHY management) control register */
	u32 rx_dma_count;
};
282
283enum scb_status {
284 rus_ready = 0x10,
285 rus_mask = 0x3C,
286};
287
ca93ca42
JG
288enum ru_state {
289 RU_SUSPENDED = 0,
290 RU_RUNNING = 1,
291 RU_UNINITIALIZED = -1,
292};
293
1da177e4
LT
294enum scb_stat_ack {
295 stat_ack_not_ours = 0x00,
296 stat_ack_sw_gen = 0x04,
297 stat_ack_rnr = 0x10,
298 stat_ack_cu_idle = 0x20,
299 stat_ack_frame_rx = 0x40,
300 stat_ack_cu_cmd_done = 0x80,
301 stat_ack_not_present = 0xFF,
302 stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
303 stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
304};
305
306enum scb_cmd_hi {
307 irq_mask_none = 0x00,
308 irq_mask_all = 0x01,
309 irq_sw_gen = 0x02,
310};
311
312enum scb_cmd_lo {
313 cuc_nop = 0x00,
314 ruc_start = 0x01,
315 ruc_load_base = 0x06,
316 cuc_start = 0x10,
317 cuc_resume = 0x20,
318 cuc_dump_addr = 0x40,
319 cuc_dump_stats = 0x50,
320 cuc_load_base = 0x60,
321 cuc_dump_reset = 0x70,
322};
323
324enum cuc_dump {
325 cuc_dump_complete = 0x0000A005,
326 cuc_dump_reset_complete = 0x0000A007,
327};
05479938 328
1da177e4
LT
329enum port {
330 software_reset = 0x0000,
331 selftest = 0x0001,
332 selective_reset = 0x0002,
333};
334
335enum eeprom_ctrl_lo {
336 eesk = 0x01,
337 eecs = 0x02,
338 eedi = 0x04,
339 eedo = 0x08,
340};
341
342enum mdi_ctrl {
343 mdi_write = 0x04000000,
344 mdi_read = 0x08000000,
345 mdi_ready = 0x10000000,
346};
347
348enum eeprom_op {
349 op_write = 0x05,
350 op_read = 0x06,
351 op_ewds = 0x10,
352 op_ewen = 0x13,
353};
354
355enum eeprom_offsets {
356 eeprom_cnfg_mdix = 0x03,
357 eeprom_id = 0x0A,
358 eeprom_config_asf = 0x0D,
359 eeprom_smbus_addr = 0x90,
360};
361
362enum eeprom_cnfg_mdix {
363 eeprom_mdix_enabled = 0x0080,
364};
365
366enum eeprom_id {
367 eeprom_id_wol = 0x0020,
368};
369
370enum eeprom_config_asf {
371 eeprom_asf = 0x8000,
372 eeprom_gcl = 0x4000,
373};
374
375enum cb_status {
376 cb_complete = 0x8000,
377 cb_ok = 0x2000,
378};
379
380enum cb_command {
381 cb_nop = 0x0000,
382 cb_iaaddr = 0x0001,
383 cb_config = 0x0002,
384 cb_multi = 0x0003,
385 cb_tx = 0x0004,
386 cb_ucode = 0x0005,
387 cb_dump = 0x0006,
388 cb_tx_sf = 0x0008,
389 cb_cid = 0x1f00,
390 cb_i = 0x2000,
391 cb_s = 0x4000,
392 cb_el = 0x8000,
393};
394
395struct rfd {
396 u16 status;
397 u16 command;
398 u32 link;
399 u32 rbd;
400 u16 actual_size;
401 u16 size;
402};
403
404struct rx {
405 struct rx *next, *prev;
406 struct sk_buff *skb;
407 dma_addr_t dma_addr;
408};
409
410#if defined(__BIG_ENDIAN_BITFIELD)
411#define X(a,b) b,a
412#else
413#define X(a,b) a,b
414#endif
415struct config {
416/*0*/ u8 X(byte_count:6, pad0:2);
417/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
418/*2*/ u8 adaptive_ifs;
419/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
420 term_write_cache_line:1), pad3:4);
421/*4*/ u8 X(rx_dma_max_count:7, pad4:1);
422/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
423/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
424 tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
425 rx_discard_overruns:1), rx_save_bad_frames:1);
426/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
427 pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
428 tx_dynamic_tbd:1);
429/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
430/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
431 link_status_wake:1), arp_wake:1), mcmatch_wake:1);
432/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
433 loopback:2);
434/*11*/ u8 X(linear_priority:3, pad11:5);
435/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
436/*13*/ u8 ip_addr_lo;
437/*14*/ u8 ip_addr_hi;
438/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
439 wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
440 pad15_2:1), crs_or_cdt:1);
441/*16*/ u8 fc_delay_lo;
442/*17*/ u8 fc_delay_hi;
443/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
444 rx_long_ok:1), fc_priority_threshold:3), pad18:1);
445/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
446 fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
447 full_duplex_force:1), full_duplex_pin:1);
448/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
449/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
450/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
451 u8 pad_d102[9];
452};
453
454#define E100_MAX_MULTICAST_ADDRS 64
455struct multi {
456 u16 count;
457 u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
458};
459
460/* Important: keep total struct u32-aligned */
461#define UCODE_SIZE 134
462struct cb {
463 u16 status;
464 u16 command;
465 u32 link;
466 union {
467 u8 iaaddr[ETH_ALEN];
468 u32 ucode[UCODE_SIZE];
469 struct config config;
470 struct multi multi;
471 struct {
472 u32 tbd_array;
473 u16 tcb_byte_count;
474 u8 threshold;
475 u8 tbd_count;
476 struct {
477 u32 buf_addr;
478 u16 size;
479 u16 eol;
480 } tbd;
481 } tcb;
482 u32 dump_buffer_addr;
483 } u;
484 struct cb *next, *prev;
485 dma_addr_t dma_addr;
486 struct sk_buff *skb;
487};
488
489enum loopback {
490 lb_none = 0, lb_mac = 1, lb_phy = 3,
491};
492
493struct stats {
494 u32 tx_good_frames, tx_max_collisions, tx_late_collisions,
495 tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
496 tx_multiple_collisions, tx_total_collisions;
497 u32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
498 rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
499 rx_short_frame_errors;
500 u32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
501 u16 xmt_tco_frames, rcv_tco_frames;
502 u32 complete;
503};
504
505struct mem {
506 struct {
507 u32 signature;
508 u32 result;
509 } selftest;
510 struct stats stats;
511 u8 dump_buf[596];
512};
513
514struct param_range {
515 u32 min;
516 u32 max;
517 u32 count;
518};
519
520struct params {
521 struct param_range rfds;
522 struct param_range cbs;
523};
524
/* Per-adapter driver state, hung off netdev_priv(netdev). */
struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;

	struct rx *rxs				____cacheline_aligned;	/* RFD ring */
	struct rx *rx_to_use;		/* next RFD to hand to hardware */
	struct rx *rx_to_clean;		/* next RFD to check for completion */
	struct rfd blank_rfd;		/* template for freshly allocated RFDs */
	enum ru_state ru_running;	/* receive unit state tracking */

	spinlock_t cb_lock			____cacheline_aligned;	/* protects CBL */
	spinlock_t cmd_lock;		/* serializes SCB command register access */
	struct csr __iomem *csr;	/* mapped device registers */
	enum scb_cmd_lo cuc_cmd;	/* cuc_start initially, cuc_resume after */
	unsigned int cbs_avail;		/* free CBs remaining in the ring */
	struct napi_struct napi;
	struct cb *cbs;			/* Command Block List (shared with h/w) */
	struct cb *cb_to_use;		/* next CB for queuing a command */
	struct cb *cb_to_send;		/* first CB to (re)start on after failure */
	struct cb *cb_to_clean;		/* next CB to check for completion */
	u16 tx_command;			/* precomputed cb_command bits for Tx */
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;		/* RFD/CB ring size parameters */
	struct net_device_stats net_stats;
	struct timer_list watchdog;
	struct timer_list blink_timer;	/* LED blink for ethtool identify */
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;		/* selftest/stats/dump DMA area */
	dma_addr_t dma_addr;		/* bus address of @mem */

	dma_addr_t cbs_dma_addr;	/* bus address of @cbs ring */
	u8 adaptive_ifs;
	u8 tx_threshold;		/* quadwords in FIFO before Tx starts */
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_over_length_errors;

	u16 leds;
	u16 eeprom_wc;			/* EEPROM size in 16-bit words */
	u16 eeprom[256];		/* cached EEPROM image */
	spinlock_t mdio_lock;		/* Stratus87247: guards MDI register ops */
};
592
/* Force posted MMIO writes out to the device before timing-sensitive
 * operations (e.g. the udelay()s in the reset and EEPROM sequences). */
static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}
599
/* Unmask the device interrupt line.  cmd_lock serializes all accesses
 * to the SCB command register halves (cmd_lo/cmd_hi). */
static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	/* flush while still holding the lock so the write is posted
	 * before any other CPU touches the command register */
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
609
/* Mask the device interrupt line (counterpart of e100_enable_irq()). */
static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	/* flush under the lock — see e100_enable_irq() */
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
619
/* Full hardware reset: selective reset to quiesce DMA, then a software
 * reset, then re-mask interrupts.  The 20us delays after each port
 * write are required by the 8255x before the next access. */
static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}
634
/* Run the device's built-in self-test, which DMAs its results into
 * nic->mem->selftest.  Returns 0 on pass, -ETIMEDOUT on failure or
 * if the device never wrote back (signature still 0). */
static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	/* Seed sentinels so we can tell whether the device wrote back */
	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if(nic->mem->selftest.result != 0) {
		DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
			nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if(nic->mem->selftest.signature == 0) {
		DPRINTK(HW, ERR, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
666
667static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, u16 data)
668{
669 u32 cmd_addr_data[3];
670 u8 ctrl;
671 int i, j;
672
673 /* Three cmds: write/erase enable, write data, write/erase disable */
674 cmd_addr_data[0] = op_ewen << (addr_len - 2);
675 cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
676 cpu_to_le16(data);
677 cmd_addr_data[2] = op_ewds << (addr_len - 2);
678
679 /* Bit-bang cmds to write word to eeprom */
680 for(j = 0; j < 3; j++) {
681
682 /* Chip select */
27345bb6 683 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
1da177e4
LT
684 e100_write_flush(nic); udelay(4);
685
686 for(i = 31; i >= 0; i--) {
687 ctrl = (cmd_addr_data[j] & (1 << i)) ?
688 eecs | eedi : eecs;
27345bb6 689 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
1da177e4
LT
690 e100_write_flush(nic); udelay(4);
691
27345bb6 692 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
1da177e4
LT
693 e100_write_flush(nic); udelay(4);
694 }
695 /* Wait 10 msec for cmd to complete */
696 msleep(10);
697
698 /* Chip deselect */
27345bb6 699 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
1da177e4
LT
700 e100_write_flush(nic); udelay(4);
701 }
702};
703
704/* General technique stolen from the eepro100 driver - very clever */
705static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
706{
707 u32 cmd_addr_data;
708 u16 data = 0;
709 u8 ctrl;
710 int i;
711
712 cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
713
714 /* Chip select */
27345bb6 715 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
1da177e4
LT
716 e100_write_flush(nic); udelay(4);
717
718 /* Bit-bang to read word from eeprom */
719 for(i = 31; i >= 0; i--) {
720 ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
27345bb6 721 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
1da177e4 722 e100_write_flush(nic); udelay(4);
05479938 723
27345bb6 724 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
1da177e4 725 e100_write_flush(nic); udelay(4);
05479938 726
1da177e4
LT
727 /* Eeprom drives a dummy zero to EEDO after receiving
728 * complete address. Use this to adjust addr_len. */
27345bb6 729 ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
1da177e4
LT
730 if(!(ctrl & eedo) && i > 16) {
731 *addr_len -= (i - 16);
732 i = 17;
733 }
05479938 734
1da177e4
LT
735 data = (data << 1) | (ctrl & eedo ? 1 : 0);
736 }
737
738 /* Chip deselect */
27345bb6 739 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
1da177e4
LT
740 e100_write_flush(nic); udelay(4);
741
742 return le16_to_cpu(data);
743};
744
/* Load entire EEPROM image into driver cache and validate checksum.
 * Returns 0 on success, -EAGAIN on a bad checksum (unless the
 * eeprom_bad_csum_allow module parameter overrides the failure). */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for(addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		/* last word is the checksum itself — don't sum it */
		if(addr < nic->eeprom_wc - 1)
			checksum += cpu_to_le16(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	checksum = le16_to_cpu(0xBABA - checksum);
	if(checksum != nic->eeprom[nic->eeprom_wc - 1]) {
		DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}
771
/* Save (portion of) driver EEPROM cache to device and update checksum.
 * Writes words [start, start+count) from nic->eeprom back to the part,
 * then recomputes and writes the checksum word.  Returns 0 or -EINVAL
 * if the range would reach the checksum word or run past the device. */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if(start + count >= nic->eeprom_wc)
		return -EINVAL;

	for(addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for(addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += cpu_to_le16(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = le16_to_cpu(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}
797
#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST 20       /* delay like the old code */
/* Issue a single command to the SCB command unit, under cmd_lock.
 * Spins (fast at first, then with 5us backoff) until the device has
 * consumed the previous command, then writes gen_ptr (except for
 * cuc_resume, which takes no pointer) and the opcode.
 * Returns 0 on success, -EAGAIN if the SCB never cleared. */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for(i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if(likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		/* after the first few iterations, back off to 5us polls */
		if(unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	/* cuc_resume continues from the suspend point; no pointer needed */
	if(unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
830
/* Queue a command block (Tx or non-Tx) onto the CBL under cb_lock.
 * @cb_prepare fills in the CB body; this function handles ring
 * bookkeeping, the suspend-bit handoff to hardware, and kicking the
 * CU with start/resume.  Returns 0, -ENOSPC when this CB consumed the
 * last free slot (command still queued), or -ENOMEM when no CB was
 * available at all. */
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if(unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	/* ring is now full — report it, but still submit this command */
	if(unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while(nic->cb_to_send != nic->cb_to_use) {
		if(unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky. It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if(err == -ENOSPC) {
				/* request a reset */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			/* after the first start, subsequent kicks are resumes */
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
885
886static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
887{
888 u32 data_out = 0;
889 unsigned int i;
ac7c6669 890 unsigned long flags;
1da177e4 891
ac7c6669
OM
892
893 /*
894 * Stratus87247: we shouldn't be writing the MDI control
895 * register until the Ready bit shows True. Also, since
896 * manipulation of the MDI control registers is a multi-step
897 * procedure it should be done under lock.
898 */
899 spin_lock_irqsave(&nic->mdio_lock, flags);
900 for (i = 100; i; --i) {
27345bb6 901 if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
ac7c6669
OM
902 break;
903 udelay(20);
904 }
905 if (unlikely(!i)) {
906 printk("e100.mdio_ctrl(%s) won't go Ready\n",
907 nic->netdev->name );
908 spin_unlock_irqrestore(&nic->mdio_lock, flags);
909 return 0; /* No way to indicate timeout error */
910 }
27345bb6 911 iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
1da177e4 912
ac7c6669 913 for (i = 0; i < 100; i++) {
1da177e4 914 udelay(20);
27345bb6 915 if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
1da177e4
LT
916 break;
917 }
ac7c6669 918 spin_unlock_irqrestore(&nic->mdio_lock, flags);
1da177e4
LT
919 DPRINTK(HW, DEBUG,
920 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
921 dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
922 return (u16)data_out;
923}
924
925static int mdio_read(struct net_device *netdev, int addr, int reg)
926{
927 return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
928}
929
930static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
931{
932 mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
933}
934
/* Initialize driver defaults that depend on the detected MAC type:
 * ring-size parameters, Tx threshold/command template, blank RFD
 * template, and the mii.c callback hookup. */
static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if(nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557*/
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = cpu_to_le16(cb_el);
	nic->blank_rfd.rbd = 0xFFFFFFFF;	/* no RBD (simplified mode) */
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}
967
/* e100_configure: cb_prepare callback that fills a CB with the 8255x
 * "configure" command block.  The controller reads this 22-byte (32-byte
 * extended) structure to program MAC behavior: FIFO thresholds, duplex,
 * promiscuity, flow control, WoL, etc.  @skb is unused for this command. */
static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;	/* byte view, used only by the debug dump below */

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	/* Static defaults for every chip revision */
	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	config->mii_mode = 0x1;			/* 1=MII mode, 0=503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	/* Runtime-tuned values */
	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if(nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	/* Loopback testing also needs bad/short frames passed up */
	if(nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if(nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up */
	if(netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	/* Revision-dependent features (82558 D101 A4 and newer) */
	if(nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	/* Dump the raw config block for debugging */
	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
1045
2afecc04
JB
1046/********************************************************/
1047/* Micro code for 8086:1229 Rev 8 */
1048/********************************************************/
1049
1050/* Parameter values for the D101M B-step */
1051#define D101M_CPUSAVER_TIMER_DWORD 78
1052#define D101M_CPUSAVER_BUNDLE_DWORD 65
1053#define D101M_CPUSAVER_MIN_SIZE_DWORD 126
1054
1055#define D101M_B_RCVBUNDLE_UCODE \
1056{\
10570x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
10580x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
10590x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
10600x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
10610x00380438, 0x00000000, 0x00140000, 0x00380555, \
10620x00308000, 0x00100662, 0x00100561, 0x000E0408, \
10630x00134861, 0x000C0002, 0x00103093, 0x00308000, \
10640x00100624, 0x00100561, 0x000E0408, 0x00100861, \
10650x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
10660x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
10670x00000000, 0x00000000, 0x00000000, 0x00000000, \
10680x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
10690x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
10700x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
10710x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
10720x00041000, 0x00010004, 0x00130826, 0x000C0006, \
10730x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
10740x00000000, 0x00000000, 0x00000000, 0x00000000, \
10750x00000000, 0x00000000, 0x00000000, 0x00000000, \
10760x00080600, 0x00101B10, 0x00050004, 0x00100826, \
10770x00101210, 0x00380C34, 0x00000000, 0x00000000, \
10780x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
10790x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
10800x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
10810x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
10820x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
10830x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
10840x00130826, 0x000C0001, 0x00220559, 0x00101313, \
10850x00380559, 0x00000000, 0x00000000, 0x00000000, \
10860x00000000, 0x00000000, 0x00000000, 0x00000000, \
10870x00000000, 0x00130831, 0x0010090B, 0x00124813, \
10880x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
10890x003806A8, 0x00000000, 0x00000000, 0x00000000, \
1090}
1091
1092/********************************************************/
1093/* Micro code for 8086:1229 Rev 9 */
1094/********************************************************/
1095
1096/* Parameter values for the D101S */
1097#define D101S_CPUSAVER_TIMER_DWORD 78
1098#define D101S_CPUSAVER_BUNDLE_DWORD 67
1099#define D101S_CPUSAVER_MIN_SIZE_DWORD 128
1100
1101#define D101S_RCVBUNDLE_UCODE \
1102{\
11030x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
11040x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
11050x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
11060x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
11070x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
11080x00308000, 0x00100610, 0x00100561, 0x000E0408, \
11090x00134861, 0x000C0002, 0x00103093, 0x00308000, \
11100x00100624, 0x00100561, 0x000E0408, 0x00100861, \
11110x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
11120x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
11130x00000000, 0x00000000, 0x00000000, 0x00000000, \
11140x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
11150x003A047E, 0x00044010, 0x00380819, 0x00000000, \
11160x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
11170x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
11180x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
11190x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
11200x00101313, 0x00380700, 0x00000000, 0x00000000, \
11210x00000000, 0x00000000, 0x00000000, 0x00000000, \
11220x00080600, 0x00101B10, 0x00050004, 0x00100826, \
11230x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
11240x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
11250x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
11260x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
11270x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
11280x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
11290x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
11300x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
11310x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
11320x00000000, 0x00000000, 0x00000000, 0x00000000, \
11330x00000000, 0x00000000, 0x00000000, 0x00130831, \
11340x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
11350x00041000, 0x00010004, 0x00380700 \
1136}
1137
1138/********************************************************/
1139/* Micro code for the 8086:1229 Rev F/10 */
1140/********************************************************/
1141
1142/* Parameter values for the D102 E-step */
1143#define D102_E_CPUSAVER_TIMER_DWORD 42
1144#define D102_E_CPUSAVER_BUNDLE_DWORD 54
1145#define D102_E_CPUSAVER_MIN_SIZE_DWORD 46
1146
1147#define D102_E_RCVBUNDLE_UCODE \
1148{\
11490x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
11500x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
11510x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
11520x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
11530x00000000, 0x00000000, 0x00000000, 0x00000000, \
11540x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
11550x00000000, 0x00000000, 0x00000000, 0x00000000, \
11560x00000000, 0x00000000, 0x00000000, 0x00000000, \
11570x00000000, 0x00000000, 0x00000000, 0x00000000, \
11580x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
11590x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
11600x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
11610x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
11620x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
11630x00000000, 0x00000000, 0x00000000, 0x00000000, \
11640x00000000, 0x00000000, 0x00000000, 0x00000000, \
11650x00000000, 0x00000000, 0x00000000, 0x00000000, \
11660x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
11670x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
11680x00000000, 0x00000000, 0x00000000, 0x00000000, \
11690x00000000, 0x00000000, 0x00000000, 0x00000000, \
11700x00000000, 0x00000000, 0x00000000, 0x00000000, \
11710x00000000, 0x00000000, 0x00000000, 0x00000000, \
11720x00000000, 0x00000000, 0x00000000, 0x00000000, \
11730x00000000, 0x00000000, 0x00000000, 0x00000000, \
11740x00000000, 0x00000000, 0x00000000, 0x00000000, \
11750x00000000, 0x00000000, 0x00000000, 0x00000000, \
11760x00000000, 0x00000000, 0x00000000, 0x00000000, \
11770x00000000, 0x00000000, 0x00000000, 0x00000000, \
11780x00000000, 0x00000000, 0x00000000, 0x00000000, \
11790x00000000, 0x00000000, 0x00000000, 0x00000000, \
11800x00000000, 0x00000000, 0x00000000, 0x00000000, \
11810x00000000, 0x00000000, 0x00000000, 0x00000000, \
1182}
1183
/* e100_setup_ucode: cb_prepare callback that loads the CPUSaver receive-
 * bundling microcode matching the hardware revision into a CB, after
 * patching in the driver-tunable parameters.  Falls back to a NOP command
 * (still with cb_el set, so the caller's wait logic works) when the device
 * is an ICH variant or no ucode exists for this revision. */
static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
/* *INDENT-OFF* */
	static struct {
		u32 ucode[UCODE_SIZE + 1];
		u8 mac;			/* mac_* revision this image applies to */
		u8 timer_dword;		/* dword index of the INTDELAY literal */
		u8 bundle_dword;	/* dword index of the BUNDLEMAX literal */
		u8 min_size_dword;	/* dword index of the BUNDLESMALL mask */
	} ucode_opts[] = {
		{ D101M_B_RCVBUNDLE_UCODE,
		  mac_82559_D101M,
		  D101M_CPUSAVER_TIMER_DWORD,
		  D101M_CPUSAVER_BUNDLE_DWORD,
		  D101M_CPUSAVER_MIN_SIZE_DWORD },
		{ D101S_RCVBUNDLE_UCODE,
		  mac_82559_D101S,
		  D101S_CPUSAVER_TIMER_DWORD,
		  D101S_CPUSAVER_BUNDLE_DWORD,
		  D101S_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_F,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_10,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ {0}, 0, 0, 0, 0}	/* sentinel: mac == 0 terminates */
	}, *opts;
/* *INDENT-ON* */

/*************************************************************************
* CPUSaver parameters
*
* All CPUSaver parameters are 16-bit literals that are part of a
* "move immediate value" instruction.  By changing the value of
* the literal in the instruction before the code is loaded, the
* driver can change the algorithm.
*
* INTDELAY - Initial value of the dead-man timer; when it expires the
*    interrupt is asserted.  It is reset on each received packet (see
*    BUNDLEMAX for the cap on chained packets).  Default 0x600 (1536);
*    sensible range is roughly 0x200 - 0x1000.
*
* BUNDLEMAX - Maximum number of frames bundled per interrupt.  Default
*    is six (one default TCP window).  1 means interrupt per frame;
*    xFFFF removes the limit.  Capping bundle growth avoids added
*    latency under e.g. TCP windowing.
*
* BUNDLESMALL - Bit-mask selecting the minimum frame size that will be
*    bundled.  Default 0xFF80 masks the low 7 bits: frames of 127 bytes
*    or less (e.g. TCP ACKs) bypass bundling and interrupt immediately,
*    which prevents the throughput collapse seen when ACK delivery was
*    delayed on send-mostly workloads.  Must be a valid bit mask; xFFFF
*    disables the feature, 0xF800 would interrupt on every standard
*    (<= 2047 byte) frame.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536 /* 0x600 */

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		goto noloaducode;

	/* Search for ucode match against h/w revision */
	for (opts = ucode_opts; opts->mac; opts++) {
		int i;
		u32 *ucode = opts->ucode;
		if (nic->mac != opts->mac)
			continue;

		/* Insert user-tunable settings into the 16-bit literal
		 * slots of the matching image (low half of each dword) */
		ucode[opts->timer_dword] &= 0xFFFF0000;
		ucode[opts->timer_dword] |= INTDELAY;
		ucode[opts->bundle_dword] &= 0xFFFF0000;
		ucode[opts->bundle_dword] |= BUNDLEMAX;
		ucode[opts->min_size_dword] &= 0xFFFF0000;
		ucode[opts->min_size_dword] |= (BUNDLESMALL) ? 0xFFFF : 0xFF80;

		for (i = 0; i < UCODE_SIZE; i++)
			cb->u.ucode[i] = cpu_to_le32(ucode[i]);
		cb->command = cpu_to_le16(cb_ucode | cb_el);
		return;
	}

noloaducode:
	/* No image for this revision: issue a NOP so the CB still completes */
	cb->command = cpu_to_le16(cb_nop | cb_el);
}
1305
1306static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
1307 void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
1308{
1309 int err = 0, counter = 50;
1310 struct cb *cb = nic->cb_to_clean;
1311
1312 if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
1313 DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
05479938 1314
24180333
JB
1315 /* must restart cuc */
1316 nic->cuc_cmd = cuc_start;
1317
1318 /* wait for completion */
1319 e100_write_flush(nic);
1320 udelay(10);
1321
1322 /* wait for possibly (ouch) 500ms */
1323 while (!(cb->status & cpu_to_le16(cb_complete))) {
1324 msleep(10);
1325 if (!--counter) break;
1326 }
05479938 1327
24180333 1328 /* ack any interupts, something could have been set */
27345bb6 1329 iowrite8(~0, &nic->csr->scb.stat_ack);
24180333
JB
1330
1331 /* if the command failed, or is not OK, notify and return */
1332 if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
1333 DPRINTK(PROBE,ERR, "ucode load failed\n");
1334 err = -EPERM;
1335 }
05479938 1336
24180333 1337 return err;
1da177e4
LT
1338}
1339
1340static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
1341 struct sk_buff *skb)
1342{
1343 cb->command = cpu_to_le16(cb_iaaddr);
1344 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
1345}
1346
1347static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1348{
1349 cb->command = cpu_to_le16(cb_dump);
1350 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
1351 offsetof(struct mem, dump_buf));
1352}
1353
#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
/* e100_phy_init: locate and configure the PHY behind the MII bus.
 * Returns 0 on success or -EAGAIN when no PHY responds at any address.
 * Probe order is deliberately {1, 0, 2, ..., 31}: address 1 is the most
 * common location, and 0 can be a broadcast address on some PHYs. */
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for(addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		/* BMSR is read twice: latched link bits need a second read */
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* All-ones or all-zeros means nothing answered here */
		if(!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
	if(addr == 32)
		return -EAGAIN;

	/* Selected the phy and isolate the rest */
	for(addr = 0; addr < 32; addr++) {
		if(addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	/* 82550 and newer always support auto MDI/MDI-X; older ICH parts
	 * only when the PHY reports it and the EEPROM hasn't claimed it */
	if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
			nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
1415
/* e100_hw_init: full hardware bring-up sequence — reset, self-test (only
 * when not in interrupt context), PHY init, CU/RU base loads, microcode,
 * configure, station address, and stats-dump setup, strictly in order.
 * Returns 0 or the first failing step's error; leaves IRQs disabled. */
static int e100_hw_init(struct nic *nic)
{
	int err;

	e100_hw_reset(nic);

	/* NOTE(review): logged at ERR level although purely informational */
	DPRINTK(HW, ERR, "e100_hw_init\n");
	if(!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if((err = e100_phy_init(nic)))
		return err;
	if((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	/* Tell the CU where to DMA the statistics block */
	if((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}
1448
1449static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1450{
1451 struct net_device *netdev = nic->netdev;
1452 struct dev_mc_list *list = netdev->mc_list;
1453 u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
1454
1455 cb->command = cpu_to_le16(cb_multi);
1456 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
1457 for(i = 0; list && i < count; i++, list = list->next)
1458 memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
1459 ETH_ALEN);
1460}
1461
1462static void e100_set_multicast_list(struct net_device *netdev)
1463{
1464 struct nic *nic = netdev_priv(netdev);
1465
1466 DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
1467 netdev->mc_count, netdev->flags);
1468
1469 if(netdev->flags & IFF_PROMISC)
1470 nic->flags |= promiscuous;
1471 else
1472 nic->flags &= ~promiscuous;
1473
1474 if(netdev->flags & IFF_ALLMULTI ||
1475 netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
1476 nic->flags |= multicast_all;
1477 else
1478 nic->flags &= ~multicast_all;
1479
1480 e100_exec_cb(nic, NULL, e100_configure);
1481 e100_exec_cb(nic, NULL, e100_multi);
1482}
1483
/* e100_update_stats: harvest the hardware statistics block (if the
 * previously issued dump has completed) into net_device_stats and the
 * driver's private counters, then kick off the next dump+reset.
 * The "complete" marker's location depends on how large a stats block
 * this MAC revision writes. */
static void e100_update_stats(struct nic *nic)
{
	struct net_device_stats *ns = &nic->net_stats;
	struct stats *s = &nic->mem->stats;
	u32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (u32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if(*complete == le32_to_cpu(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		/* Flow-control counters exist only on 82558 A4 and newer */
		if(nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			/* TCO counters only on D101M and newer */
			if(nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}

	/* Request the next dump; failure is only worth a debug message */
	if(e100_exec_cmd(nic, cuc_dump_reset, 0))
		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}
1541
1542static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1543{
1544 /* Adjust inter-frame-spacing (IFS) between two transmits if
1545 * we're getting collisions on a half-duplex connection. */
1546
1547 if(duplex == DUPLEX_HALF) {
1548 u32 prev = nic->adaptive_ifs;
1549 u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
1550
1551 if((nic->tx_frames / 32 < nic->tx_collisions) &&
1552 (nic->tx_frames > min_frames)) {
1553 if(nic->adaptive_ifs < 60)
1554 nic->adaptive_ifs += 5;
1555 } else if (nic->tx_frames < min_frames) {
1556 if(nic->adaptive_ifs >= 5)
1557 nic->adaptive_ifs -= 5;
1558 }
1559 if(nic->adaptive_ifs != prev)
1560 e100_exec_cb(nic, NULL, e100_configure);
1561 }
1562}
1563
/* e100_watchdog: periodic timer callback — maintains link state via the
 * mii library, fires a software interrupt to recover from rare Rx
 * allocation failures, refreshes stats, tunes adaptive IFS, and applies
 * per-revision workarounds before re-arming itself. */
static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd;

	DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	mii_ethtool_gset(&nic->mii, &cmd);

	/* Log only actual transitions (carrier state vs. PHY link state) */
	if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		DPRINTK(LINK, INFO, "link up, %sMbps, %s-duplex\n",
			cmd.speed == SPEED_100 ? "100" : "10",
			cmd.duplex == DUPLEX_FULL ? "full" : "half");
	} else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		DPRINTK(LINK, INFO, "link down\n");
	}

	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Interrupt generation bit */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);

	if(nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to workaround a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if(nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	mod_timer(&nic->watchdog, jiffies + E100_WATCHDOG_PERIOD);
}
1610
/* e100_xmit_prepare: cb_prepare callback that fills a CB with a transmit
 * command for @skb, mapping the frame data for device DMA. */
static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;
	/* interrupt every 16 packets regardless of delay */
	if((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	/* Single inline TBD living inside this CB */
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* NOTE(review): pci_map_single() result is not checked for mapping
	 * failure here; a fix would need this callback to report errors
	 * back through e100_exec_cb — confirm before relying on it. */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}
1627
1628static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1629{
1630 struct nic *nic = netdev_priv(netdev);
1631 int err;
1632
1633 if(nic->flags & ich_10h_workaround) {
1634 /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
1635 Issue a NOP command followed by a 1us delay before
1636 issuing the Tx command. */
1f53367d
MC
1637 if(e100_exec_cmd(nic, cuc_nop, 0))
1638 DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
1da177e4
LT
1639 udelay(1);
1640 }
1641
1642 err = e100_exec_cb(nic, skb, e100_xmit_prepare);
1643
1644 switch(err) {
1645 case -ENOSPC:
1646 /* We queued the skb, but now we're out of space. */
1647 DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
1648 netif_stop_queue(netdev);
1649 break;
1650 case -ENOMEM:
1651 /* This is a hard error - log it. */
1652 DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
1653 netif_stop_queue(netdev);
1654 return 1;
1655 }
1656
1657 netdev->trans_start = jiffies;
1658 return 0;
1659}
1660
/* e100_tx_clean: reclaim completed transmit CBs under cb_lock — account
 * stats, unmap DMA, free skbs, and return CBs to the available pool.
 * Returns nonzero when at least one skb was reclaimed (used by the
 * caller to decide whether to re-wake a stopped Tx queue). */
static int e100_tx_clean(struct nic *nic)
{
	struct cb *cb;
	int tx_cleaned = 0;

	spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for(cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
			(int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
			cb->status);

		/* Only Tx CBs carry an skb; control CBs (config, etc.)
		 * just get recycled */
		if(likely(cb->skb != NULL)) {
			nic->net_stats.tx_packets++;
			nic->net_stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	spin_unlock(&nic->cb_lock);

	/* Recover from running out of Tx resources in xmit_frame */
	if(unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
		netif_wake_queue(nic->netdev);

	return tx_cleaned;
}
1700
/* e100_clean_cbs: tear down the CB ring — walk any still-outstanding CBs
 * to unmap and free their skbs, release the coherent DMA block, and reset
 * ring bookkeeping so a subsequent alloc starts from a clean state. */
static void e100_clean_cbs(struct nic *nic)
{
	if(nic->cbs) {
		/* Drain until every CB has been returned to the pool */
		while(nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if(cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_free_consistent(nic->pdev,
			sizeof(struct cb) * nic->params.cbs.count,
			nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	/* Next ring use must (re)start the CU */
	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}
1726
1727static int e100_alloc_cbs(struct nic *nic)
1728{
1729 struct cb *cb;
1730 unsigned int i, count = nic->params.cbs.count;
1731
1732 nic->cuc_cmd = cuc_start;
1733 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1734 nic->cbs_avail = 0;
1735
1736 nic->cbs = pci_alloc_consistent(nic->pdev,
1737 sizeof(struct cb) * count, &nic->cbs_dma_addr);
1738 if(!nic->cbs)
1739 return -ENOMEM;
1740
1741 for(cb = nic->cbs, i = 0; i < count; cb++, i++) {
1742 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
1743 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
1744
1745 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
1746 cb->link = cpu_to_le32(nic->cbs_dma_addr +
1747 ((i+1) % count) * sizeof(struct cb));
1748 cb->skb = NULL;
1749 }
1750
1751 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
1752 nic->cbs_avail = count;
1753
1754 return 0;
1755}
1756
/* e100_start_receiver: (re)start the receive unit at @rx (or the head of
 * the ring when @rx is NULL, i.e. at init time).  Only acts when the RU
 * state machine is in RU_SUSPENDED, preventing double-starts. */
static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
{
	if(!nic->rxs) return;
	if(RU_SUSPENDED != nic->ru_running) return;

	/* handle init time starts */
	if(!rx) rx = nic->rxs;

	/* (Re)start RU if suspended or idle and RFA is non-NULL */
	if(rx->skb) {
		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
		nic->ru_running = RU_RUNNING;
	}
}
1771
/* Buffer large enough for an RFD header plus a max-size VLAN frame */
#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
/* e100_rx_alloc_skb: allocate and DMA-map a fresh receive skb for @rx,
 * seed it with the blank RFD template, and splice it onto the end of the
 * RFA by pointing the previous RFD at it and clearing its EL bit.
 * Returns 0 or -ENOMEM (alloc or mapping failure). */
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if(!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
		return -ENOMEM;

	/* Align, init, and map the RFD. */
	skb_reserve(rx->skb, NET_IP_ALIGN);
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	/* Bidirectional: device writes frame data, driver reads status */
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if(pci_dma_mapping_error(rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one, and clearing EL bit of previous. */
	if(rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		put_unaligned(cpu_to_le32(rx->dma_addr),
			(u32 *)&prev_rfd->link);
		/* wmb(): link must be visible before EL clears, or the
		 * device could run past an un-linked RFD */
		wmb();
		prev_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_TODEVICE);
	}

	return 0;
}
1805
/* e100_rx_indicate: examine one RFD and, when complete, hand the frame to
 * the network stack (or drop it on hardware error / oversize).
 * Returns 0 when the RFD was consumed, -EAGAIN when the NAPI budget
 * (@work_to_do) is exhausted, -ENODATA when the RFD is not ready yet. */
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;

	if(unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_FROMDEVICE);
	rfd_status = le16_to_cpu(rfd->status);

	DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);

	/* If data isn't ready, nothing to indicate */
	if(unlikely(!(rfd_status & cb_complete)))
		return -ENODATA;

	/* Get actual data size (clamped to what the buffer can hold) */
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if(unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_FROMDEVICE);

	/* this allows for a fast restart without re-enabling interrupts */
	if(le16_to_cpu(rfd->command) & cb_el)
		nic->ru_running = RU_SUSPENDED;

	/* Pull off the RFD and put the actual data (minus eth hdr) */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	if(unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		dev_kfree_skb_any(skb);
	} else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
		nic->net_stats.rx_packets++;
		nic->net_stats.rx_bytes += actual_size;
		nic->netdev->last_rx = jiffies;
		netif_receive_skb(skb);
		if(work_done)
			(*work_done)++;
	}

	/* skb ownership passed to the stack (or freed); force realloc */
	rx->skb = NULL;

	return 0;
}
1865
858119e1 1866static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
1da177e4
LT
1867 unsigned int work_to_do)
1868{
1869 struct rx *rx;
ca93ca42
JG
1870 int restart_required = 0;
1871 struct rx *rx_to_start = NULL;
1872
1873 /* are we already rnr? then pay attention!!! this ensures that
1874 * the state machine progression never allows a start with a
1875 * partially cleaned list, avoiding a race between hardware
1876 * and rx_to_clean when in NAPI mode */
1877 if(RU_SUSPENDED == nic->ru_running)
1878 restart_required = 1;
1da177e4
LT
1879
1880 /* Indicate newly arrived packets */
1881 for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
ca93ca42
JG
1882 int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
1883 if(-EAGAIN == err) {
1884 /* hit quota so have more work to do, restart once
1885 * cleanup is complete */
1886 restart_required = 0;
1887 break;
1888 } else if(-ENODATA == err)
1da177e4
LT
1889 break; /* No more to clean */
1890 }
1891
ca93ca42
JG
1892 /* save our starting point as the place we'll restart the receiver */
1893 if(restart_required)
1894 rx_to_start = nic->rx_to_clean;
1895
1da177e4
LT
1896 /* Alloc new skbs to refill list */
1897 for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
1898 if(unlikely(e100_rx_alloc_skb(nic, rx)))
1899 break; /* Better luck next time (see watchdog) */
1900 }
ca93ca42
JG
1901
1902 if(restart_required) {
1903 // ack the rnr?
1904 writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
1905 e100_start_receiver(nic, rx_to_start);
1906 if(work_done)
1907 (*work_done)++;
1908 }
1da177e4
LT
1909}
1910
1911static void e100_rx_clean_list(struct nic *nic)
1912{
1913 struct rx *rx;
1914 unsigned int i, count = nic->params.rfds.count;
1915
ca93ca42
JG
1916 nic->ru_running = RU_UNINITIALIZED;
1917
1da177e4
LT
1918 if(nic->rxs) {
1919 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
1920 if(rx->skb) {
1921 pci_unmap_single(nic->pdev, rx->dma_addr,
1922 RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
1923 dev_kfree_skb(rx->skb);
1924 }
1925 }
1926 kfree(nic->rxs);
1927 nic->rxs = NULL;
1928 }
1929
1930 nic->rx_to_use = nic->rx_to_clean = NULL;
1da177e4
LT
1931}
1932
1933static int e100_rx_alloc_list(struct nic *nic)
1934{
1935 struct rx *rx;
1936 unsigned int i, count = nic->params.rfds.count;
1937
1938 nic->rx_to_use = nic->rx_to_clean = NULL;
ca93ca42 1939 nic->ru_running = RU_UNINITIALIZED;
1da177e4 1940
c48e3fca 1941 if(!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
1da177e4 1942 return -ENOMEM;
1da177e4
LT
1943
1944 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
1945 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
1946 rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
1947 if(e100_rx_alloc_skb(nic, rx)) {
1948 e100_rx_clean_list(nic);
1949 return -ENOMEM;
1950 }
1951 }
1952
1953 nic->rx_to_use = nic->rx_to_clean = nic->rxs;
ca93ca42 1954 nic->ru_running = RU_SUSPENDED;
1da177e4
LT
1955
1956 return 0;
1957}
1958
7d12e780 1959static irqreturn_t e100_intr(int irq, void *dev_id)
1da177e4
LT
1960{
1961 struct net_device *netdev = dev_id;
1962 struct nic *nic = netdev_priv(netdev);
27345bb6 1963 u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
1da177e4
LT
1964
1965 DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);
1966
1967 if(stat_ack == stat_ack_not_ours || /* Not our interrupt */
1968 stat_ack == stat_ack_not_present) /* Hardware is ejected */
1969 return IRQ_NONE;
1970
1971 /* Ack interrupt(s) */
27345bb6 1972 iowrite8(stat_ack, &nic->csr->scb.stat_ack);
1da177e4 1973
ca93ca42
JG
1974 /* We hit Receive No Resource (RNR); restart RU after cleaning */
1975 if(stat_ack & stat_ack_rnr)
1976 nic->ru_running = RU_SUSPENDED;
1977
bea3348e 1978 if(likely(netif_rx_schedule_prep(netdev, &nic->napi))) {
0685c31b 1979 e100_disable_irq(nic);
bea3348e 1980 __netif_rx_schedule(netdev, &nic->napi);
0685c31b 1981 }
1da177e4
LT
1982
1983 return IRQ_HANDLED;
1984}
1985
bea3348e 1986static int e100_poll(struct napi_struct *napi, int budget)
1da177e4 1987{
bea3348e
SH
1988 struct nic *nic = container_of(napi, struct nic, napi);
1989 struct net_device *netdev = nic->netdev;
1990 int work_done = 0;
1da177e4
LT
1991 int tx_cleaned;
1992
bea3348e 1993 e100_rx_clean(nic, &work_done, budget);
1da177e4
LT
1994 tx_cleaned = e100_tx_clean(nic);
1995
1996 /* If no Rx and Tx cleanup work was done, exit polling mode. */
1997 if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
bea3348e 1998 netif_rx_complete(netdev, napi);
1da177e4 1999 e100_enable_irq(nic);
1da177e4
LT
2000 }
2001
bea3348e 2002 return work_done;
1da177e4
LT
2003}
2004
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll-mode entry for netconsole and friends: run the ISR by hand with
 * the device interrupt masked, then reap TX completions. */
static void e100_netpoll(struct net_device *netdev)
{
    struct nic *nic = netdev_priv(netdev);

    e100_disable_irq(nic);
    e100_intr(nic->pdev->irq, netdev);
    e100_tx_clean(nic);
    e100_enable_irq(nic);
}
#endif
2016
2017static struct net_device_stats *e100_get_stats(struct net_device *netdev)
2018{
2019 struct nic *nic = netdev_priv(netdev);
2020 return &nic->net_stats;
2021}
2022
2023static int e100_set_mac_address(struct net_device *netdev, void *p)
2024{
2025 struct nic *nic = netdev_priv(netdev);
2026 struct sockaddr *addr = p;
2027
2028 if (!is_valid_ether_addr(addr->sa_data))
2029 return -EADDRNOTAVAIL;
2030
2031 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2032 e100_exec_cb(nic, NULL, e100_setup_iaaddr);
2033
2034 return 0;
2035}
2036
2037static int e100_change_mtu(struct net_device *netdev, int new_mtu)
2038{
2039 if(new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
2040 return -EINVAL;
2041 netdev->mtu = new_mtu;
2042 return 0;
2043}
2044
2045static int e100_asf(struct nic *nic)
2046{
2047 /* ASF can be enabled from eeprom */
2048 return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
2049 (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
2050 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
2051 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
2052}
2053
2054static int e100_up(struct nic *nic)
2055{
2056 int err;
2057
2058 if((err = e100_rx_alloc_list(nic)))
2059 return err;
2060 if((err = e100_alloc_cbs(nic)))
2061 goto err_rx_clean_list;
2062 if((err = e100_hw_init(nic)))
2063 goto err_clean_cbs;
2064 e100_set_multicast_list(nic->netdev);
ca93ca42 2065 e100_start_receiver(nic, NULL);
1da177e4 2066 mod_timer(&nic->watchdog, jiffies);
1fb9df5d 2067 if((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
1da177e4
LT
2068 nic->netdev->name, nic->netdev)))
2069 goto err_no_irq;
1da177e4 2070 netif_wake_queue(nic->netdev);
bea3348e 2071 napi_enable(&nic->napi);
0236ebb7
MC
2072 /* enable ints _after_ enabling poll, preventing a race between
2073 * disable ints+schedule */
2074 e100_enable_irq(nic);
1da177e4
LT
2075 return 0;
2076
2077err_no_irq:
2078 del_timer_sync(&nic->watchdog);
2079err_clean_cbs:
2080 e100_clean_cbs(nic);
2081err_rx_clean_list:
2082 e100_rx_clean_list(nic);
2083 return err;
2084}
2085
2086static void e100_down(struct nic *nic)
2087{
0236ebb7 2088 /* wait here for poll to complete */
bea3348e 2089 napi_disable(&nic->napi);
0236ebb7 2090 netif_stop_queue(nic->netdev);
1da177e4
LT
2091 e100_hw_reset(nic);
2092 free_irq(nic->pdev->irq, nic->netdev);
2093 del_timer_sync(&nic->watchdog);
2094 netif_carrier_off(nic->netdev);
1da177e4
LT
2095 e100_clean_cbs(nic);
2096 e100_rx_clean_list(nic);
2097}
2098
2099static void e100_tx_timeout(struct net_device *netdev)
2100{
2101 struct nic *nic = netdev_priv(netdev);
2102
05479938 2103 /* Reset outside of interrupt context, to avoid request_irq
2acdb1e0
MC
2104 * in interrupt context */
2105 schedule_work(&nic->tx_timeout_task);
2106}
2107
c4028958 2108static void e100_tx_timeout_task(struct work_struct *work)
2acdb1e0 2109{
c4028958
DH
2110 struct nic *nic = container_of(work, struct nic, tx_timeout_task);
2111 struct net_device *netdev = nic->netdev;
2acdb1e0 2112
1da177e4 2113 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
27345bb6 2114 ioread8(&nic->csr->scb.status));
1da177e4
LT
2115 e100_down(netdev_priv(netdev));
2116 e100_up(netdev_priv(netdev));
2117}
2118
2119static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
2120{
2121 int err;
2122 struct sk_buff *skb;
2123
2124 /* Use driver resources to perform internal MAC or PHY
2125 * loopback test. A single packet is prepared and transmitted
2126 * in loopback mode, and the test passes if the received
2127 * packet compares byte-for-byte to the transmitted packet. */
2128
2129 if((err = e100_rx_alloc_list(nic)))
2130 return err;
2131 if((err = e100_alloc_cbs(nic)))
2132 goto err_clean_rx;
2133
2134 /* ICH PHY loopback is broken so do MAC loopback instead */
2135 if(nic->flags & ich && loopback_mode == lb_phy)
2136 loopback_mode = lb_mac;
2137
2138 nic->loopback = loopback_mode;
2139 if((err = e100_hw_init(nic)))
2140 goto err_loopback_none;
2141
2142 if(loopback_mode == lb_phy)
2143 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
2144 BMCR_LOOPBACK);
2145
ca93ca42 2146 e100_start_receiver(nic, NULL);
1da177e4 2147
4187592b 2148 if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
1da177e4
LT
2149 err = -ENOMEM;
2150 goto err_loopback_none;
2151 }
2152 skb_put(skb, ETH_DATA_LEN);
2153 memset(skb->data, 0xFF, ETH_DATA_LEN);
2154 e100_xmit_frame(skb, nic->netdev);
2155
2156 msleep(10);
2157
aa49cdd9
JB
2158 pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
2159 RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
2160
1da177e4
LT
2161 if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
2162 skb->data, ETH_DATA_LEN))
2163 err = -EAGAIN;
2164
2165err_loopback_none:
2166 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
2167 nic->loopback = lb_none;
1da177e4 2168 e100_clean_cbs(nic);
aa49cdd9 2169 e100_hw_reset(nic);
1da177e4
LT
2170err_clean_rx:
2171 e100_rx_clean_list(nic);
2172 return err;
2173}
2174
2175#define MII_LED_CONTROL 0x1B
2176static void e100_blink_led(unsigned long data)
2177{
2178 struct nic *nic = (struct nic *)data;
2179 enum led_state {
2180 led_on = 0x01,
2181 led_off = 0x04,
2182 led_on_559 = 0x05,
2183 led_on_557 = 0x07,
2184 };
2185
2186 nic->leds = (nic->leds & led_on) ? led_off :
2187 (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
2188 mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
2189 mod_timer(&nic->blink_timer, jiffies + HZ / 4);
2190}
2191
2192static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2193{
2194 struct nic *nic = netdev_priv(netdev);
2195 return mii_ethtool_gset(&nic->mii, cmd);
2196}
2197
2198static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2199{
2200 struct nic *nic = netdev_priv(netdev);
2201 int err;
2202
2203 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
2204 err = mii_ethtool_sset(&nic->mii, cmd);
2205 e100_exec_cb(nic, NULL, e100_configure);
2206
2207 return err;
2208}
2209
2210static void e100_get_drvinfo(struct net_device *netdev,
2211 struct ethtool_drvinfo *info)
2212{
2213 struct nic *nic = netdev_priv(netdev);
2214 strcpy(info->driver, DRV_NAME);
2215 strcpy(info->version, DRV_VERSION);
2216 strcpy(info->fw_version, "N/A");
2217 strcpy(info->bus_info, pci_name(nic->pdev));
2218}
2219
2220static int e100_get_regs_len(struct net_device *netdev)
2221{
2222 struct nic *nic = netdev_priv(netdev);
2223#define E100_PHY_REGS 0x1C
2224#define E100_REGS_LEN 1 + E100_PHY_REGS + \
2225 sizeof(nic->mem->dump_buf) / sizeof(u32)
2226 return E100_REGS_LEN * sizeof(u32);
2227}
2228
2229static void e100_get_regs(struct net_device *netdev,
2230 struct ethtool_regs *regs, void *p)
2231{
2232 struct nic *nic = netdev_priv(netdev);
2233 u32 *buff = p;
2234 int i;
2235
44c10138 2236 regs->version = (1 << 24) | nic->pdev->revision;
27345bb6
JB
2237 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
2238 ioread8(&nic->csr->scb.cmd_lo) << 16 |
2239 ioread16(&nic->csr->scb.status);
1da177e4
LT
2240 for(i = E100_PHY_REGS; i >= 0; i--)
2241 buff[1 + E100_PHY_REGS - i] =
2242 mdio_read(netdev, nic->mii.phy_id, i);
2243 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
2244 e100_exec_cb(nic, NULL, e100_dump);
2245 msleep(10);
2246 memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
2247 sizeof(nic->mem->dump_buf));
2248}
2249
2250static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2251{
2252 struct nic *nic = netdev_priv(netdev);
2253 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
2254 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
2255}
2256
2257static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2258{
2259 struct nic *nic = netdev_priv(netdev);
2260
2261 if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
2262 return -EOPNOTSUPP;
2263
2264 if(wol->wolopts)
2265 nic->flags |= wol_magic;
2266 else
2267 nic->flags &= ~wol_magic;
2268
1da177e4
LT
2269 e100_exec_cb(nic, NULL, e100_configure);
2270
2271 return 0;
2272}
2273
2274static u32 e100_get_msglevel(struct net_device *netdev)
2275{
2276 struct nic *nic = netdev_priv(netdev);
2277 return nic->msg_enable;
2278}
2279
2280static void e100_set_msglevel(struct net_device *netdev, u32 value)
2281{
2282 struct nic *nic = netdev_priv(netdev);
2283 nic->msg_enable = value;
2284}
2285
2286static int e100_nway_reset(struct net_device *netdev)
2287{
2288 struct nic *nic = netdev_priv(netdev);
2289 return mii_nway_restart(&nic->mii);
2290}
2291
2292static u32 e100_get_link(struct net_device *netdev)
2293{
2294 struct nic *nic = netdev_priv(netdev);
2295 return mii_link_ok(&nic->mii);
2296}
2297
2298static int e100_get_eeprom_len(struct net_device *netdev)
2299{
2300 struct nic *nic = netdev_priv(netdev);
2301 return nic->eeprom_wc << 1;
2302}
2303
2304#define E100_EEPROM_MAGIC 0x1234
2305static int e100_get_eeprom(struct net_device *netdev,
2306 struct ethtool_eeprom *eeprom, u8 *bytes)
2307{
2308 struct nic *nic = netdev_priv(netdev);
2309
2310 eeprom->magic = E100_EEPROM_MAGIC;
2311 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
2312
2313 return 0;
2314}
2315
2316static int e100_set_eeprom(struct net_device *netdev,
2317 struct ethtool_eeprom *eeprom, u8 *bytes)
2318{
2319 struct nic *nic = netdev_priv(netdev);
2320
2321 if(eeprom->magic != E100_EEPROM_MAGIC)
2322 return -EINVAL;
2323
2324 memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
2325
2326 return e100_eeprom_save(nic, eeprom->offset >> 1,
2327 (eeprom->len >> 1) + 1);
2328}
2329
2330static void e100_get_ringparam(struct net_device *netdev,
2331 struct ethtool_ringparam *ring)
2332{
2333 struct nic *nic = netdev_priv(netdev);
2334 struct param_range *rfds = &nic->params.rfds;
2335 struct param_range *cbs = &nic->params.cbs;
2336
2337 ring->rx_max_pending = rfds->max;
2338 ring->tx_max_pending = cbs->max;
2339 ring->rx_mini_max_pending = 0;
2340 ring->rx_jumbo_max_pending = 0;
2341 ring->rx_pending = rfds->count;
2342 ring->tx_pending = cbs->count;
2343 ring->rx_mini_pending = 0;
2344 ring->rx_jumbo_pending = 0;
2345}
2346
2347static int e100_set_ringparam(struct net_device *netdev,
2348 struct ethtool_ringparam *ring)
2349{
2350 struct nic *nic = netdev_priv(netdev);
2351 struct param_range *rfds = &nic->params.rfds;
2352 struct param_range *cbs = &nic->params.cbs;
2353
05479938 2354 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
1da177e4
LT
2355 return -EINVAL;
2356
2357 if(netif_running(netdev))
2358 e100_down(nic);
2359 rfds->count = max(ring->rx_pending, rfds->min);
2360 rfds->count = min(rfds->count, rfds->max);
2361 cbs->count = max(ring->tx_pending, cbs->min);
2362 cbs->count = min(cbs->count, cbs->max);
2363 DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
2364 rfds->count, cbs->count);
2365 if(netif_running(netdev))
2366 e100_up(nic);
2367
2368 return 0;
2369}
2370
2371static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
2372 "Link test (on/offline)",
2373 "Eeprom test (on/offline)",
2374 "Self test (offline)",
2375 "Mac loopback (offline)",
2376 "Phy loopback (offline)",
2377};
2378#define E100_TEST_LEN sizeof(e100_gstrings_test) / ETH_GSTRING_LEN
2379
2380static int e100_diag_test_count(struct net_device *netdev)
2381{
2382 return E100_TEST_LEN;
2383}
2384
2385static void e100_diag_test(struct net_device *netdev,
2386 struct ethtool_test *test, u64 *data)
2387{
2388 struct ethtool_cmd cmd;
2389 struct nic *nic = netdev_priv(netdev);
2390 int i, err;
2391
2392 memset(data, 0, E100_TEST_LEN * sizeof(u64));
2393 data[0] = !mii_link_ok(&nic->mii);
2394 data[1] = e100_eeprom_load(nic);
2395 if(test->flags & ETH_TEST_FL_OFFLINE) {
2396
2397 /* save speed, duplex & autoneg settings */
2398 err = mii_ethtool_gset(&nic->mii, &cmd);
2399
2400 if(netif_running(netdev))
2401 e100_down(nic);
2402 data[2] = e100_self_test(nic);
2403 data[3] = e100_loopback_test(nic, lb_mac);
2404 data[4] = e100_loopback_test(nic, lb_phy);
2405
2406 /* restore speed, duplex & autoneg settings */
2407 err = mii_ethtool_sset(&nic->mii, &cmd);
2408
2409 if(netif_running(netdev))
2410 e100_up(nic);
2411 }
2412 for(i = 0; i < E100_TEST_LEN; i++)
2413 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
a074fb86
MC
2414
2415 msleep_interruptible(4 * 1000);
1da177e4
LT
2416}
2417
2418static int e100_phys_id(struct net_device *netdev, u32 data)
2419{
2420 struct nic *nic = netdev_priv(netdev);
2421
2422 if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
2423 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
2424 mod_timer(&nic->blink_timer, jiffies);
2425 msleep_interruptible(data * 1000);
2426 del_timer_sync(&nic->blink_timer);
2427 mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);
2428
2429 return 0;
2430}
2431
2432static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
2433 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
2434 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
2435 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
2436 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
2437 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
2438 "tx_heartbeat_errors", "tx_window_errors",
2439 /* device-specific stats */
2440 "tx_deferred", "tx_single_collisions", "tx_multi_collisions",
2441 "tx_flow_control_pause", "rx_flow_control_pause",
2442 "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
2443};
2444#define E100_NET_STATS_LEN 21
2445#define E100_STATS_LEN sizeof(e100_gstrings_stats) / ETH_GSTRING_LEN
2446
2447static int e100_get_stats_count(struct net_device *netdev)
2448{
2449 return E100_STATS_LEN;
2450}
2451
2452static void e100_get_ethtool_stats(struct net_device *netdev,
2453 struct ethtool_stats *stats, u64 *data)
2454{
2455 struct nic *nic = netdev_priv(netdev);
2456 int i;
2457
2458 for(i = 0; i < E100_NET_STATS_LEN; i++)
2459 data[i] = ((unsigned long *)&nic->net_stats)[i];
2460
2461 data[i++] = nic->tx_deferred;
2462 data[i++] = nic->tx_single_collisions;
2463 data[i++] = nic->tx_multiple_collisions;
2464 data[i++] = nic->tx_fc_pause;
2465 data[i++] = nic->rx_fc_pause;
2466 data[i++] = nic->rx_fc_unsupported;
2467 data[i++] = nic->tx_tco_frames;
2468 data[i++] = nic->rx_tco_frames;
2469}
2470
2471static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2472{
2473 switch(stringset) {
2474 case ETH_SS_TEST:
2475 memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
2476 break;
2477 case ETH_SS_STATS:
2478 memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
2479 break;
2480 }
2481}
2482
7282d491 2483static const struct ethtool_ops e100_ethtool_ops = {
1da177e4
LT
2484 .get_settings = e100_get_settings,
2485 .set_settings = e100_set_settings,
2486 .get_drvinfo = e100_get_drvinfo,
2487 .get_regs_len = e100_get_regs_len,
2488 .get_regs = e100_get_regs,
2489 .get_wol = e100_get_wol,
2490 .set_wol = e100_set_wol,
2491 .get_msglevel = e100_get_msglevel,
2492 .set_msglevel = e100_set_msglevel,
2493 .nway_reset = e100_nway_reset,
2494 .get_link = e100_get_link,
2495 .get_eeprom_len = e100_get_eeprom_len,
2496 .get_eeprom = e100_get_eeprom,
2497 .set_eeprom = e100_set_eeprom,
2498 .get_ringparam = e100_get_ringparam,
2499 .set_ringparam = e100_set_ringparam,
2500 .self_test_count = e100_diag_test_count,
2501 .self_test = e100_diag_test,
2502 .get_strings = e100_get_strings,
2503 .phys_id = e100_phys_id,
2504 .get_stats_count = e100_get_stats_count,
2505 .get_ethtool_stats = e100_get_ethtool_stats,
2506};
2507
2508static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2509{
2510 struct nic *nic = netdev_priv(netdev);
2511
2512 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2513}
2514
2515static int e100_alloc(struct nic *nic)
2516{
2517 nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
2518 &nic->dma_addr);
2519 return nic->mem ? 0 : -ENOMEM;
2520}
2521
2522static void e100_free(struct nic *nic)
2523{
2524 if(nic->mem) {
2525 pci_free_consistent(nic->pdev, sizeof(struct mem),
2526 nic->mem, nic->dma_addr);
2527 nic->mem = NULL;
2528 }
2529}
2530
2531static int e100_open(struct net_device *netdev)
2532{
2533 struct nic *nic = netdev_priv(netdev);
2534 int err = 0;
2535
2536 netif_carrier_off(netdev);
2537 if((err = e100_up(nic)))
2538 DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
2539 return err;
2540}
2541
/* net_device stop hook. */
static int e100_close(struct net_device *netdev)
{
    e100_down(netdev_priv(netdev));
    return 0;
}
2547
2548static int __devinit e100_probe(struct pci_dev *pdev,
2549 const struct pci_device_id *ent)
2550{
2551 struct net_device *netdev;
2552 struct nic *nic;
2553 int err;
2554
2555 if(!(netdev = alloc_etherdev(sizeof(struct nic)))) {
2556 if(((1 << debug) - 1) & NETIF_MSG_PROBE)
2557 printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
2558 return -ENOMEM;
2559 }
2560
2561 netdev->open = e100_open;
2562 netdev->stop = e100_close;
2563 netdev->hard_start_xmit = e100_xmit_frame;
2564 netdev->get_stats = e100_get_stats;
2565 netdev->set_multicast_list = e100_set_multicast_list;
2566 netdev->set_mac_address = e100_set_mac_address;
2567 netdev->change_mtu = e100_change_mtu;
2568 netdev->do_ioctl = e100_do_ioctl;
2569 SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
2570 netdev->tx_timeout = e100_tx_timeout;
2571 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
1da177e4
LT
2572#ifdef CONFIG_NET_POLL_CONTROLLER
2573 netdev->poll_controller = e100_netpoll;
2574#endif
0eb5a34c 2575 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1da177e4
LT
2576
2577 nic = netdev_priv(netdev);
bea3348e 2578 netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
1da177e4
LT
2579 nic->netdev = netdev;
2580 nic->pdev = pdev;
2581 nic->msg_enable = (1 << debug) - 1;
2582 pci_set_drvdata(pdev, netdev);
2583
2584 if((err = pci_enable_device(pdev))) {
2585 DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
2586 goto err_out_free_dev;
2587 }
2588
2589 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2590 DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
2591 "base address, aborting.\n");
2592 err = -ENODEV;
2593 goto err_out_disable_pdev;
2594 }
2595
2596 if((err = pci_request_regions(pdev, DRV_NAME))) {
2597 DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
2598 goto err_out_disable_pdev;
2599 }
2600
1e7f0bd8 2601 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
1da177e4
LT
2602 DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
2603 goto err_out_free_res;
2604 }
2605
2606 SET_MODULE_OWNER(netdev);
2607 SET_NETDEV_DEV(netdev, &pdev->dev);
2608
27345bb6
JB
2609 if (use_io)
2610 DPRINTK(PROBE, INFO, "using i/o access mode\n");
2611
2612 nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
1da177e4
LT
2613 if(!nic->csr) {
2614 DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
2615 err = -ENOMEM;
2616 goto err_out_free_res;
2617 }
2618
2619 if(ent->driver_data)
2620 nic->flags |= ich;
2621 else
2622 nic->flags &= ~ich;
2623
2624 e100_get_defaults(nic);
2625
1f53367d 2626 /* locks must be initialized before calling hw_reset */
1da177e4
LT
2627 spin_lock_init(&nic->cb_lock);
2628 spin_lock_init(&nic->cmd_lock);
ac7c6669 2629 spin_lock_init(&nic->mdio_lock);
1da177e4
LT
2630
2631 /* Reset the device before pci_set_master() in case device is in some
2632 * funky state and has an interrupt pending - hint: we don't have the
2633 * interrupt handler registered yet. */
2634 e100_hw_reset(nic);
2635
2636 pci_set_master(pdev);
2637
2638 init_timer(&nic->watchdog);
2639 nic->watchdog.function = e100_watchdog;
2640 nic->watchdog.data = (unsigned long)nic;
2641 init_timer(&nic->blink_timer);
2642 nic->blink_timer.function = e100_blink_led;
2643 nic->blink_timer.data = (unsigned long)nic;
2644
c4028958 2645 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
2acdb1e0 2646
1da177e4
LT
2647 if((err = e100_alloc(nic))) {
2648 DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
2649 goto err_out_iounmap;
2650 }
2651
1da177e4
LT
2652 if((err = e100_eeprom_load(nic)))
2653 goto err_out_free;
2654
f92d8728
MC
2655 e100_phy_init(nic);
2656
1da177e4 2657 memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
a92dd923 2658 memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
948cd43f
JB
2659 if (!is_valid_ether_addr(netdev->perm_addr)) {
2660 if (!eeprom_bad_csum_allow) {
2661 DPRINTK(PROBE, ERR, "Invalid MAC address from "
2662 "EEPROM, aborting.\n");
2663 err = -EAGAIN;
2664 goto err_out_free;
2665 } else {
2666 DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
2667 "you MUST configure one.\n");
2668 }
1da177e4
LT
2669 }
2670
2671 /* Wol magic packet can be enabled from eeprom */
2672 if((nic->mac >= mac_82558_D101_A4) &&
2673 (nic->eeprom[eeprom_id] & eeprom_id_wol))
2674 nic->flags |= wol_magic;
2675
6bdacb1a 2676 /* ack any pending wake events, disable PME */
3435dbce
JB
2677 err = pci_enable_wake(pdev, 0, 0);
2678 if (err)
2679 DPRINTK(PROBE, ERR, "Error clearing wake event\n");
1da177e4
LT
2680
2681 strcpy(netdev->name, "eth%d");
2682 if((err = register_netdev(netdev))) {
2683 DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
2684 goto err_out_free;
2685 }
2686
7c7459d1 2687 DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, "
1da177e4 2688 "MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
27345bb6 2689 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0), pdev->irq,
1da177e4
LT
2690 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
2691 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
2692
2693 return 0;
2694
2695err_out_free:
2696 e100_free(nic);
2697err_out_iounmap:
27345bb6 2698 pci_iounmap(pdev, nic->csr);
1da177e4
LT
2699err_out_free_res:
2700 pci_release_regions(pdev);
2701err_out_disable_pdev:
2702 pci_disable_device(pdev);
2703err_out_free_dev:
2704 pci_set_drvdata(pdev, NULL);
2705 free_netdev(netdev);
2706 return err;
2707}
2708
2709static void __devexit e100_remove(struct pci_dev *pdev)
2710{
2711 struct net_device *netdev = pci_get_drvdata(pdev);
2712
2713 if(netdev) {
2714 struct nic *nic = netdev_priv(netdev);
2715 unregister_netdev(netdev);
2716 e100_free(nic);
2717 iounmap(nic->csr);
2718 free_netdev(netdev);
2719 pci_release_regions(pdev);
2720 pci_disable_device(pdev);
2721 pci_set_drvdata(pdev, NULL);
2722 }
2723}
2724
e8e82b76 2725#ifdef CONFIG_PM
1da177e4
LT
2726static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
2727{
2728 struct net_device *netdev = pci_get_drvdata(pdev);
2729 struct nic *nic = netdev_priv(netdev);
2730
824545e7 2731 if (netif_running(netdev))
bea3348e 2732 napi_disable(&nic->napi);
e8e82b76
AK
2733 del_timer_sync(&nic->watchdog);
2734 netif_carrier_off(nic->netdev);
518d8338 2735 netif_device_detach(netdev);
a53a33da 2736
1da177e4 2737 pci_save_state(pdev);
e8e82b76
AK
2738
2739 if ((nic->flags & wol_magic) | e100_asf(nic)) {
2740 pci_enable_wake(pdev, PCI_D3hot, 1);
2741 pci_enable_wake(pdev, PCI_D3cold, 1);
2742 } else {
2743 pci_enable_wake(pdev, PCI_D3hot, 0);
2744 pci_enable_wake(pdev, PCI_D3cold, 0);
2745 }
975b366a 2746
1da177e4 2747 pci_disable_device(pdev);
518d8338 2748 free_irq(pdev->irq, netdev);
e8e82b76 2749 pci_set_power_state(pdev, PCI_D3hot);
1da177e4
LT
2750
2751 return 0;
2752}
2753
2754static int e100_resume(struct pci_dev *pdev)
2755{
2756 struct net_device *netdev = pci_get_drvdata(pdev);
2757 struct nic *nic = netdev_priv(netdev);
2758
975b366a 2759 pci_set_power_state(pdev, PCI_D0);
1da177e4 2760 pci_restore_state(pdev);
6bdacb1a 2761 /* ack any pending wake events, disable PME */
975b366a 2762 pci_enable_wake(pdev, 0, 0);
1da177e4
LT
2763
2764 netif_device_attach(netdev);
975b366a 2765 if (netif_running(netdev))
1da177e4
LT
2766 e100_up(nic);
2767
2768 return 0;
2769}
975b366a 2770#endif /* CONFIG_PM */
1da177e4 2771
d18c3db5 2772static void e100_shutdown(struct pci_dev *pdev)
6bdacb1a 2773{
e8e82b76
AK
2774 struct net_device *netdev = pci_get_drvdata(pdev);
2775 struct nic *nic = netdev_priv(netdev);
2776
824545e7 2777 if (netif_running(netdev))
bea3348e 2778 napi_disable(&nic->napi);
e8e82b76
AK
2779 del_timer_sync(&nic->watchdog);
2780 netif_carrier_off(nic->netdev);
2781
2782 if ((nic->flags & wol_magic) | e100_asf(nic)) {
2783 pci_enable_wake(pdev, PCI_D3hot, 1);
2784 pci_enable_wake(pdev, PCI_D3cold, 1);
2785 } else {
2786 pci_enable_wake(pdev, PCI_D3hot, 0);
2787 pci_enable_wake(pdev, PCI_D3cold, 0);
2788 }
2789
2790 pci_disable_device(pdev);
2791 pci_set_power_state(pdev, PCI_D3hot);
6bdacb1a
MC
2792}
2793
2cc30492
AK
2794/* ------------------ PCI Error Recovery infrastructure -------------- */
2795/**
2796 * e100_io_error_detected - called when PCI error is detected.
2797 * @pdev: Pointer to PCI device
2798 * @state: The current pci conneection state
2799 */
2800static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
2801{
2802 struct net_device *netdev = pci_get_drvdata(pdev);
bea3348e 2803 struct nic *nic = netdev_priv(netdev);
2cc30492
AK
2804
2805 /* Similar to calling e100_down(), but avoids adpater I/O. */
2806 netdev->stop(netdev);
2807
2808 /* Detach; put netif into state similar to hotplug unplug. */
bea3348e 2809 napi_enable(&nic->napi);
2cc30492 2810 netif_device_detach(netdev);
b1d26f24 2811 pci_disable_device(pdev);
2cc30492
AK
2812
2813 /* Request a slot reset. */
2814 return PCI_ERS_RESULT_NEED_RESET;
2815}
2816
2817/**
2818 * e100_io_slot_reset - called after the pci bus has been reset.
2819 * @pdev: Pointer to PCI device
2820 *
2821 * Restart the card from scratch.
2822 */
2823static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
2824{
2825 struct net_device *netdev = pci_get_drvdata(pdev);
2826 struct nic *nic = netdev_priv(netdev);
2827
2828 if (pci_enable_device(pdev)) {
2829 printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
2830 return PCI_ERS_RESULT_DISCONNECT;
2831 }
2832 pci_set_master(pdev);
2833
2834 /* Only one device per card can do a reset */
2835 if (0 != PCI_FUNC(pdev->devfn))
2836 return PCI_ERS_RESULT_RECOVERED;
2837 e100_hw_reset(nic);
2838 e100_phy_init(nic);
2839
2840 return PCI_ERS_RESULT_RECOVERED;
2841}
2842
2843/**
2844 * e100_io_resume - resume normal operations
2845 * @pdev: Pointer to PCI device
2846 *
2847 * Resume normal operations after an error recovery
2848 * sequence has been completed.
2849 */
2850static void e100_io_resume(struct pci_dev *pdev)
2851{
2852 struct net_device *netdev = pci_get_drvdata(pdev);
2853 struct nic *nic = netdev_priv(netdev);
2854
2855 /* ack any pending wake events, disable PME */
2856 pci_enable_wake(pdev, 0, 0);
2857
2858 netif_device_attach(netdev);
2859 if (netif_running(netdev)) {
2860 e100_open(netdev);
2861 mod_timer(&nic->watchdog, jiffies);
2862 }
2863}
2864
/* PCI error-recovery callbacks, referenced by e100_driver below. */
static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
6bdacb1a 2870
/* PCI driver glue: ID table, probe/remove, optional power-management
 * hooks, shutdown, and error-recovery handlers. */
static struct pci_driver e100_driver = {
	.name = DRV_NAME,
	.id_table = e100_id_table,
	.probe = e100_probe,
	.remove = __devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend = e100_suspend,
	.resume = e100_resume,
#endif
	.shutdown = e100_shutdown,
	.err_handler = &e100_err_handler,
};
2884
2885static int __init e100_init_module(void)
2886{
2887 if(((1 << debug) - 1) & NETIF_MSG_DRV) {
2888 printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
2889 printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
2890 }
29917620 2891 return pci_register_driver(&e100_driver);
1da177e4
LT
2892}
2893
/* Module exit: unregister the PCI driver; per-device teardown runs
 * through e100_remove(). */
static void __exit e100_cleanup_module(void)
{
	pci_unregister_driver(&e100_driver);
}
2898
/* Standard module entry and exit points. */
module_init(e100_init_module);
module_exit(e100_cleanup_module);