/* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */
/*
	Written 1996-1999 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	This driver is for the 3Com "Vortex" and "Boomerang" series ethercards.
	Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597
	and the EtherLink XL 3c900 and 3c905 cards.

	Problem reports and questions should be directed to
	vortex@scyld.com

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

*/

/*
 * FIXME: This driver _could_ support MTU changing, but doesn't.  See Don's hamachi.c implementation
 * as well as other drivers
 *
 * NOTE: If you make 'vortex_debug' a constant (#define vortex_debug 0) the driver shrinks by 2k
 * due to dead code elimination.  There will be some performance benefits from this due to
 * elimination of all the tests and reduced cache footprint.
 */


#define DRV_NAME	"3c59x"



/* A few values that may be tweaked. */
/* Keep the ring sizes a power of two for efficiency. */
#define TX_RING_SIZE	16
#define RX_RING_SIZE	32
#define PKT_BUF_SZ	1536		/* Size of each temporary Rx buffer.*/

/* "Knobs" that adjust features and parameters. */
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1512 effectively disables this feature. */
#ifndef __arm__
static int rx_copybreak = 200;
#else
/* ARM systems perform better by disregarding the bus-master
   transfer capability of these cards. -- rmk */
static int rx_copybreak = 1513;
#endif
/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
static const int mtu = 1500;
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 32;
/* Tx timeout interval (millisecs) */
static int watchdog = 5000;

/* Allow aggregation of Tx interrupts.  Saves CPU load at the cost
 * of possible Tx stalls if the system is blocking interrupts
 * somewhere else.  Undefine this to disable.
 */
#define tx_interrupt_mitigation 1

/* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). */
#define vortex_debug debug
#ifdef VORTEX_DEBUG
static int vortex_debug = VORTEX_DEBUG;
#else
static int vortex_debug = 1;
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/mii.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/highmem.h>
#include <linux/eisa.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <asm/irq.h>			/* For nr_irqs only. */
#include <asm/io.h>
#include <asm/uaccess.h>

/* Kernel compatibility defines, some common to David Hinds' PCMCIA package.
   This is only in the support-all-kernels source code. */

#define RUN_AT(x) (jiffies + (x))

#include <linux/delay.h>


static const char version[] __devinitconst =
	DRV_NAME ": Donald Becker and others.\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver ");
MODULE_LICENSE("GPL");


/* Operational parameters that are not usually changed. */

/* The Vortex size is twice that of the original EtherLinkIII series: the
   runtime register window, window 1, is now always mapped in.
   The Boomerang size is twice as large as the Vortex -- it has additional
   bus master control registers. */
#define VORTEX_TOTAL_SIZE 0x20
#define BOOMERANG_TOTAL_SIZE 0x40

/* Set iff a MII transceiver on any interface requires mdio preamble.
   This is only set with the original DP83840 on older 3c905 boards, so the extra
   code size of a per-interface flag is not worthwhile. */
static char mii_preamble_required;

#define PFX DRV_NAME ": "



/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the 3Com FastEtherLink and FastEtherLink
XL, 3Com's PCI to 10/100baseT adapters.  It also works with the 10Mbps
versions of the FastEtherLink cards.  The supported product IDs are
  3c590, 3c592, 3c595, 3c597, 3c900, 3c905

The related ISA 3c515 is supported with a separate driver, 3c515.c, included
with the kernel source or available from
    cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS should be set to assign the
PCI INTA signal to an otherwise unused system IRQ line.

The EEPROM settings for media type and forced-full-duplex are observed.
The EEPROM media type should be left at the default "autoselect" unless using
10base2 or AUI connections which cannot be reliably detected.

III. Driver operation

The 3c59x series use an interface that's very similar to the previous 3c5x9
series.  The primary interface is two programmed-I/O FIFOs, with an
alternate single-contiguous-region bus-master transfer (see next).

The 3c900 "Boomerang" series uses a full-bus-master interface with separate
lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet,
DEC Tulip and Intel Speedo3.  The first chip version retains a compatible
programmed-I/O interface that has been removed in 'B' and subsequent board
revisions.

One extension that is advertised in a very large font is that the adapters
are capable of being bus masters.  On the Vortex chip this capability was
only for a single contiguous region making it far less useful than the full
bus master capability.  There is a significant performance impact of taking
an extra interrupt or polling for the completion of each transfer, as well
as difficulty sharing the single transfer engine between the transmit and
receive threads.  Using DMA transfers is a win only with large blocks or
with the flawed versions of the Intel Orion motherboard PCI controller.

The Boomerang chip's full-bus-master interface is useful, and has the
currently-unused advantages over other similar chips that queued transmit
packets may be reordered and receive buffer groups are associated with a
single frame.

With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme.
Rather than a fixed intermediate receive buffer, this scheme allocates
full-sized skbuffs as receive buffers.  The value RX_COPYBREAK is used as
the copying breakpoint: it is chosen to trade off the memory wasted by
passing the full-sized skbuff to the queue layer for all frames vs. the
copying cost of copying a frame to a correctly-sized skbuff.

IIIC. Synchronization
The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and other software.

IV. Notes

Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development
3c590, 3c595, and 3c900 boards.
The name "Vortex" is the internal 3Com project name for the PCI ASIC, and
the EISA version is called "Demon".  According to Terry these names come
from rides at the local amusement park.

The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes!
This driver only supports ethernet packets because of the skbuff allocation
limit of 4K.
*/
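
/*
 * Illustration (not part of the original sources): a minimal sketch of the
 * rx_copybreak decision described above, assuming a received frame of
 * pkt_len bytes sitting in a full-sized ring skbuff rx_skb (both names are
 * hypothetical here).  Small frames are copied into a freshly allocated,
 * correctly-sized skbuff so the big buffer can stay in the Rx ring; larger
 * frames are handed to the stack as-is and the ring slot is refilled.
 *
 *	if (pkt_len < rx_copybreak) {
 *		struct sk_buff *copy = netdev_alloc_skb(dev, pkt_len + 2);
 *		if (copy) {
 *			skb_reserve(copy, 2);		   align the IP header
 *			skb_copy_to_linear_data(copy, rx_skb->data, pkt_len);
 *			skb_put(copy, pkt_len);
 *			   pass copy up, keep rx_skb in the ring
 *		}
 *	} else {
 *		   pass rx_skb up and allocate a replacement for the ring
 *	}
 */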

/* This table drives the PCI probe routines.  It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
*/
enum pci_flags_bit {
	PCI_USES_MASTER=4,
};

enum {	IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8,
	EEPROM_8BIT=0x10,	/* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */
	HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100,
	INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800,
	EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000, WNO_XCVR_PWR=0x4000,
	EXTRA_PREAMBLE=0x8000, EEPROM_RESET=0x10000, };

enum vortex_chips {
	CH_3C590 = 0,
	CH_3C592,
	CH_3C597,
	CH_3C595_1,
	CH_3C595_2,

	CH_3C595_3,
	CH_3C900_1,
	CH_3C900_2,
	CH_3C900_3,
	CH_3C900_4,

	CH_3C900_5,
	CH_3C900B_FL,
	CH_3C905_1,
	CH_3C905_2,
	CH_3C905B_TX,
	CH_3C905B_1,

	CH_3C905B_2,
	CH_3C905B_FX,
	CH_3C905C,
	CH_3C9202,
	CH_3C980,
	CH_3C9805,

	CH_3CSOHO100_TX,
	CH_3C555,
	CH_3C556,
	CH_3C556B,
	CH_3C575,

	CH_3C575_1,
	CH_3CCFE575,
	CH_3CCFE575CT,
	CH_3CCFE656,
	CH_3CCFEM656,

	CH_3CCFEM656_1,
	CH_3C450,
	CH_3C920,
	CH_3C982A,
	CH_3C982B,

	CH_905BT4,
	CH_920B_EMB_WNM,
};


/* note: this array is directly indexed by the above enums, and MUST
 * be kept in sync with both the enums above, and the PCI device
 * table below
 */
static struct vortex_chip_info {
	const char *name;
	int flags;
	int drv_flags;
	int io_size;
} vortex_info_tbl[] __devinitdata = {
	{"3c590 Vortex 10Mbps",
	 PCI_USES_MASTER, IS_VORTEX, 32, },
	{"3c592 EISA 10Mbps Demon/Vortex",			/* AKPM: from Don's 3c59x_cb.c 0.49H */
	 PCI_USES_MASTER, IS_VORTEX, 32, },
	{"3c597 EISA Fast Demon/Vortex",			/* AKPM: from Don's 3c59x_cb.c 0.49H */
	 PCI_USES_MASTER, IS_VORTEX, 32, },
	{"3c595 Vortex 100baseTx",
	 PCI_USES_MASTER, IS_VORTEX, 32, },
	{"3c595 Vortex 100baseT4",
	 PCI_USES_MASTER, IS_VORTEX, 32, },

	{"3c595 Vortex 100base-MII",
	 PCI_USES_MASTER, IS_VORTEX, 32, },
	{"3c900 Boomerang 10baseT",
	 PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
	{"3c900 Boomerang 10Mbps Combo",
	 PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
	{"3c900 Cyclone 10Mbps TPO",				/* AKPM: from Don's 0.99M */
	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
	{"3c900 Cyclone 10Mbps Combo",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },

	{"3c900 Cyclone 10Mbps TPC",				/* AKPM: from Don's 0.99M */
	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
	{"3c900B-FL Cyclone 10base-FL",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
	{"3c905 Boomerang 100baseTx",
	 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
	{"3c905 Boomerang 100baseT4",
	 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
	{"3C905B-TX Fast Etherlink XL PCI",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
	{"3c905B Cyclone 100baseTx",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },

	{"3c905B Cyclone 10/100/BNC",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
	{"3c905B-FX Cyclone 100baseFx",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
	{"3c905C Tornado",
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
	{"3c920B-EMB-WNM (ATI Radeon 9100 IGP)",
	 PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, },
	{"3c980 Cyclone",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },

	{"3c980C Python-T",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
	{"3cSOHO100-TX Hurricane",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
	{"3c555 Laptop Hurricane",
	 PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, },
	{"3c556 Laptop Tornado",
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR|
									HAS_HWCKSM, 128, },
	{"3c556B Laptop Hurricane",
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR|
									WNO_XCVR_PWR|HAS_HWCKSM, 128, },

	{"3c575 [Megahertz] 10/100 LAN CardBus",
	 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
	{"3c575 Boomerang CardBus",
	 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
	{"3CCFE575BT Cyclone CardBus",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|
									INVERT_LED_PWR|HAS_HWCKSM, 128, },
	{"3CCFE575CT Tornado CardBus",
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
									MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
	{"3CCFE656 Cyclone CardBus",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
									INVERT_LED_PWR|HAS_HWCKSM, 128, },

	{"3CCFEM656B Cyclone+Winmodem CardBus",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
									INVERT_LED_PWR|HAS_HWCKSM, 128, },
	{"3CXFEM656C Tornado+Winmodem CardBus",			/* From pcmcia-cs-3.1.5 */
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
									MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
	{"3c450 HomePNA Tornado",				/* AKPM: from Don's 0.99Q */
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
	{"3c920 Tornado",
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
	{"3c982 Hydra Dual Port A",
	 PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },

	{"3c982 Hydra Dual Port B",
	 PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },
	{"3c905B-T4",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
	{"3c920B-EMB-WNM Tornado",
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },

	{NULL,}, /* NULL terminated list. */
};


static DEFINE_PCI_DEVICE_TABLE(vortex_pci_tbl) = {
	{ 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 },
	{ 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 },
	{ 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 },
	{ 0x10B7, 0x5950, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_1 },
	{ 0x10B7, 0x5951, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_2 },

	{ 0x10B7, 0x5952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_3 },
	{ 0x10B7, 0x9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_1 },
	{ 0x10B7, 0x9001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_2 },
	{ 0x10B7, 0x9004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_3 },
	{ 0x10B7, 0x9005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_4 },

	{ 0x10B7, 0x9006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_5 },
	{ 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL },
	{ 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 },
	{ 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 },
	{ 0x10B7, 0x9054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_TX },
	{ 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 },

	{ 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 },
	{ 0x10B7, 0x905A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_FX },
	{ 0x10B7, 0x9200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C },
	{ 0x10B7, 0x9202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9202 },
	{ 0x10B7, 0x9800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C980 },
	{ 0x10B7, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9805 },

	{ 0x10B7, 0x7646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CSOHO100_TX },
	{ 0x10B7, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C555 },
	{ 0x10B7, 0x6055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556 },
	{ 0x10B7, 0x6056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556B },
	{ 0x10B7, 0x5b57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575 },

	{ 0x10B7, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575_1 },
	{ 0x10B7, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575 },
	{ 0x10B7, 0x5257, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575CT },
	{ 0x10B7, 0x6560, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE656 },
	{ 0x10B7, 0x6562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656 },

	{ 0x10B7, 0x6564, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656_1 },
	{ 0x10B7, 0x4500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C450 },
	{ 0x10B7, 0x9201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C920 },
	{ 0x10B7, 0x1201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982A },
	{ 0x10B7, 0x1202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982B },

	{ 0x10B7, 0x9056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_905BT4 },
	{ 0x10B7, 0x9210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_920B_EMB_WNM },

	{0,}						/* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, vortex_pci_tbl);


/* Operational definitions.
   These are not used by other compilation units and thus are not
   exported in a ".h" file.

   First the windows.  There are eight register windows, with the command
   and status registers available in each.
   */
#define EL3_CMD 0x0e
#define EL3_STATUS 0x0e

/* The top five bits written to EL3_CMD are a command, the lower
   11 bits are the parameter, if applicable.
   Note that 11 parameter bits were fine for ethernet, but the new chip
   can handle FDDI length frames (~4500 octets) and now parameters count
   32-bit 'Dwords' rather than octets. */

enum vortex_cmd {
	TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
	RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11,
	UpStall = 6<<11, UpUnstall = (6<<11)+1,
	DownStall = (6<<11)+2, DownUnstall = (6<<11)+3,
	RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
	FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
	SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
	SetTxThreshold = 18<<11, SetTxStart = 19<<11,
	StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11,
	StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,};
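
/*
 * Illustration (not part of the original sources): how a command word is
 * composed, assuming `ioaddr` is the mapped register block.  The upper five
 * bits select the command and the low 11 bits carry its argument, so e.g.
 * enabling the station-address and broadcast Rx filters (using the RxFilter
 * bits defined just below) is a single 16-bit write:
 *
 *	iowrite16(SetRxFilter | (RxStation | RxBroadcast), ioaddr + EL3_CMD);
 */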

/* The SetRxFilter command accepts the following classes: */
enum RxFilter {
	RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };

/* Bits in the general status register. */
enum vortex_status {
	IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004,
	TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
	IntReq = 0x0040, StatsFull = 0x0080,
	DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10,
	DMAInProgress = 1<<11,			/* DMA controller is still busy.*/
	CmdInProgress = 1<<12,			/* EL3_CMD is still busy.*/
};

/* Register window 1 offsets, the window used in normal operation.
   On the Vortex this window is always mapped at offsets 0x10-0x1f. */
enum Window1 {
	TX_FIFO = 0x10,  RX_FIFO = 0x10,  RxErrors = 0x14,
	RxStatus = 0x18,  Timer=0x1A, TxStatus = 0x1B,
	TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */
};
enum Window0 {
	Wn0EepromCmd = 10,		/* Window 0: EEPROM command register. */
	Wn0EepromData = 12,		/* Window 0: EEPROM results register. */
	IntrStatus=0x0E,		/* Valid in all windows. */
};
enum Win0_EEPROM_bits {
	EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
	EEPROM_EWENB = 0x30,		/* Enable erasing/writing for 10 msec. */
	EEPROM_EWDIS = 0x00,		/* Disable EWENB before 10 msec timeout. */
};
/* EEPROM locations. */
enum eeprom_offset {
	PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3,
	EtherLink3ID=7, IFXcvrIO=8, IRQLine=9,
	NodeAddr01=10, NodeAddr23=11, NodeAddr45=12,
	DriverTune=13, Checksum=15};
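
/*
 * Illustration (not part of the original sources): reading one EEPROM word,
 * assuming window 0 is selected and `ioaddr` is the mapped register block.
 * A read is started by writing EEPROM_Read plus the word address to
 * Wn0EepromCmd, then polling until the busy bit (0x8000) clears, much as
 * vortex_probe1() does further down; `hwaddr_word0` is a hypothetical local:
 *
 *	iowrite16(EEPROM_Read + PhysAddr01, ioaddr + Wn0EepromCmd);
 *	while (ioread16(ioaddr + Wn0EepromCmd) & 0x8000)
 *		udelay(162);
 *	hwaddr_word0 = ioread16(ioaddr + Wn0EepromData);
 */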

enum Window2 {			/* Window 2. */
	Wn2_ResetOptions=12,
};
enum Window3 {			/* Window 3: MAC/config bits. */
	Wn3_Config=0, Wn3_MaxPktSize=4, Wn3_MAC_Ctrl=6, Wn3_Options=8,
};

#define BFEXT(value, offset, bitcount)	\
	((((unsigned long)(value)) >> (offset)) & ((1 << (bitcount)) - 1))

#define BFINS(lhs, rhs, offset, bitcount)			\
	(((lhs) & ~((((1 << (bitcount)) - 1)) << (offset))) |	\
	(((rhs) & ((1 << (bitcount)) - 1)) << (offset)))

#define RAM_SIZE(v)		BFEXT(v, 0, 3)
#define RAM_WIDTH(v)		BFEXT(v, 3, 1)
#define RAM_SPEED(v)		BFEXT(v, 4, 2)
#define ROM_SIZE(v)		BFEXT(v, 6, 2)
#define RAM_SPLIT(v)		BFEXT(v, 16, 2)
#define XCVR(v)			BFEXT(v, 20, 4)
#define AUTOSELECT(v)		BFEXT(v, 24, 1)

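/*
 * Illustration (not part of the original sources): decoding a Wn3_Config
 * value with the helpers above.  For a hypothetical config word of
 * 0x01400000, XCVR(config) is 4 (XCVR_100baseTx, bits 20-23) and
 * AUTOSELECT(config) is 1 (bit 24), i.e. the EEPROM asks for media
 * autoselection starting from 100baseTx:
 *
 *	u32 config  = 0x01400000;
 *	int media   = XCVR(config);
 *	int autosel = AUTOSELECT(config);
 */
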
enum Window4 {		/* Window 4: Xcvr/media bits. */
	Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10,
};
enum Win4_Media_bits {
	Media_SQE = 0x0008,		/* Enable SQE error counting for AUI. */
	Media_10TP = 0x00C0,		/* Enable link beat and jabber for 10baseT. */
	Media_Lnk = 0x0080,		/* Enable just link beat for 100TX/100FX. */
	Media_LnkBeat = 0x0800,
};
enum Window7 {			/* Window 7: Bus Master control. */
	Wn7_MasterAddr = 0,  Wn7_VlanEtherType=4, Wn7_MasterLen = 6,
	Wn7_MasterStatus = 12,
};
/* Boomerang bus master control registers. */
enum MasterCtrl {
	PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c,
	TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38,
};

/* The Rx and Tx descriptor lists.
   Caution Alpha hackers: these types are 32 bits!  Note also the 8 byte
   alignment constraint on tx_ring[] and rx_ring[]. */
#define LAST_FRAG	0x80000000		/* Last Addr/Len pair in descriptor. */
#define DN_COMPLETE	0x00010000		/* This packet has been downloaded */
struct boom_rx_desc {
	__le32 next;			/* Last entry points to 0.   */
	__le32 status;
	__le32 addr;			/* Up to 63 addr/len pairs possible. */
	__le32 length;			/* Set LAST_FRAG to indicate last pair. */
};
/* Values for the Rx status entry. */
enum rx_desc_status {
	RxDComplete=0x00008000, RxDError=0x4000,
	/* See boomerang_rx() for actual error bits */
	IPChksumErr=1<<25, TCPChksumErr=1<<26, UDPChksumErr=1<<27,
	IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31,
};

#ifdef MAX_SKB_FRAGS
#define DO_ZEROCOPY 1
#else
#define DO_ZEROCOPY 0
#endif

struct boom_tx_desc {
	__le32 next;			/* Last entry points to 0.   */
	__le32 status;			/* bits 0:12 length, others see below.  */
#if DO_ZEROCOPY
	struct {
		__le32 addr;
		__le32 length;
	} frag[1+MAX_SKB_FRAGS];
#else
	__le32 addr;
	__le32 length;
#endif
};

/* Values for the Tx status entry. */
enum tx_desc_status {
	CRCDisable=0x2000, TxDComplete=0x8000,
	AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000,
	TxIntrUploaded=0x80000000,	/* IRQ when in FIFO, but maybe not sent. */
};
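
/*
 * Illustration (not part of the original sources): a plausible Tx descriptor
 * status word for a 1514-byte frame that should raise an interrupt once it
 * has been downloaded to the FIFO, with TCP checksum offload requested
 * (`desc` being a hypothetical struct boom_tx_desc pointer):
 *
 *	desc->status = cpu_to_le32(1514 | TxIntrUploaded | AddTCPChksum);
 *
 * Bits 0:12 carry the length; the remaining bits are the flags above.
 */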

/* Chip features we care about in vp->capabilities, read from the EEPROM. */
enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 };

struct vortex_extra_stats {
	unsigned long tx_deferred;
	unsigned long tx_max_collisions;
	unsigned long tx_multiple_collisions;
	unsigned long tx_single_collisions;
	unsigned long rx_bad_ssd;
};

struct vortex_private {
	/* The Rx and Tx rings should be quad-word-aligned. */
	struct boom_rx_desc* rx_ring;
	struct boom_tx_desc* tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;
	/* The addresses of transmit- and receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	unsigned int cur_rx, cur_tx;		/* The next free ring entry */
	unsigned int dirty_rx, dirty_tx;	/* The ring entries to be free()ed. */
	struct vortex_extra_stats xstats;	/* NIC-specific extra stats */
	struct sk_buff *tx_skb;			/* Packet being eaten by bus master ctrl.  */
	dma_addr_t tx_skb_dma;			/* Allocated DMA address for bus master ctrl DMA.   */

	/* PCI configuration space information. */
	struct device *gendev;
	void __iomem *ioaddr;			/* IO address space */
	void __iomem *cb_fn_base;		/* CardBus function status addr space. */

	/* Some values here only for performance evaluation and path-coverage */
	int rx_nocopy, rx_copy, queued_packet, rx_csumhits;
	int card_idx;

	/* The remainder are related to chip state, mostly media selection. */
	struct timer_list timer;		/* Media selection timer. */
	struct timer_list rx_oom_timer;		/* Rx skb allocation retry timer */
	int options;				/* User-settable misc. driver options. */
	unsigned int media_override:4,		/* Passed-in media type. */
		default_media:4,		/* Read from the EEPROM/Wn3_Config. */
		full_duplex:1, autoselect:1,
		bus_master:1,			/* Vortex can only do a fragment bus-m. */
		full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang  */
		flow_ctrl:1,			/* Use 802.3x flow control (PAUSE only) */
		partner_flow_ctrl:1,		/* Partner supports flow control */
		has_nway:1,
		enable_wol:1,			/* Wake-on-LAN is enabled */
		pm_state_valid:1,		/* pci_dev->saved_config_space has sane contents */
		open:1,
		medialock:1,
		must_free_region:1,		/* Flag: if zero, Cardbus owns the I/O region */
		large_frames:1;			/* accept large frames */
	int drv_flags;
	u16 status_enable;
	u16 intr_enable;
	u16 available_media;			/* From Wn3_Options. */
	u16 capabilities, info1, info2;		/* Various, from EEPROM. */
	u16 advertising;			/* NWay media advertisement */
	unsigned char phys[2];			/* MII device addresses. */
	u16 deferred;				/* Resend these interrupts when we
						 * bail from the ISR */
	u16 io_size;				/* Size of PCI region (for release_region) */
	spinlock_t lock;			/* Serialise access to device & its vortex_private */
	struct mii_if_info mii;			/* MII lib hooks/info */
	int window;				/* Register window */
};

static void window_set(struct vortex_private *vp, int window)
{
	if (window != vp->window) {
		iowrite16(SelectWindow + window, vp->ioaddr + EL3_CMD);
		vp->window = window;
	}
}

#define DEFINE_WINDOW_IO(size)						\
static u ## size							\
window_read ## size(struct vortex_private *vp, int window, int addr)	\
{									\
	window_set(vp, window);						\
	return ioread ## size(vp->ioaddr + addr);			\
}									\
static void								\
window_write ## size(struct vortex_private *vp, u ## size value,	\
		     int window, int addr)				\
{									\
	window_set(vp, window);						\
	iowrite ## size(value, vp->ioaddr + addr);			\
}
DEFINE_WINDOW_IO(8)
DEFINE_WINDOW_IO(16)
DEFINE_WINDOW_IO(32)

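/*
 * Note (not part of the original sources): the three DEFINE_WINDOW_IO()
 * expansions above generate window_read8/16/32() and window_write8/16/32().
 * Each accessor selects the register window first (cached in vp->window by
 * window_set(), so a redundant SelectWindow command is skipped); reading the
 * window-4 media register, for example, is simply:
 *
 *	u16 media = window_read16(vp, 4, Wn4_Media);
 */
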
#ifdef CONFIG_PCI
#define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL)
#else
#define DEVICE_PCI(dev) NULL
#endif

#define VORTEX_PCI(vp) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL)

#ifdef CONFIG_EISA
#define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL)
#else
#define DEVICE_EISA(dev) NULL
#endif

#define VORTEX_EISA(vp) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL)

/* The action to take with a media selection timer tick.
   Note that we deviate from the 3Com order by checking 10base2 before AUI.
 */
enum xcvr_types {
	XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
	XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10,
};

static const struct media_table {
	char *name;
	unsigned int media_bits:16,	/* Bits to set in Wn4_Media register. */
		mask:8,			/* The transceiver-present bit in Wn3_Config.*/
		next:8;			/* The media type to try next. */
	int wait;			/* Time before we check media status. */
} media_tbl[] = {
	{ "10baseT",   Media_10TP,0x08, XCVR_10base2, (14*HZ)/10},
	{ "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10},
	{ "undefined", 0,	  0x80, XCVR_10baseT, 10000},
	{ "10base2",   0,	  0x10, XCVR_AUI,     (1*HZ)/10},
	{ "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10},
	{ "100baseFX", Media_Lnk, 0x04, XCVR_MII,     (14*HZ)/10},
	{ "MII",       0,	  0x41, XCVR_10baseT, 3*HZ },
	{ "undefined", 0,	  0x01, XCVR_10baseT, 10000},
	{ "Autonegotiate", 0,	  0x41, XCVR_10baseT, 3*HZ},
	{ "MII-External",  0,	  0x41, XCVR_10baseT, 3*HZ },
	{ "Default",   0,	  0xFF, XCVR_10baseT, 10000},
};
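
/*
 * Note (not part of the original sources): the .next fields above form the
 * fallback chain used when autoselecting without NWAY.  Starting from
 * XCVR_100baseTx the driver walks 100baseTX -> 100baseFX -> MII -> 10baseT
 * -> 10base2 -> AUI -> Default until media_tbl[].mask matches a transceiver
 * advertised in vp->available_media (see vortex_up() below).
 */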

static struct {
	const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "tx_deferred" },
	{ "tx_max_collisions" },
	{ "tx_multiple_collisions" },
	{ "tx_single_collisions" },
	{ "rx_bad_ssd" },
};

/* number of ETHTOOL_GSTATS u64's */
#define VORTEX_NUM_STATS 5

static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
				   int chip_idx, int card_idx);
static int vortex_up(struct net_device *dev);
static void vortex_down(struct net_device *dev, int final);
static int vortex_open(struct net_device *dev);
static void mdio_sync(struct vortex_private *vp, int bits);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
static void vortex_timer(unsigned long arg);
static void rx_oom_timer(unsigned long arg);
static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
				     struct net_device *dev);
static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
					struct net_device *dev);
static int vortex_rx(struct net_device *dev);
static int boomerang_rx(struct net_device *dev);
static irqreturn_t vortex_interrupt(int irq, void *dev_id);
static irqreturn_t boomerang_interrupt(int irq, void *dev_id);
static int vortex_close(struct net_device *dev);
static void dump_tx_ring(struct net_device *dev);
static void update_stats(void __iomem *ioaddr, struct net_device *dev);
static struct net_device_stats *vortex_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
#ifdef CONFIG_PCI
static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
#endif
static void vortex_tx_timeout(struct net_device *dev);
static void acpi_set_WOL(struct net_device *dev);
static const struct ethtool_ops vortex_ethtool_ops;
static void set_8021q_mode(struct net_device *dev, int enable);

/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
/* Option count limit only -- unlimited interfaces are supported. */
#define MAX_UNITS 8
static int options[MAX_UNITS] = { [0 ... MAX_UNITS-1] = -1 };
static int full_duplex[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int hw_checksums[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int flow_ctrl[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int enable_wol[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int use_mmio[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int global_options = -1;
static int global_full_duplex = -1;
static int global_enable_wol = -1;
static int global_use_mmio = -1;

/* Variables to work-around the Compaq PCI BIOS32 problem. */
static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900;
static struct net_device *compaq_net_device;

static int vortex_cards_found;

module_param(debug, int, 0);
module_param(global_options, int, 0);
module_param_array(options, int, NULL, 0);
module_param(global_full_duplex, int, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param_array(hw_checksums, int, NULL, 0);
module_param_array(flow_ctrl, int, NULL, 0);
module_param(global_enable_wol, int, 0);
module_param_array(enable_wol, int, NULL, 0);
module_param(rx_copybreak, int, 0);
module_param(max_interrupt_work, int, 0);
module_param(compaq_ioaddr, int, 0);
module_param(compaq_irq, int, 0);
module_param(compaq_device_id, int, 0);
module_param(watchdog, int, 0);
module_param(global_use_mmio, int, 0);
module_param_array(use_mmio, int, NULL, 0);
MODULE_PARM_DESC(debug, "3c59x debug level (0-6)");
MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex");
MODULE_PARM_DESC(global_options, "3c59x: same as options, but applies to all NICs if options is unset");
MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)");
MODULE_PARM_DESC(global_full_duplex, "3c59x: same as full_duplex, but applies to all NICs if full_duplex is unset");
MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)");
MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)");
MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)");
MODULE_PARM_DESC(global_enable_wol, "3c59x: same as enable_wol, but applies to all NICs if enable_wol is unset");
MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt");
MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)");
MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)");
MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)");
MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds");
MODULE_PARM_DESC(global_use_mmio, "3c59x: same as use_mmio, but applies to all NICs if options is unset");
MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)");
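
/*
 * Illustration (not part of the original sources): how a per-card 'options'
 * value is interpreted by vortex_probe1() further down.  For example,
 * loading the module with options=0x204 forces media type 4 (100baseTX,
 * bits 0-3) and full duplex (bit 9); bit 4 would additionally enable the
 * Vortex-style single-fragment bus mastering:
 *
 *	modprobe 3c59x options=0x204
 */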

#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_vortex(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	unsigned long flags;
	local_irq_save(flags);
	(vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
	local_irq_restore(flags);
}
#endif

#ifdef CONFIG_PM

static int vortex_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *ndev = pci_get_drvdata(pdev);

	if (!ndev || !netif_running(ndev))
		return 0;

	netif_device_detach(ndev);
	vortex_down(ndev, 1);

	return 0;
}

static int vortex_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *ndev = pci_get_drvdata(pdev);
	int err;

	if (!ndev || !netif_running(ndev))
		return 0;

	err = vortex_up(ndev);
	if (err)
		return err;

	netif_device_attach(ndev);

	return 0;
}

static const struct dev_pm_ops vortex_pm_ops = {
	.suspend = vortex_suspend,
	.resume = vortex_resume,
	.freeze = vortex_suspend,
	.thaw = vortex_resume,
	.poweroff = vortex_suspend,
	.restore = vortex_resume,
};

#define VORTEX_PM_OPS (&vortex_pm_ops)

#else /* !CONFIG_PM */

#define VORTEX_PM_OPS NULL

#endif /* !CONFIG_PM */

#ifdef CONFIG_EISA
static struct eisa_device_id vortex_eisa_ids[] = {
	{ "TCM5920", CH_3C592 },
	{ "TCM5970", CH_3C597 },
	{ "" }
};
MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);

static int __init vortex_eisa_probe(struct device *device)
{
	void __iomem *ioaddr;
	struct eisa_device *edev;

	edev = to_eisa_device(device);

	if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME))
		return -EBUSY;

	ioaddr = ioport_map(edev->base_addr, VORTEX_TOTAL_SIZE);

	if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12,
			  edev->id.driver_data, vortex_cards_found)) {
		release_region(edev->base_addr, VORTEX_TOTAL_SIZE);
		return -ENODEV;
	}

	vortex_cards_found++;

	return 0;
}

static int __devexit vortex_eisa_remove(struct device *device)
{
	struct eisa_device *edev;
	struct net_device *dev;
	struct vortex_private *vp;
	void __iomem *ioaddr;

	edev = to_eisa_device(device);
	dev = eisa_get_drvdata(edev);

	if (!dev) {
		pr_err("vortex_eisa_remove called for Compaq device!\n");
		BUG();
	}

	vp = netdev_priv(dev);
	ioaddr = vp->ioaddr;

	unregister_netdev(dev);
	iowrite16(TotalReset|0x14, ioaddr + EL3_CMD);
	release_region(dev->base_addr, VORTEX_TOTAL_SIZE);

	free_netdev(dev);
	return 0;
}

static struct eisa_driver vortex_eisa_driver = {
	.id_table = vortex_eisa_ids,
	.driver   = {
		.name    = "3c59x",
		.probe   = vortex_eisa_probe,
		.remove  = __devexit_p(vortex_eisa_remove)
	}
};

#endif /* CONFIG_EISA */

/* returns count found (>= 0), or negative on error */
static int __init vortex_eisa_init(void)
{
	int eisa_found = 0;
	int orig_cards_found = vortex_cards_found;

#ifdef CONFIG_EISA
	int err;

	err = eisa_driver_register (&vortex_eisa_driver);
	if (!err) {
		/*
		 * Because of the way the EISA bus is probed, we cannot assume
		 * any device has been found when we exit from
		 * eisa_driver_register (the bus root driver may not be
		 * initialized yet). So we blindly assume something was
		 * found, and let the sysfs magic happen...
		 */
		eisa_found = 1;
	}
#endif

	/* Special code to work-around the Compaq PCI BIOS32 problem. */
	if (compaq_ioaddr) {
		vortex_probe1(NULL, ioport_map(compaq_ioaddr, VORTEX_TOTAL_SIZE),
			      compaq_irq, compaq_device_id, vortex_cards_found++);
	}

	return vortex_cards_found - orig_cards_found + eisa_found;
}

/* returns count (>= 0), or negative on error */
static int __devinit vortex_init_one(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	int rc, unit, pci_bar;
	struct vortex_chip_info *vci;
	void __iomem *ioaddr;

	/* wake up and enable device */
	rc = pci_enable_device(pdev);
	if (rc < 0)
		goto out;

	unit = vortex_cards_found;

	if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) {
		/* Determine the default if the user didn't override us */
		vci = &vortex_info_tbl[ent->driver_data];
		pci_bar = vci->drv_flags & (IS_CYCLONE | IS_TORNADO) ? 1 : 0;
	} else if (unit < MAX_UNITS && use_mmio[unit] >= 0)
		pci_bar = use_mmio[unit] ? 1 : 0;
	else
		pci_bar = global_use_mmio ? 1 : 0;

	ioaddr = pci_iomap(pdev, pci_bar, 0);
	if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
		ioaddr = pci_iomap(pdev, 0, 0);

	rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq,
			   ent->driver_data, unit);
	if (rc < 0) {
		pci_disable_device(pdev);
		goto out;
	}

	vortex_cards_found++;

out:
	return rc;
}

static const struct net_device_ops boomrang_netdev_ops = {
	.ndo_open		= vortex_open,
	.ndo_stop		= vortex_close,
	.ndo_start_xmit		= boomerang_start_xmit,
	.ndo_tx_timeout		= vortex_tx_timeout,
	.ndo_get_stats		= vortex_get_stats,
#ifdef CONFIG_PCI
	.ndo_do_ioctl		= vortex_ioctl,
#endif
	.ndo_set_multicast_list = set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_vortex,
#endif
};

static const struct net_device_ops vortex_netdev_ops = {
	.ndo_open		= vortex_open,
	.ndo_stop		= vortex_close,
	.ndo_start_xmit		= vortex_start_xmit,
	.ndo_tx_timeout		= vortex_tx_timeout,
	.ndo_get_stats		= vortex_get_stats,
#ifdef CONFIG_PCI
	.ndo_do_ioctl		= vortex_ioctl,
#endif
	.ndo_set_multicast_list = set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_vortex,
#endif
};

/*
 * Start up the PCI/EISA device which is described by *gendev.
 * Return 0 on success.
 *
 * NOTE: pdev can be NULL, for the case of a Compaq device
 */
static int __devinit vortex_probe1(struct device *gendev,
				   void __iomem *ioaddr, int irq,
				   int chip_idx, int card_idx)
{
	struct vortex_private *vp;
	int option;
	unsigned int eeprom[0x40], checksum = 0;	/* EEPROM contents */
	int i, step;
	struct net_device *dev;
	static int printed_version;
	int retval, print_info;
	struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx];
	const char *print_name = "3c59x";
	struct pci_dev *pdev = NULL;
	struct eisa_device *edev = NULL;

	if (!printed_version) {
		pr_info("%s", version);
		printed_version = 1;
	}

	if (gendev) {
		if ((pdev = DEVICE_PCI(gendev))) {
			print_name = pci_name(pdev);
		}

		if ((edev = DEVICE_EISA(gendev))) {
			print_name = dev_name(&edev->dev);
		}
	}

	dev = alloc_etherdev(sizeof(*vp));
	retval = -ENOMEM;
	if (!dev) {
		pr_err(PFX "unable to allocate etherdev, aborting\n");
		goto out;
	}
	SET_NETDEV_DEV(dev, gendev);
	vp = netdev_priv(dev);

	option = global_options;

	/* The lower four bits are the media type. */
	if (dev->mem_start) {
		/*
		 * The 'options' param is passed in as the third arg to the
		 * LILO 'ether=' argument for non-modular use
		 */
		option = dev->mem_start;
	}
	else if (card_idx < MAX_UNITS) {
		if (options[card_idx] >= 0)
			option = options[card_idx];
	}

	if (option > 0) {
		if (option & 0x8000)
			vortex_debug = 7;
		if (option & 0x4000)
			vortex_debug = 2;
		if (option & 0x0400)
			vp->enable_wol = 1;
	}

	print_info = (vortex_debug > 1);
	if (print_info)
		pr_info("See Documentation/networking/vortex.txt\n");

	pr_info("%s: 3Com %s %s at %p.\n",
	       print_name,
	       pdev ? "PCI" : "EISA",
	       vci->name,
	       ioaddr);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;
	dev->mtu = mtu;
	vp->ioaddr = ioaddr;
	vp->large_frames = mtu > 1500;
	vp->drv_flags = vci->drv_flags;
	vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
	vp->io_size = vci->io_size;
	vp->card_idx = card_idx;
	vp->window = -1;

	/* module list only for Compaq device */
	if (gendev == NULL) {
		compaq_net_device = dev;
	}

	/* PCI-only startup logic */
	if (pdev) {
		/* EISA resources already marked, so only PCI needs to do this here */
		/* Ignore return value, because Cardbus drivers already allocate for us */
		if (request_region(dev->base_addr, vci->io_size, print_name) != NULL)
			vp->must_free_region = 1;

		/* enable bus-mastering if necessary */
		if (vci->flags & PCI_USES_MASTER)
			pci_set_master(pdev);

		if (vci->drv_flags & IS_VORTEX) {
			u8 pci_latency;
			u8 new_latency = 248;

			/* Check the PCI latency value.  On the 3c590 series the latency timer
			   must be set to the maximum value to avoid data corruption that occurs
			   when the timer expires during a transfer.  This bug exists in the
			   Vortex chip only. */
			pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
			if (pci_latency < new_latency) {
				pr_info("%s: Overriding PCI latency timer (CFLT) setting of %d, new value is %d.\n",
					print_name, pci_latency, new_latency);
				pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
			}
		}
	}

	spin_lock_init(&vp->lock);
	vp->gendev = gendev;
	vp->mii.dev = dev;
	vp->mii.mdio_read = mdio_read;
	vp->mii.mdio_write = mdio_write;
	vp->mii.phy_id_mask = 0x1f;
	vp->mii.reg_num_mask = 0x1f;

	/* Makes sure rings are at least 16 byte aligned. */
	vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
					   + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
					   &vp->rx_ring_dma);
	retval = -ENOMEM;
	if (!vp->rx_ring)
		goto free_region;

	vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
	vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;

	/* if we are a PCI driver, we store info in pdev->driver_data
	 * instead of a module list */
	if (pdev)
		pci_set_drvdata(pdev, dev);
	if (edev)
		eisa_set_drvdata(edev, dev);

	vp->media_override = 7;
	if (option >= 0) {
		vp->media_override = ((option & 7) == 2) ? 0 : option & 15;
		if (vp->media_override != 7)
			vp->medialock = 1;
		vp->full_duplex = (option & 0x200) ? 1 : 0;
		vp->bus_master = (option & 16) ? 1 : 0;
	}

	if (global_full_duplex > 0)
		vp->full_duplex = 1;
	if (global_enable_wol > 0)
		vp->enable_wol = 1;

	if (card_idx < MAX_UNITS) {
		if (full_duplex[card_idx] > 0)
			vp->full_duplex = 1;
		if (flow_ctrl[card_idx] > 0)
			vp->flow_ctrl = 1;
		if (enable_wol[card_idx] > 0)
			vp->enable_wol = 1;
	}

	vp->mii.force_media = vp->full_duplex;
	vp->options = option;
	/* Read the station address from the EEPROM. */
	{
		int base;

		if (vci->drv_flags & EEPROM_8BIT)
			base = 0x230;
		else if (vci->drv_flags & EEPROM_OFFSET)
			base = EEPROM_Read + 0x30;
		else
			base = EEPROM_Read;

		for (i = 0; i < 0x40; i++) {
			int timer;
			window_write16(vp, base + i, 0, Wn0EepromCmd);
			/* Pause for at least 162 us. for the read to take place. */
			for (timer = 10; timer >= 0; timer--) {
				udelay(162);
				if ((window_read16(vp, 0, Wn0EepromCmd) &
				     0x8000) == 0)
					break;
			}
			eeprom[i] = window_read16(vp, 0, Wn0EepromData);
		}
	}
	for (i = 0; i < 0x18; i++)
		checksum ^= eeprom[i];
	checksum = (checksum ^ (checksum >> 8)) & 0xff;
	if (checksum != 0x00) {		/* Grrr, needless incompatible change 3Com. */
		while (i < 0x21)
			checksum ^= eeprom[i++];
		checksum = (checksum ^ (checksum >> 8)) & 0xff;
	}
	if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO))
		pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
	for (i = 0; i < 3; i++)
		((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	if (print_info)
		pr_cont(" %pM", dev->dev_addr);
	/* Unfortunately an all zero eeprom passes the checksum and this
	   gets found in the wild in failure cases. Crypto is hard 8) */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		retval = -EINVAL;
		pr_err("*** EEPROM MAC address is invalid.\n");
		goto free_ring;	/* With every pack */
	}
	for (i = 0; i < 6; i++)
		window_write8(vp, dev->dev_addr[i], 2, i);

	if (print_info)
		pr_cont(", IRQ %d\n", dev->irq);
	/* Tell them about an invalid IRQ. */
	if (dev->irq <= 0 || dev->irq >= nr_irqs)
		pr_warning(" *** Warning: IRQ %d is unlikely to work! ***\n",
			   dev->irq);

	step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1;
	if (print_info) {
		pr_info("  product code %02x%02x rev %02x.%d date %02d-%02d-%02d\n",
			eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
			step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9);
	}


	if (pdev && vci->drv_flags & HAS_CB_FNS) {
		unsigned short n;

		vp->cb_fn_base = pci_iomap(pdev, 2, 0);
		if (!vp->cb_fn_base) {
			retval = -ENOMEM;
			goto free_ring;
		}

		if (print_info) {
			pr_info("%s: CardBus functions mapped %16.16llx->%p\n",
				print_name,
				(unsigned long long)pci_resource_start(pdev, 2),
				vp->cb_fn_base);
		}

		n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
		if (vp->drv_flags & INVERT_LED_PWR)
			n |= 0x10;
		if (vp->drv_flags & INVERT_MII_PWR)
			n |= 0x4000;
		window_write16(vp, n, 2, Wn2_ResetOptions);
		if (vp->drv_flags & WNO_XCVR_PWR) {
			window_write16(vp, 0x0800, 0, 0);
		}
	}

	/* Extract our information from the EEPROM data. */
	vp->info1 = eeprom[13];
	vp->info2 = eeprom[15];
	vp->capabilities = eeprom[16];

	if (vp->info1 & 0x8000) {
		vp->full_duplex = 1;
		if (print_info)
			pr_info("Full duplex capable\n");
	}

	{
		static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
		unsigned int config;
		vp->available_media = window_read16(vp, 3, Wn3_Options);
		if ((vp->available_media & 0xff) == 0)		/* Broken 3c916 */
			vp->available_media = 0x40;
		config = window_read32(vp, 3, Wn3_Config);
		if (print_info) {
			pr_debug("  Internal config register is %4.4x, transceivers %#x.\n",
				config, window_read16(vp, 3, Wn3_Options));
			pr_info("  %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
				8 << RAM_SIZE(config),
				RAM_WIDTH(config) ? "word" : "byte",
				ram_split[RAM_SPLIT(config)],
				AUTOSELECT(config) ? "autoselect/" : "",
				XCVR(config) > XCVR_ExtMII ? "<invalid transceiver>" :
					media_tbl[XCVR(config)].name);
		}
		vp->default_media = XCVR(config);
		if (vp->default_media == XCVR_NWAY)
			vp->has_nway = 1;
		vp->autoselect = AUTOSELECT(config);
	}

	if (vp->media_override != 7) {
		pr_info("%s:  Media override to transceiver type %d (%s).\n",
		       print_name, vp->media_override,
		       media_tbl[vp->media_override].name);
		dev->if_port = vp->media_override;
	} else
		dev->if_port = vp->default_media;

	if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) ||
	    dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
		int phy, phy_idx = 0;
		mii_preamble_required++;
		if (vp->drv_flags & EXTRA_PREAMBLE)
			mii_preamble_required++;
		mdio_sync(vp, 32);
		mdio_read(dev, 24, MII_BMSR);
		for (phy = 0; phy < 32 && phy_idx < 1; phy++) {
			int mii_status, phyx;

			/*
			 * For the 3c905CX we look at index 24 first, because it bogusly
			 * reports an external PHY at all indices
			 */
			if (phy == 0)
				phyx = 24;
			else if (phy <= 24)
				phyx = phy - 1;
			else
				phyx = phy;
			mii_status = mdio_read(dev, phyx, MII_BMSR);
			if (mii_status && mii_status != 0xffff) {
				vp->phys[phy_idx++] = phyx;
				if (print_info) {
					pr_info("  MII transceiver found at address %d, status %4x.\n",
						phyx, mii_status);
				}
				if ((mii_status & 0x0040) == 0)
					mii_preamble_required++;
			}
		}
		mii_preamble_required--;
		if (phy_idx == 0) {
			pr_warning("  ***WARNING*** No MII transceivers found!\n");
			vp->phys[0] = 24;
		} else {
			vp->advertising = mdio_read(dev, vp->phys[0], MII_ADVERTISE);
			if (vp->full_duplex) {
				/* Only advertise the FD media types. */
				vp->advertising &= ~0x02A0;
				mdio_write(dev, vp->phys[0], 4, vp->advertising);
			}
		}
		vp->mii.phy_id = vp->phys[0];
	}

	if (vp->capabilities & CapBusMaster) {
		vp->full_bus_master_tx = 1;
		if (print_info) {
			pr_info("  Enabling bus-master transmits and %s receives.\n",
			(vp->info2 & 1) ? "early" : "whole-frame" );
		}
		vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
		vp->bus_master = 0;		/* AKPM: vortex only */
	}

	/* The 3c59x-specific entries in the device structure. */
	if (vp->full_bus_master_tx) {
		dev->netdev_ops = &boomrang_netdev_ops;
		/* Actually, it still should work with iommu. */
		if (card_idx < MAX_UNITS &&
		    ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) ||
				hw_checksums[card_idx] == 1)) {
			dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		}
	} else
		dev->netdev_ops = &vortex_netdev_ops;

	if (print_info) {
		pr_info("%s: scatter/gather %sabled. h/w checksums %sabled\n",
			print_name,
			(dev->features & NETIF_F_SG) ? "en":"dis",
			(dev->features & NETIF_F_IP_CSUM) ? "en":"dis");
	}

	dev->ethtool_ops = &vortex_ethtool_ops;
	dev->watchdog_timeo = (watchdog * HZ) / 1000;

	if (pdev) {
		vp->pm_state_valid = 1;
		pci_save_state(VORTEX_PCI(vp));
		acpi_set_WOL(dev);
	}
	retval = register_netdev(dev);
	if (retval == 0)
		return 0;

free_ring:
	pci_free_consistent(pdev,
		sizeof(struct boom_rx_desc) * RX_RING_SIZE
			+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
		vp->rx_ring,
		vp->rx_ring_dma);
free_region:
	if (vp->must_free_region)
		release_region(dev->base_addr, vci->io_size);
	free_netdev(dev);
	pr_err(PFX "vortex_probe1 fails.  Returns %d\n", retval);
out:
	return retval;
}

1471static void
1472issue_and_wait(struct net_device *dev, int cmd)
1473{
62afe595
JL
1474 struct vortex_private *vp = netdev_priv(dev);
1475 void __iomem *ioaddr = vp->ioaddr;
1da177e4
LT
1476 int i;
1477
62afe595 1478 iowrite16(cmd, ioaddr + EL3_CMD);
1da177e4 1479 for (i = 0; i < 2000; i++) {
62afe595 1480 if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
1da177e4
LT
1481 return;
1482 }
1483
1484 /* OK, that didn't work. Do it the slow way. One second */
1485 for (i = 0; i < 100000; i++) {
62afe595 1486 if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) {
1da177e4 1487 if (vortex_debug > 1)
39738e16 1488 pr_info("%s: command 0x%04x took %d usecs\n",
1da177e4
LT
1489 dev->name, cmd, i * 10);
1490 return;
1491 }
1492 udelay(10);
1493 }
39738e16 1494 pr_err("%s: command 0x%04x did not complete! Status=0x%x\n",
62afe595 1495 dev->name, cmd, ioread16(ioaddr + EL3_STATUS));
1496}
1497
1498static void
1499vortex_set_duplex(struct net_device *dev)
1500{
1501 struct vortex_private *vp = netdev_priv(dev);
125d5ce8 1502
39738e16 1503 pr_info("%s: setting %s-duplex.\n",
1504 dev->name, (vp->full_duplex) ? "full" : "half");
1505
125d5ce8 1506 /* Set the full-duplex bit. */
1507 window_write16(vp,
1508 ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
1509 (vp->large_frames ? 0x40 : 0) |
1510 ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ?
1511 0x100 : 0),
1512 3, Wn3_MAC_Ctrl);
1513}
1514
1515static void vortex_check_media(struct net_device *dev, unsigned int init)
1516{
1517 struct vortex_private *vp = netdev_priv(dev);
1518 unsigned int ok_to_print = 0;
1519
1520 if (vortex_debug > 3)
1521 ok_to_print = 1;
1522
1523 if (mii_check_media(&vp->mii, ok_to_print, init)) {
1524 vp->full_duplex = vp->mii.full_duplex;
1525 vortex_set_duplex(dev);
1526 } else if (init) {
1527 vortex_set_duplex(dev);
1528 }
1529}
1530
c8303d10 1531static int
1532vortex_up(struct net_device *dev)
1533{
1da177e4 1534 struct vortex_private *vp = netdev_priv(dev);
62afe595 1535 void __iomem *ioaddr = vp->ioaddr;
1da177e4 1536 unsigned int config;
0280f9f9 1537 int i, mii_reg1, mii_reg5, err = 0;
1538
1539 if (VORTEX_PCI(vp)) {
1540 pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */
1541 if (vp->pm_state_valid)
1542 pci_restore_state(VORTEX_PCI(vp));
1543 err = pci_enable_device(VORTEX_PCI(vp));
1544 if (err) {
39738e16 1545 pr_warning("%s: Could not enable device\n",
1546 dev->name);
1547 goto err_out;
1548 }
1549 }
1550
1551 /* Before initializing select the active media port. */
a095cfc4 1552 config = window_read32(vp, 3, Wn3_Config);
1553
1554 if (vp->media_override != 7) {
39738e16 1555 pr_info("%s: Media override to transceiver %d (%s).\n",
1556 dev->name, vp->media_override,
1557 media_tbl[vp->media_override].name);
1558 dev->if_port = vp->media_override;
1559 } else if (vp->autoselect) {
1560 if (vp->has_nway) {
1561 if (vortex_debug > 1)
39738e16 1562 pr_info("%s: using NWAY device table, not %d\n",
1563 dev->name, dev->if_port);
1564 dev->if_port = XCVR_NWAY;
1565 } else {
1566 /* Find first available media type, starting with 100baseTx. */
1567 dev->if_port = XCVR_100baseTx;
1568 while (! (vp->available_media & media_tbl[dev->if_port].mask))
1569 dev->if_port = media_tbl[dev->if_port].next;
1570 if (vortex_debug > 1)
39738e16 1571 pr_info("%s: first available media type: %s\n",
1572 dev->name, media_tbl[dev->if_port].name);
1573 }
1574 } else {
1575 dev->if_port = vp->default_media;
1576 if (vortex_debug > 1)
39738e16 1577 pr_info("%s: using default media %s\n",
1578 dev->name, media_tbl[dev->if_port].name);
1579 }
1580
1581 init_timer(&vp->timer);
1582 vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait);
1583 vp->timer.data = (unsigned long)dev;
1584 vp->timer.function = vortex_timer; /* timer handler */
1585 add_timer(&vp->timer);
1586
1587 init_timer(&vp->rx_oom_timer);
1588 vp->rx_oom_timer.data = (unsigned long)dev;
1589 vp->rx_oom_timer.function = rx_oom_timer;
1590
1591 if (vortex_debug > 1)
39738e16 1592 pr_debug("%s: Initial media type %s.\n",
1593 dev->name, media_tbl[dev->if_port].name);
1594
125d5ce8 1595 vp->full_duplex = vp->mii.force_media;
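	/* BFINS() below inserts dev->if_port as the 4-bit field at bit 20 of
	 * the Window 3 InternalConfig word read above, i.e. the transceiver
	 * select field that the media-override path also reports on. */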
1596 config = BFINS(config, dev->if_port, 20, 4);
1597 if (vortex_debug > 6)
39738e16 1598 pr_debug("vortex_up(): writing 0x%x to InternalConfig\n", config);
a095cfc4 1599 window_write32(vp, config, 3, Wn3_Config);
1600
1601 if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
1602 mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR);
1603 mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
1604 vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
373492d0 1605 vp->mii.full_duplex = vp->full_duplex;
09ce3512 1606
125d5ce8 1607 vortex_check_media(dev, 1);
1da177e4 1608 }
1609 else
1610 vortex_set_duplex(dev);
1da177e4 1611
1612 issue_and_wait(dev, TxReset);
1613 /*
1614 * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
1615 */
1616 issue_and_wait(dev, RxReset|0x04);
1617
1da177e4 1618
62afe595 1619 iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
1620
1621 if (vortex_debug > 1) {
39738e16 1622 pr_debug("%s: vortex_up() irq %d media status %4.4x.\n",
a095cfc4 1623 dev->name, dev->irq, window_read16(vp, 4, Wn4_Media));
1624 }
1625
1626 /* Set the station address and mask in window 2 each time opened. */
1da177e4 1627 for (i = 0; i < 6; i++)
a095cfc4 1628 window_write8(vp, dev->dev_addr[i], 2, i);
1da177e4 1629 for (; i < 12; i+=2)
a095cfc4 1630 window_write16(vp, 0, 2, i);
1631
1632 if (vp->cb_fn_base) {
a095cfc4 1633 unsigned short n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
1634 if (vp->drv_flags & INVERT_LED_PWR)
1635 n |= 0x10;
1636 if (vp->drv_flags & INVERT_MII_PWR)
1637 n |= 0x4000;
a095cfc4 1638 window_write16(vp, n, 2, Wn2_ResetOptions);
1639 }
1640
1641 if (dev->if_port == XCVR_10base2)
1642 /* Start the thinnet transceiver. We should really wait 50ms...*/
62afe595 1643 iowrite16(StartCoax, ioaddr + EL3_CMD);
1da177e4 1644 if (dev->if_port != XCVR_NWAY) {
1645 window_write16(vp,
1646 (window_read16(vp, 4, Wn4_Media) &
1647 ~(Media_10TP|Media_SQE)) |
1648 media_tbl[dev->if_port].media_bits,
1649 4, Wn4_Media);
1650 }
1651
1652 /* Switch to the stats window, and clear all stats by reading. */
62afe595 1653 iowrite16(StatsDisable, ioaddr + EL3_CMD);
1da177e4 1654 for (i = 0; i < 10; i++)
1655 window_read8(vp, 6, i);
1656 window_read16(vp, 6, 10);
1657 window_read16(vp, 6, 12);
1da177e4 1658 /* New: On the Vortex we must also clear the BadSSD counter. */
a095cfc4 1659 window_read8(vp, 4, 12);
1da177e4 1660 /* ..and on the Boomerang we enable the extra statistics bits. */
a095cfc4 1661 window_write16(vp, 0x0040, 4, Wn4_NetDiag);
1662
1663 if (vp->full_bus_master_rx) { /* Boomerang bus master. */
1664 vp->cur_rx = vp->dirty_rx = 0;
1665 /* Initialize the RxEarly register as recommended. */
1666 iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
1667 iowrite32(0x0020, ioaddr + PktStatus);
1668 iowrite32(vp->rx_ring_dma, ioaddr + UpListPtr);
1669 }
1670 if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */
1671 vp->cur_tx = vp->dirty_tx = 0;
1672 if (vp->drv_flags & IS_BOOMERANG)
62afe595 1673 iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
1674 /* Clear the Rx, Tx rings. */
1675 for (i = 0; i < RX_RING_SIZE; i++) /* AKPM: this is done in vortex_open, too */
1676 vp->rx_ring[i].status = 0;
1677 for (i = 0; i < TX_RING_SIZE; i++)
1678 vp->tx_skbuff[i] = NULL;
62afe595 1679 iowrite32(0, ioaddr + DownListPtr);
1680 }
 1681 /* Set receiver mode: presumably accept broadcast and our physical address only. */
1682 set_rx_mode(dev);
1683 /* enable 802.1q tagged frames */
1684 set_8021q_mode(dev, 1);
62afe595 1685 iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
1da177e4 1686
1687 iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
1688 iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
1689 /* Allow status bits to be seen. */
1690 vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
1691 (vp->full_bus_master_tx ? DownComplete : TxAvailable) |
1692 (vp->full_bus_master_rx ? UpComplete : RxComplete) |
1693 (vp->bus_master ? DMADone : 0);
1694 vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable |
1695 (vp->full_bus_master_rx ? 0 : RxComplete) |
1696 StatsFull | HostError | TxComplete | IntReq
1697 | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
62afe595 1698 iowrite16(vp->status_enable, ioaddr + EL3_CMD);
1da177e4 1699 /* Ack all pending events, and set active indicator mask. */
62afe595 1700 iowrite16(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
1da177e4 1701 ioaddr + EL3_CMD);
62afe595 1702 iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
1da177e4 1703 if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
62afe595 1704 iowrite32(0x8000, vp->cb_fn_base + 4);
1da177e4 1705 netif_start_queue (dev);
1706err_out:
1707 return err;
1708}
1709
1710static int
1711vortex_open(struct net_device *dev)
1712{
1713 struct vortex_private *vp = netdev_priv(dev);
1714 int i;
1715 int retval;
1716
1717 /* Use the now-standard shared IRQ implementation. */
1718 if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
1fb9df5d 1719 &boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev))) {
39738e16 1720 pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
c8303d10 1721 goto err;
1722 }
1723
1724 if (vp->full_bus_master_rx) { /* Boomerang bus master. */
1725 if (vortex_debug > 2)
39738e16 1726 pr_debug("%s: Filling in the Rx ring.\n", dev->name);
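		/* Each upload (Rx) descriptor is chained to the next one by bus
		 * address and owns one PKT_BUF_SZ skbuff; after this loop the
		 * last descriptor is pointed back at rx_ring_dma so the NIC
		 * sees a circular list. */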
1727 for (i = 0; i < RX_RING_SIZE; i++) {
1728 struct sk_buff *skb;
1729 vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
1730 vp->rx_ring[i].status = 0; /* Clear complete bit. */
1731 vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
1732
1733 skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN,
1734 GFP_KERNEL);
1735 vp->rx_skbuff[i] = skb;
1736 if (skb == NULL)
1737 break; /* Bad news! */
1738
1739 skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
689be439 1740 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
1741 }
1742 if (i != RX_RING_SIZE) {
1743 int j;
39738e16 1744 pr_emerg("%s: no memory for rx ring\n", dev->name);
1745 for (j = 0; j < i; j++) {
1746 if (vp->rx_skbuff[j]) {
1747 dev_kfree_skb(vp->rx_skbuff[j]);
1748 vp->rx_skbuff[j] = NULL;
1749 }
1750 }
1751 retval = -ENOMEM;
c8303d10 1752 goto err_free_irq;
1753 }
1754 /* Wrap the ring. */
1755 vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
1756 }
1757
1758 retval = vortex_up(dev);
1759 if (!retval)
1760 goto out;
1da177e4 1761
c8303d10 1762err_free_irq:
1da177e4 1763 free_irq(dev->irq, dev);
c8303d10 1764err:
1da177e4 1765 if (vortex_debug > 1)
39738e16 1766 pr_err("%s: vortex_open() fails: returning %d\n", dev->name, retval);
c8303d10 1767out:
1768 return retval;
1769}
1770
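/*
 * Media selection timer. Fixed 10/100 ports are checked for link beat in
 * Wn4_Media; MII/NWAY defers to mii_check_media(). If the current port
 * shows no link and the media is not locked, step through the available
 * media, rewriting Wn3_Config and Wn4_Media for the next candidate, and
 * poll again sooner (5*HZ) while the carrier is off.
 */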
1771static void
1772vortex_timer(unsigned long data)
1773{
1774 struct net_device *dev = (struct net_device *)data;
1775 struct vortex_private *vp = netdev_priv(dev);
62afe595 1776 void __iomem *ioaddr = vp->ioaddr;
1777 int next_tick = 60*HZ;
1778 int ok = 0;
a095cfc4 1779 int media_status;
1780
1781 if (vortex_debug > 2) {
39738e16 1782 pr_debug("%s: Media selection timer tick happened, %s.\n",
1da177e4 1783 dev->name, media_tbl[dev->if_port].name);
39738e16 1784 pr_debug("dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
1da177e4
LT
1785 }
1786
0a9da4bd 1787 disable_irq_lockdep(dev->irq);
a095cfc4 1788 media_status = window_read16(vp, 4, Wn4_Media);
1da177e4
LT
1789 switch (dev->if_port) {
1790 case XCVR_10baseT: case XCVR_100baseTx: case XCVR_100baseFx:
1791 if (media_status & Media_LnkBeat) {
1792 netif_carrier_on(dev);
1793 ok = 1;
1794 if (vortex_debug > 1)
39738e16 1795 pr_debug("%s: Media %s has link beat, %x.\n",
1da177e4
LT
1796 dev->name, media_tbl[dev->if_port].name, media_status);
1797 } else {
1798 netif_carrier_off(dev);
1799 if (vortex_debug > 1) {
39738e16 1800 pr_debug("%s: Media %s has no link beat, %x.\n",
1da177e4
LT
1801 dev->name, media_tbl[dev->if_port].name, media_status);
1802 }
1803 }
1804 break;
1805 case XCVR_MII: case XCVR_NWAY:
1806 {
1da177e4 1807 ok = 1;
c5643cab
IM
1808 /* Interrupts are already disabled */
1809 spin_lock(&vp->lock);
125d5ce8 1810 vortex_check_media(dev, 0);
c5643cab 1811 spin_unlock(&vp->lock);
1da177e4
LT
1812 }
1813 break;
1814 default: /* Other media types handled by Tx timeouts. */
1815 if (vortex_debug > 1)
39738e16 1816 pr_debug("%s: Media %s has no indication, %x.\n",
1da177e4
LT
1817 dev->name, media_tbl[dev->if_port].name, media_status);
1818 ok = 1;
1819 }
b4ff6450
SK
1820
1821 if (!netif_carrier_ok(dev))
1822 next_tick = 5*HZ;
1823
e94d10eb
SK
1824 if (vp->medialock)
1825 goto leave_media_alone;
1826
a880c4cd 1827 if (!ok) {
1da177e4
LT
1828 unsigned int config;
1829
1830 do {
1831 dev->if_port = media_tbl[dev->if_port].next;
1832 } while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
1833 if (dev->if_port == XCVR_Default) { /* Go back to default. */
1834 dev->if_port = vp->default_media;
1835 if (vortex_debug > 1)
39738e16 1836 pr_debug("%s: Media selection failing, using default %s port.\n",
1da177e4
LT
1837 dev->name, media_tbl[dev->if_port].name);
1838 } else {
1839 if (vortex_debug > 1)
39738e16 1840 pr_debug("%s: Media selection failed, now trying %s port.\n",
1da177e4
LT
1841 dev->name, media_tbl[dev->if_port].name);
1842 next_tick = media_tbl[dev->if_port].wait;
1843 }
a095cfc4
BH
1844 window_write16(vp,
1845 (media_status & ~(Media_10TP|Media_SQE)) |
1846 media_tbl[dev->if_port].media_bits,
1847 4, Wn4_Media);
1da177e4 1848
a095cfc4 1849 config = window_read32(vp, 3, Wn3_Config);
1da177e4 1850 config = BFINS(config, dev->if_port, 20, 4);
a095cfc4 1851 window_write32(vp, config, 3, Wn3_Config);
1da177e4 1852
62afe595 1853 iowrite16(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
1da177e4
LT
1854 ioaddr + EL3_CMD);
1855 if (vortex_debug > 1)
39738e16 1856 pr_debug("wrote 0x%08x to Wn3_Config\n", config);
1da177e4
LT
1857 /* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */
1858 }
1da177e4
LT
1859
1860leave_media_alone:
1861 if (vortex_debug > 2)
39738e16 1862 pr_debug("%s: Media selection timer finished, %s.\n",
1da177e4
LT
1863 dev->name, media_tbl[dev->if_port].name);
1864
0a9da4bd 1865 enable_irq_lockdep(dev->irq);
1da177e4
LT
1866 mod_timer(&vp->timer, RUN_AT(next_tick));
1867 if (vp->deferred)
62afe595 1868 iowrite16(FakeIntr, ioaddr + EL3_CMD);
1da177e4
LT
1869}
1870
1871static void vortex_tx_timeout(struct net_device *dev)
1872{
1873 struct vortex_private *vp = netdev_priv(dev);
62afe595 1874 void __iomem *ioaddr = vp->ioaddr;
1da177e4 1875
39738e16 1876 pr_err("%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
62afe595
JL
1877 dev->name, ioread8(ioaddr + TxStatus),
1878 ioread16(ioaddr + EL3_STATUS));
39738e16 1879 pr_err(" diagnostics: net %04x media %04x dma %08x fifo %04x\n",
a095cfc4
BH
1880 window_read16(vp, 4, Wn4_NetDiag),
1881 window_read16(vp, 4, Wn4_Media),
62afe595 1882 ioread32(ioaddr + PktStatus),
a095cfc4 1883 window_read16(vp, 4, Wn4_FIFODiag));
1da177e4 1884 /* Slight code bloat to be user friendly. */
62afe595 1885 if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88)
39738e16 1886 pr_err("%s: Transmitter encountered 16 collisions --"
1da177e4 1887 " network cable problem?\n", dev->name);
62afe595 1888 if (ioread16(ioaddr + EL3_STATUS) & IntLatch) {
39738e16 1889 pr_err("%s: Interrupt posted but not delivered --"
1da177e4
LT
1890 " IRQ blocked by another device?\n", dev->name);
1891 /* Bad idea here.. but we might as well handle a few events. */
1892 {
1893 /*
1894 * Block interrupts because vortex_interrupt does a bare spin_lock()
1895 */
1896 unsigned long flags;
1897 local_irq_save(flags);
1898 if (vp->full_bus_master_tx)
7d12e780 1899 boomerang_interrupt(dev->irq, dev);
1da177e4 1900 else
7d12e780 1901 vortex_interrupt(dev->irq, dev);
1da177e4
LT
1902 local_irq_restore(flags);
1903 }
1904 }
1905
1906 if (vortex_debug > 0)
1907 dump_tx_ring(dev);
1908
1909 issue_and_wait(dev, TxReset);
1910
1daad055 1911 dev->stats.tx_errors++;
1da177e4 1912 if (vp->full_bus_master_tx) {
39738e16 1913 pr_debug("%s: Resetting the Tx ring pointer.\n", dev->name);
62afe595
JL
1914 if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0)
1915 iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
1da177e4
LT
1916 ioaddr + DownListPtr);
1917 if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE)
1918 netif_wake_queue (dev);
1919 if (vp->drv_flags & IS_BOOMERANG)
62afe595
JL
1920 iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
1921 iowrite16(DownUnstall, ioaddr + EL3_CMD);
1da177e4 1922 } else {
1daad055 1923 dev->stats.tx_dropped++;
1da177e4
LT
1924 netif_wake_queue(dev);
1925 }
6aa20a22 1926
1da177e4 1927 /* Issue Tx Enable */
62afe595 1928 iowrite16(TxEnable, ioaddr + EL3_CMD);
1ae5dc34 1929 dev->trans_start = jiffies; /* prevent tx timeout */
1da177e4
LT
1930}
1931
1932/*
1933 * Handle uncommon interrupt sources. This is a separate routine to minimize
1934 * the cache impact.
1935 */
1936static void
1937vortex_error(struct net_device *dev, int status)
1938{
1939 struct vortex_private *vp = netdev_priv(dev);
62afe595 1940 void __iomem *ioaddr = vp->ioaddr;
1da177e4
LT
1941 int do_tx_reset = 0, reset_mask = 0;
1942 unsigned char tx_status = 0;
1943
1944 if (vortex_debug > 2) {
39738e16 1945 pr_err("%s: vortex_error(), status=0x%x\n", dev->name, status);
1946 }
1947
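	/* Recovery policy for a latched Tx error, as implemented below:
	 * txJabber/txUnderrun (0x30) forces a full TxReset; maxCollisions
	 * (0x08) on MAX_COLLISION_RESET cards resets only the interface
	 * logic (TxReset|0x0108); anything else just re-issues TxEnable. */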
1948 if (status & TxComplete) { /* Really "TxError" for us. */
62afe595 1949 tx_status = ioread8(ioaddr + TxStatus);
1da177e4 1950 /* Presumably a tx-timeout. We must merely re-enable. */
1951 if (vortex_debug > 2 ||
1952 (tx_status != 0x88 && vortex_debug > 0)) {
39738e16 1953 pr_err("%s: Transmit error, Tx status register %2.2x.\n",
1da177e4
LT
1954 dev->name, tx_status);
1955 if (tx_status == 0x82) {
39738e16 1956 pr_err("Probably a duplex mismatch. See "
1da177e4
LT
1957 "Documentation/networking/vortex.txt\n");
1958 }
1959 dump_tx_ring(dev);
1960 }
1daad055
PZ
1961 if (tx_status & 0x14) dev->stats.tx_fifo_errors++;
1962 if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
0000754c 1963 if (tx_status & 0x08) vp->xstats.tx_max_collisions++;
62afe595 1964 iowrite8(0, ioaddr + TxStatus);
1da177e4
LT
1965 if (tx_status & 0x30) { /* txJabber or txUnderrun */
1966 do_tx_reset = 1;
0000754c
AM
1967 } else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) { /* maxCollisions */
1968 do_tx_reset = 1;
1969 reset_mask = 0x0108; /* Reset interface logic, but not download logic */
1970 } else { /* Merely re-enable the transmitter. */
62afe595 1971 iowrite16(TxEnable, ioaddr + EL3_CMD);
1da177e4
LT
1972 }
1973 }
1974
1975 if (status & RxEarly) { /* Rx early is unused. */
1976 vortex_rx(dev);
62afe595 1977 iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);
1da177e4
LT
1978 }
1979 if (status & StatsFull) { /* Empty statistics. */
1980 static int DoneDidThat;
1981 if (vortex_debug > 4)
39738e16 1982 pr_debug("%s: Updating stats.\n", dev->name);
1da177e4
LT
1983 update_stats(ioaddr, dev);
1984 /* HACK: Disable statistics as an interrupt source. */
1985 /* This occurs when we have the wrong media type! */
1986 if (DoneDidThat == 0 &&
62afe595 1987 ioread16(ioaddr + EL3_STATUS) & StatsFull) {
39738e16 1988 pr_warning("%s: Updating statistics failed, disabling "
1da177e4 1989 "stats as an interrupt source.\n", dev->name);
a095cfc4
BH
1990 iowrite16(SetIntrEnb |
1991 (window_read16(vp, 5, 10) & ~StatsFull),
1992 ioaddr + EL3_CMD);
1da177e4 1993 vp->intr_enable &= ~StatsFull;
1da177e4
LT
1994 DoneDidThat++;
1995 }
1996 }
1997 if (status & IntReq) { /* Restore all interrupt sources. */
62afe595
JL
1998 iowrite16(vp->status_enable, ioaddr + EL3_CMD);
1999 iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
1da177e4
LT
2000 }
2001 if (status & HostError) {
2002 u16 fifo_diag;
a095cfc4 2003 fifo_diag = window_read16(vp, 4, Wn4_FIFODiag);
39738e16 2004 pr_err("%s: Host error, FIFO diagnostic register %4.4x.\n",
1da177e4
LT
2005 dev->name, fifo_diag);
2006 /* Adapter failure requires Tx/Rx reset and reinit. */
2007 if (vp->full_bus_master_tx) {
62afe595 2008 int bus_status = ioread32(ioaddr + PktStatus);
1da177e4
LT
2009 /* 0x80000000 PCI master abort. */
2010 /* 0x40000000 PCI target abort. */
2011 if (vortex_debug)
39738e16 2012 pr_err("%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status);
1da177e4
LT
2013
2014 /* In this case, blow the card away */
2015 /* Must not enter D3 or we can't legally issue the reset! */
2016 vortex_down(dev, 0);
2017 issue_and_wait(dev, TotalReset | 0xff);
2018 vortex_up(dev); /* AKPM: bug. vortex_up() assumes that the rx ring is full. It may not be. */
2019 } else if (fifo_diag & 0x0400)
2020 do_tx_reset = 1;
2021 if (fifo_diag & 0x3000) {
2022 /* Reset Rx fifo and upload logic */
2023 issue_and_wait(dev, RxReset|0x07);
2024 /* Set the Rx filter to the current state. */
2025 set_rx_mode(dev);
2026 /* enable 802.1q VLAN tagged frames */
2027 set_8021q_mode(dev, 1);
62afe595
JL
2028 iowrite16(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
2029 iowrite16(AckIntr | HostError, ioaddr + EL3_CMD);
1da177e4
LT
2030 }
2031 }
2032
2033 if (do_tx_reset) {
2034 issue_and_wait(dev, TxReset|reset_mask);
62afe595 2035 iowrite16(TxEnable, ioaddr + EL3_CMD);
1da177e4
LT
2036 if (!vp->full_bus_master_tx)
2037 netif_wake_queue(dev);
2038 }
2039}
2040
27a1de95 2041static netdev_tx_t
1da177e4
LT
2042vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
2043{
2044 struct vortex_private *vp = netdev_priv(dev);
62afe595 2045 void __iomem *ioaddr = vp->ioaddr;
1da177e4
LT
2046
2047 /* Put out the doubleword header... */
62afe595 2048 iowrite32(skb->len, ioaddr + TX_FIFO);
1da177e4
LT
2049 if (vp->bus_master) {
2050 /* Set the bus-master controller to transfer the packet. */
2051 int len = (skb->len + 3) & ~3;
a095cfc4
BH
2052 vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
2053 PCI_DMA_TODEVICE);
2054 window_set(vp, 7);
2055 iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
62afe595 2056 iowrite16(len, ioaddr + Wn7_MasterLen);
1da177e4 2057 vp->tx_skb = skb;
62afe595 2058 iowrite16(StartDMADown, ioaddr + EL3_CMD);
1da177e4
LT
2059 /* netif_wake_queue() will be called at the DMADone interrupt. */
2060 } else {
2061 /* ... and the packet rounded to a doubleword. */
62afe595 2062 iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
1da177e4 2063 dev_kfree_skb (skb);
62afe595 2064 if (ioread16(ioaddr + TxFree) > 1536) {
1da177e4
LT
2065 netif_start_queue (dev); /* AKPM: redundant? */
2066 } else {
2067 /* Interrupt us when the FIFO has room for max-sized packet. */
2068 netif_stop_queue(dev);
62afe595 2069 iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
1da177e4
LT
2070 }
2071 }
2072
1da177e4
LT
2073
2074 /* Clear the Tx status stack. */
2075 {
2076 int tx_status;
2077 int i = 32;
2078
62afe595 2079 while (--i > 0 && (tx_status = ioread8(ioaddr + TxStatus)) > 0) {
1da177e4
LT
2080 if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */
2081 if (vortex_debug > 2)
39738e16 2082 pr_debug("%s: Tx error, status %2.2x.\n",
1da177e4 2083 dev->name, tx_status);
1daad055
PZ
2084 if (tx_status & 0x04) dev->stats.tx_fifo_errors++;
2085 if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
1da177e4
LT
2086 if (tx_status & 0x30) {
2087 issue_and_wait(dev, TxReset);
2088 }
62afe595 2089 iowrite16(TxEnable, ioaddr + EL3_CMD);
1da177e4 2090 }
62afe595 2091 iowrite8(0x00, ioaddr + TxStatus); /* Pop the status stack. */
1da177e4
LT
2092 }
2093 }
6ed10654 2094 return NETDEV_TX_OK;
1da177e4
LT
2095}
2096
27a1de95 2097static netdev_tx_t
1da177e4
LT
2098boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
2099{
2100 struct vortex_private *vp = netdev_priv(dev);
62afe595 2101 void __iomem *ioaddr = vp->ioaddr;
1da177e4
LT
2102 /* Calculate the next Tx descriptor entry. */
2103 int entry = vp->cur_tx % TX_RING_SIZE;
2104 struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
2105 unsigned long flags;
2106
2107 if (vortex_debug > 6) {
39738e16
AB
2108 pr_debug("boomerang_start_xmit()\n");
2109 pr_debug("%s: Trying to send a packet, Tx index %d.\n",
0f667ff5 2110 dev->name, vp->cur_tx);
1da177e4
LT
2111 }
2112
2113 if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
2114 if (vortex_debug > 0)
39738e16 2115 pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
1da177e4
LT
2116 dev->name);
2117 netif_stop_queue(dev);
5b548140 2118 return NETDEV_TX_BUSY;
2119 }
2120
2121 vp->tx_skbuff[entry] = skb;
2122
2123 vp->tx_ring[entry].next = 0;
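	/*
	 * Build the download (Tx) descriptor. With DO_ZEROCOPY the DPD gets
	 * one fragment entry for the linear skb data plus one per page
	 * fragment, the final entry flagged LAST_FRAG; otherwise a single
	 * address/length pair covers the whole linear skb.
	 */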
2124#if DO_ZEROCOPY
84fa7933 2125 if (skb->ip_summed != CHECKSUM_PARTIAL)
2126 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
2127 else
2128 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
2129
2130 if (!skb_shinfo(skb)->nr_frags) {
2131 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
2132 skb->len, PCI_DMA_TODEVICE));
2133 vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
2134 } else {
2135 int i;
2136
2137 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
2138 skb_headlen(skb), PCI_DMA_TODEVICE));
2139 vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
2140
2141 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2142 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2143
2144 vp->tx_ring[entry].frag[i+1].addr =
2145 cpu_to_le32(pci_map_single(VORTEX_PCI(vp),
2146 (void*)page_address(frag->page) + frag->page_offset,
2147 frag->size, PCI_DMA_TODEVICE));
2148
2149 if (i == skb_shinfo(skb)->nr_frags-1)
2150 vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG);
2151 else
2152 vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size);
2153 }
2154 }
2155#else
2156 vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
2157 vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
2158 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
2159#endif
2160
2161 spin_lock_irqsave(&vp->lock, flags);
2162 /* Wait for the stall to complete. */
2163 issue_and_wait(dev, DownStall);
2164 prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
2165 if (ioread32(ioaddr + DownListPtr) == 0) {
2166 iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
2167 vp->queued_packet++;
2168 }
2169
2170 vp->cur_tx++;
2171 if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
2172 netif_stop_queue (dev);
2173 } else { /* Clear previous interrupt enable. */
2174#if defined(tx_interrupt_mitigation)
 2175 /* Dubious. If in boomerang_interrupt the "faster" cyclone ifdef
2176 * were selected, this would corrupt DN_COMPLETE. No?
2177 */
2178 prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
2179#endif
2180 }
62afe595 2181 iowrite16(DownUnstall, ioaddr + EL3_CMD);
1da177e4 2182 spin_unlock_irqrestore(&vp->lock, flags);
6ed10654 2183 return NETDEV_TX_OK;
1da177e4
LT
2184}
2185
2186/* The interrupt handler does all of the Rx thread work and cleans up
2187 after the Tx thread. */
2188
2189/*
2190 * This is the ISR for the vortex series chips.
2191 * full_bus_master_tx == 0 && full_bus_master_rx == 0
2192 */
2193
2194static irqreturn_t
7d12e780 2195vortex_interrupt(int irq, void *dev_id)
1da177e4
LT
2196{
2197 struct net_device *dev = dev_id;
2198 struct vortex_private *vp = netdev_priv(dev);
62afe595 2199 void __iomem *ioaddr;
1da177e4
LT
2200 int status;
2201 int work_done = max_interrupt_work;
2202 int handled = 0;
2203
62afe595 2204 ioaddr = vp->ioaddr;
2205 spin_lock(&vp->lock);
2206
62afe595 2207 status = ioread16(ioaddr + EL3_STATUS);
2208
2209 if (vortex_debug > 6)
39738e16 2210 pr_debug("vortex_interrupt(). status=0x%4x\n", status);
2211
2212 if ((status & IntLatch) == 0)
2213 goto handler_exit; /* No interrupt: shared IRQs cause this */
2214 handled = 1;
2215
2216 if (status & IntReq) {
2217 status |= vp->deferred;
2218 vp->deferred = 0;
2219 }
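	/* IntReq is raised by the FakeIntr command the media timer issues
	 * while vp->deferred is non-zero, so sources masked off by the
	 * work limiter below are folded back into this pass. */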
2220
2221 if (status == 0xffff) /* h/w no longer present (hotplug)? */
2222 goto handler_exit;
2223
2224 if (vortex_debug > 4)
39738e16 2225 pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
62afe595 2226 dev->name, status, ioread8(ioaddr + Timer));
1da177e4 2227
a095cfc4
BH
2228 window_set(vp, 7);
2229
1da177e4
LT
2230 do {
2231 if (vortex_debug > 5)
39738e16 2232 pr_debug("%s: In interrupt loop, status %4.4x.\n",
1da177e4
LT
2233 dev->name, status);
2234 if (status & RxComplete)
2235 vortex_rx(dev);
2236
2237 if (status & TxAvailable) {
2238 if (vortex_debug > 5)
39738e16 2239 pr_debug(" TX room bit was handled.\n");
1da177e4 2240 /* There's room in the FIFO for a full-sized packet. */
62afe595 2241 iowrite16(AckIntr | TxAvailable, ioaddr + EL3_CMD);
1da177e4
LT
2242 netif_wake_queue (dev);
2243 }
2244
2245 if (status & DMADone) {
62afe595
JL
2246 if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
2247 iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
1da177e4
LT
2248 pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
2249 dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
62afe595 2250 if (ioread16(ioaddr + TxFree) > 1536) {
1da177e4
LT
2251 /*
2252 * AKPM: FIXME: I don't think we need this. If the queue was stopped due to
2253 * insufficient FIFO room, the TxAvailable test will succeed and call
2254 * netif_wake_queue()
2255 */
2256 netif_wake_queue(dev);
2257 } else { /* Interrupt when FIFO has room for max-sized packet. */
62afe595 2258 iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
1da177e4
LT
2259 netif_stop_queue(dev);
2260 }
2261 }
2262 }
2263 /* Check for all uncommon interrupts at once. */
2264 if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
2265 if (status == 0xffff)
2266 break;
2267 vortex_error(dev, status);
2268 }
2269
2270 if (--work_done < 0) {
39738e16
AB
2271 pr_warning("%s: Too much work in interrupt, status %4.4x.\n",
2272 dev->name, status);
1da177e4
LT
2273 /* Disable all pending interrupts. */
2274 do {
2275 vp->deferred |= status;
62afe595 2276 iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
1da177e4 2277 ioaddr + EL3_CMD);
62afe595
JL
2278 iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
2279 } while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
1da177e4
LT
2280 /* The timer will reenable interrupts. */
2281 mod_timer(&vp->timer, jiffies + 1*HZ);
2282 break;
2283 }
2284 /* Acknowledge the IRQ. */
62afe595
JL
2285 iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
2286 } while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
1da177e4
LT
2287
2288 if (vortex_debug > 4)
39738e16 2289 pr_debug("%s: exiting interrupt, status %4.4x.\n",
1da177e4
LT
2290 dev->name, status);
2291handler_exit:
2292 spin_unlock(&vp->lock);
2293 return IRQ_RETVAL(handled);
2294}
2295
2296/*
2297 * This is the ISR for the boomerang series chips.
2298 * full_bus_master_tx == 1 && full_bus_master_rx == 1
2299 */
2300
2301static irqreturn_t
7d12e780 2302boomerang_interrupt(int irq, void *dev_id)
1da177e4
LT
2303{
2304 struct net_device *dev = dev_id;
2305 struct vortex_private *vp = netdev_priv(dev);
62afe595 2306 void __iomem *ioaddr;
1da177e4
LT
2307 int status;
2308 int work_done = max_interrupt_work;
2309
62afe595 2310 ioaddr = vp->ioaddr;
1da177e4
LT
2311
2312 /*
2313 * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
2314 * and boomerang_start_xmit
2315 */
2316 spin_lock(&vp->lock);
2317
62afe595 2318 status = ioread16(ioaddr + EL3_STATUS);
1da177e4
LT
2319
2320 if (vortex_debug > 6)
39738e16 2321 pr_debug("boomerang_interrupt. status=0x%4x\n", status);
1da177e4
LT
2322
2323 if ((status & IntLatch) == 0)
2324 goto handler_exit; /* No interrupt: shared IRQs can cause this */
2325
2326 if (status == 0xffff) { /* h/w no longer present (hotplug)? */
2327 if (vortex_debug > 1)
39738e16 2328 pr_debug("boomerang_interrupt(1): status = 0xffff\n");
1da177e4
LT
2329 goto handler_exit;
2330 }
2331
2332 if (status & IntReq) {
2333 status |= vp->deferred;
2334 vp->deferred = 0;
2335 }
2336
2337 if (vortex_debug > 4)
39738e16 2338 pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
62afe595 2339 dev->name, status, ioread8(ioaddr + Timer));
1da177e4
LT
2340 do {
2341 if (vortex_debug > 5)
39738e16 2342 pr_debug("%s: In interrupt loop, status %4.4x.\n",
1da177e4
LT
2343 dev->name, status);
2344 if (status & UpComplete) {
62afe595 2345 iowrite16(AckIntr | UpComplete, ioaddr + EL3_CMD);
1da177e4 2346 if (vortex_debug > 5)
39738e16 2347 pr_debug("boomerang_interrupt->boomerang_rx\n");
1da177e4
LT
2348 boomerang_rx(dev);
2349 }
2350
2351 if (status & DownComplete) {
2352 unsigned int dirty_tx = vp->dirty_tx;
2353
62afe595 2354 iowrite16(AckIntr | DownComplete, ioaddr + EL3_CMD);
1da177e4
LT
2355 while (vp->cur_tx - dirty_tx > 0) {
2356 int entry = dirty_tx % TX_RING_SIZE;
2357#if 1 /* AKPM: the latter is faster, but cyclone-only */
62afe595 2358 if (ioread32(ioaddr + DownListPtr) ==
1da177e4
LT
2359 vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
2360 break; /* It still hasn't been processed. */
2361#else
2362 if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0)
2363 break; /* It still hasn't been processed. */
2364#endif
6aa20a22 2365
1da177e4
LT
2366 if (vp->tx_skbuff[entry]) {
2367 struct sk_buff *skb = vp->tx_skbuff[entry];
6aa20a22 2368#if DO_ZEROCOPY
1da177e4
LT
2369 int i;
2370 for (i=0; i<=skb_shinfo(skb)->nr_frags; i++)
2371 pci_unmap_single(VORTEX_PCI(vp),
2372 le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
2373 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
2374 PCI_DMA_TODEVICE);
2375#else
2376 pci_unmap_single(VORTEX_PCI(vp),
2377 le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
2378#endif
2379 dev_kfree_skb_irq(skb);
2380 vp->tx_skbuff[entry] = NULL;
2381 } else {
39738e16 2382 pr_debug("boomerang_interrupt: no skb!\n");
1da177e4 2383 }
1daad055 2384 /* dev->stats.tx_packets++; Counted below. */
1da177e4
LT
2385 dirty_tx++;
2386 }
2387 vp->dirty_tx = dirty_tx;
2388 if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
2389 if (vortex_debug > 6)
39738e16 2390 pr_debug("boomerang_interrupt: wake queue\n");
1da177e4
LT
2391 netif_wake_queue (dev);
2392 }
2393 }
2394
2395 /* Check for all uncommon interrupts at once. */
2396 if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq))
2397 vortex_error(dev, status);
2398
2399 if (--work_done < 0) {
39738e16
AB
2400 pr_warning("%s: Too much work in interrupt, status %4.4x.\n",
2401 dev->name, status);
1da177e4
LT
2402 /* Disable all pending interrupts. */
2403 do {
2404 vp->deferred |= status;
62afe595 2405 iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
1da177e4 2406 ioaddr + EL3_CMD);
62afe595
JL
2407 iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
2408 } while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
1da177e4
LT
2409 /* The timer will reenable interrupts. */
2410 mod_timer(&vp->timer, jiffies + 1*HZ);
2411 break;
2412 }
2413 /* Acknowledge the IRQ. */
62afe595 2414 iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
1da177e4 2415 if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
62afe595 2416 iowrite32(0x8000, vp->cb_fn_base + 4);
1da177e4 2417
62afe595 2418 } while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch);
1da177e4
LT
2419
2420 if (vortex_debug > 4)
39738e16 2421 pr_debug("%s: exiting interrupt, status %4.4x.\n",
1da177e4
LT
2422 dev->name, status);
2423handler_exit:
2424 spin_unlock(&vp->lock);
2425 return IRQ_HANDLED;
2426}
2427
2428static int vortex_rx(struct net_device *dev)
2429{
2430 struct vortex_private *vp = netdev_priv(dev);
62afe595 2431 void __iomem *ioaddr = vp->ioaddr;
1da177e4
LT
2432 int i;
2433 short rx_status;
2434
2435 if (vortex_debug > 5)
39738e16 2436 pr_debug("vortex_rx(): status %4.4x, rx_status %4.4x.\n",
62afe595
JL
2437 ioread16(ioaddr+EL3_STATUS), ioread16(ioaddr+RxStatus));
2438 while ((rx_status = ioread16(ioaddr + RxStatus)) > 0) {
1da177e4 2439 if (rx_status & 0x4000) { /* Error, update stats. */
62afe595 2440 unsigned char rx_error = ioread8(ioaddr + RxErrors);
1da177e4 2441 if (vortex_debug > 2)
39738e16 2442 pr_debug(" Rx error: status %2.2x.\n", rx_error);
1daad055
PZ
2443 dev->stats.rx_errors++;
2444 if (rx_error & 0x01) dev->stats.rx_over_errors++;
2445 if (rx_error & 0x02) dev->stats.rx_length_errors++;
2446 if (rx_error & 0x04) dev->stats.rx_frame_errors++;
2447 if (rx_error & 0x08) dev->stats.rx_crc_errors++;
2448 if (rx_error & 0x10) dev->stats.rx_length_errors++;
2449 } else {
 2450 /* The packet length: up to 4.5K! */
2451 int pkt_len = rx_status & 0x1fff;
2452 struct sk_buff *skb;
2453
2454 skb = dev_alloc_skb(pkt_len + 5);
2455 if (vortex_debug > 4)
39738e16 2456 pr_debug("Receiving packet size %d status %4.4x.\n",
1da177e4
LT
2457 pkt_len, rx_status);
2458 if (skb != NULL) {
2459 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
2460 /* 'skb_put()' points to the start of sk_buff data area. */
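			/* If the Vortex DMA engine is idle, program MasterAddr/
			 * MasterLen to copy the frame straight into the skb and
			 * spin until the inProgress bit (0x8000) clears in
			 * Wn7_MasterStatus; otherwise fall back to programmed
			 * I/O reads from RX_FIFO. */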
2461 if (vp->bus_master &&
62afe595 2462 ! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
2463 dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
2464 pkt_len, PCI_DMA_FROMDEVICE);
62afe595
JL
2465 iowrite32(dma, ioaddr + Wn7_MasterAddr);
2466 iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
2467 iowrite16(StartDMAUp, ioaddr + EL3_CMD);
2468 while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
1da177e4
LT
2469 ;
2470 pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
2471 } else {
62afe595
JL
2472 ioread32_rep(ioaddr + RX_FIFO,
2473 skb_put(skb, pkt_len),
2474 (pkt_len + 3) >> 2);
1da177e4 2475 }
62afe595 2476 iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
1da177e4
LT
2477 skb->protocol = eth_type_trans(skb, dev);
2478 netif_rx(skb);
1daad055 2479 dev->stats.rx_packets++;
1da177e4
LT
2480 /* Wait a limited time to go to next packet. */
2481 for (i = 200; i >= 0; i--)
62afe595 2482 if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
1da177e4
LT
2483 break;
2484 continue;
2485 } else if (vortex_debug > 0)
39738e16
AB
2486 pr_notice("%s: No memory to allocate a sk_buff of size %d.\n",
2487 dev->name, pkt_len);
1daad055 2488 dev->stats.rx_dropped++;
1da177e4 2489 }
1da177e4
LT
2490 issue_and_wait(dev, RxDiscard);
2491 }
2492
2493 return 0;
2494}
2495
2496static int
2497boomerang_rx(struct net_device *dev)
2498{
2499 struct vortex_private *vp = netdev_priv(dev);
2500 int entry = vp->cur_rx % RX_RING_SIZE;
62afe595 2501 void __iomem *ioaddr = vp->ioaddr;
1da177e4
LT
2502 int rx_status;
2503 int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
2504
2505 if (vortex_debug > 5)
39738e16 2506 pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
1da177e4
LT
2507
2508 while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
2509 if (--rx_work_limit < 0)
2510 break;
2511 if (rx_status & RxDError) { /* Error, update stats. */
2512 unsigned char rx_error = rx_status >> 16;
2513 if (vortex_debug > 2)
39738e16 2514 pr_debug(" Rx error: status %2.2x.\n", rx_error);
1daad055
PZ
2515 dev->stats.rx_errors++;
2516 if (rx_error & 0x01) dev->stats.rx_over_errors++;
2517 if (rx_error & 0x02) dev->stats.rx_length_errors++;
2518 if (rx_error & 0x04) dev->stats.rx_frame_errors++;
2519 if (rx_error & 0x08) dev->stats.rx_crc_errors++;
2520 if (rx_error & 0x10) dev->stats.rx_length_errors++;
2521 } else {
 2522 /* The packet length: up to 4.5K! */
2523 int pkt_len = rx_status & 0x1fff;
2524 struct sk_buff *skb;
2525 dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
2526
2527 if (vortex_debug > 4)
39738e16 2528 pr_debug("Receiving packet size %d status %4.4x.\n",
2529 pkt_len, rx_status);
2530
 2531 /* If the packet is shorter than rx_copybreak, copy it into a small fresh
 2532 skbuff and keep the ring buffer; otherwise pass the ring skbuff up as-is. */
cc2d6596 2533 if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
2534 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
2535 pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2536 /* 'skb_put()' points to the start of sk_buff data area. */
2537 memcpy(skb_put(skb, pkt_len),
689be439 2538 vp->rx_skbuff[entry]->data,
1da177e4
LT
2539 pkt_len);
2540 pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2541 vp->rx_copy++;
2542 } else {
2543 /* Pass up the skbuff already on the Rx ring. */
2544 skb = vp->rx_skbuff[entry];
2545 vp->rx_skbuff[entry] = NULL;
2546 skb_put(skb, pkt_len);
2547 pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2548 vp->rx_nocopy++;
2549 }
2550 skb->protocol = eth_type_trans(skb, dev);
2551 { /* Use hardware checksum info. */
2552 int csum_bits = rx_status & 0xee000000;
2553 if (csum_bits &&
2554 (csum_bits == (IPChksumValid | TCPChksumValid) ||
2555 csum_bits == (IPChksumValid | UDPChksumValid))) {
2556 skb->ip_summed = CHECKSUM_UNNECESSARY;
2557 vp->rx_csumhits++;
2558 }
2559 }
2560 netif_rx(skb);
1daad055 2561 dev->stats.rx_packets++;
1da177e4
LT
2562 }
2563 entry = (++vp->cur_rx) % RX_RING_SIZE;
2564 }
2565 /* Refill the Rx ring buffers. */
2566 for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
2567 struct sk_buff *skb;
2568 entry = vp->dirty_rx % RX_RING_SIZE;
2569 if (vp->rx_skbuff[entry] == NULL) {
89d71a66 2570 skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
1da177e4
LT
2571 if (skb == NULL) {
2572 static unsigned long last_jif;
ff5688ae 2573 if (time_after(jiffies, last_jif + 10 * HZ)) {
39738e16 2574 pr_warning("%s: memory shortage\n", dev->name);
1da177e4
LT
2575 last_jif = jiffies;
2576 }
2577 if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
2578 mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
2579 break; /* Bad news! */
2580 }
9a5d3414 2581
689be439 2582 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
1da177e4
LT
2583 vp->rx_skbuff[entry] = skb;
2584 }
2585 vp->rx_ring[entry].status = 0; /* Clear complete bit. */
62afe595 2586 iowrite16(UpUnstall, ioaddr + EL3_CMD);
1da177e4
LT
2587 }
2588 return 0;
2589}
2590
2591/*
2592 * If we've hit a total OOM refilling the Rx ring we poll once a second
2593 * for some memory. Otherwise there is no way to restart the rx process.
2594 */
2595static void
2596rx_oom_timer(unsigned long arg)
2597{
2598 struct net_device *dev = (struct net_device *)arg;
2599 struct vortex_private *vp = netdev_priv(dev);
2600
2601 spin_lock_irq(&vp->lock);
2602 if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */
2603 boomerang_rx(dev);
2604 if (vortex_debug > 1) {
39738e16 2605 pr_debug("%s: rx_oom_timer %s\n", dev->name,
1da177e4
LT
2606 ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
2607 }
2608 spin_unlock_irq(&vp->lock);
2609}
2610
2611static void
2612vortex_down(struct net_device *dev, int final_down)
2613{
2614 struct vortex_private *vp = netdev_priv(dev);
62afe595 2615 void __iomem *ioaddr = vp->ioaddr;
1da177e4
LT
2616
2617 netif_stop_queue (dev);
2618
2619 del_timer_sync(&vp->rx_oom_timer);
2620 del_timer_sync(&vp->timer);
2621
1daad055 2622 /* Turn off statistics ASAP. We update dev->stats below. */
62afe595 2623 iowrite16(StatsDisable, ioaddr + EL3_CMD);
1da177e4
LT
2624
2625 /* Disable the receiver and transmitter. */
62afe595
JL
2626 iowrite16(RxDisable, ioaddr + EL3_CMD);
2627 iowrite16(TxDisable, ioaddr + EL3_CMD);
1da177e4
LT
2628
2629 /* Disable receiving 802.1q tagged frames */
2630 set_8021q_mode(dev, 0);
2631
2632 if (dev->if_port == XCVR_10base2)
2633 /* Turn off thinnet power. Green! */
62afe595 2634 iowrite16(StopCoax, ioaddr + EL3_CMD);
1da177e4 2635
62afe595 2636 iowrite16(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
1da177e4
LT
2637
2638 update_stats(ioaddr, dev);
2639 if (vp->full_bus_master_rx)
62afe595 2640 iowrite32(0, ioaddr + UpListPtr);
1da177e4 2641 if (vp->full_bus_master_tx)
62afe595 2642 iowrite32(0, ioaddr + DownListPtr);
1da177e4
LT
2643
2644 if (final_down && VORTEX_PCI(vp)) {
3c8fad18 2645 vp->pm_state_valid = 1;
1da177e4
LT
2646 pci_save_state(VORTEX_PCI(vp));
2647 acpi_set_WOL(dev);
2648 }
2649}
2650
2651static int
2652vortex_close(struct net_device *dev)
2653{
2654 struct vortex_private *vp = netdev_priv(dev);
62afe595 2655 void __iomem *ioaddr = vp->ioaddr;
1da177e4
LT
2656 int i;
2657
2658 if (netif_device_present(dev))
2659 vortex_down(dev, 1);
2660
2661 if (vortex_debug > 1) {
39738e16 2662 pr_debug("%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
62afe595 2663 dev->name, ioread16(ioaddr + EL3_STATUS), ioread8(ioaddr + TxStatus));
39738e16 2664 pr_debug("%s: vortex close stats: rx_nocopy %d rx_copy %d"
1da177e4
LT
2665 " tx_queued %d Rx pre-checksummed %d.\n",
2666 dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
2667 }
2668
2669#if DO_ZEROCOPY
32fb5f06
JL
2670 if (vp->rx_csumhits &&
2671 (vp->drv_flags & HAS_HWCKSM) == 0 &&
2672 (vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) {
39738e16 2673 pr_warning("%s supports hardware checksums, and we're not using them!\n", dev->name);
1da177e4
LT
2674 }
2675#endif
6aa20a22 2676
1da177e4
LT
2677 free_irq(dev->irq, dev);
2678
2679 if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
2680 for (i = 0; i < RX_RING_SIZE; i++)
2681 if (vp->rx_skbuff[i]) {
2682 pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
2683 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2684 dev_kfree_skb(vp->rx_skbuff[i]);
2685 vp->rx_skbuff[i] = NULL;
2686 }
2687 }
2688 if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
2689 for (i = 0; i < TX_RING_SIZE; i++) {
2690 if (vp->tx_skbuff[i]) {
2691 struct sk_buff *skb = vp->tx_skbuff[i];
2692#if DO_ZEROCOPY
2693 int k;
2694
2695 for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
2696 pci_unmap_single(VORTEX_PCI(vp),
2697 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
2698 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
2699 PCI_DMA_TODEVICE);
2700#else
2701 pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
2702#endif
2703 dev_kfree_skb(skb);
2704 vp->tx_skbuff[i] = NULL;
2705 }
2706 }
2707 }
2708
2709 return 0;
2710}
2711
2712static void
2713dump_tx_ring(struct net_device *dev)
2714{
2715 if (vortex_debug > 0) {
2716 struct vortex_private *vp = netdev_priv(dev);
62afe595 2717 void __iomem *ioaddr = vp->ioaddr;
6aa20a22 2718
2719 if (vp->full_bus_master_tx) {
2720 int i;
62afe595 2721 int stalled = ioread32(ioaddr + PktStatus) & 0x04; /* Possibly racy, but it's only debug code. */
1da177e4 2722
39738e16 2723 pr_err(" Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
2724 vp->full_bus_master_tx,
2725 vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
2726 vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
39738e16 2727 pr_err(" Transmit list %8.8x vs. %p.\n",
62afe595 2728 ioread32(ioaddr + DownListPtr),
1da177e4
LT
2729 &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
2730 issue_and_wait(dev, DownStall);
2731 for (i = 0; i < TX_RING_SIZE; i++) {
0cb13536
JD
2732 unsigned int length;
2733
1da177e4 2734#if DO_ZEROCOPY
0cb13536 2735 length = le32_to_cpu(vp->tx_ring[i].frag[0].length);
1da177e4 2736#else
0cb13536 2737 length = le32_to_cpu(vp->tx_ring[i].length);
1da177e4 2738#endif
0cb13536
JD
2739 pr_err(" %d: @%p length %8.8x status %8.8x\n",
2740 i, &vp->tx_ring[i], length,
1da177e4
LT
2741 le32_to_cpu(vp->tx_ring[i].status));
2742 }
2743 if (!stalled)
62afe595 2744 iowrite16(DownUnstall, ioaddr + EL3_CMD);
1da177e4
LT
2745 }
2746 }
2747}
2748
2749static struct net_device_stats *vortex_get_stats(struct net_device *dev)
2750{
2751 struct vortex_private *vp = netdev_priv(dev);
62afe595 2752 void __iomem *ioaddr = vp->ioaddr;
1da177e4
LT
2753 unsigned long flags;
2754
2755 if (netif_device_present(dev)) { /* AKPM: Used to be netif_running */
2756 spin_lock_irqsave (&vp->lock, flags);
62afe595 2757 update_stats(ioaddr, dev);
1da177e4
LT
2758 spin_unlock_irqrestore (&vp->lock, flags);
2759 }
1daad055 2760 return &dev->stats;
1da177e4
LT
2761}
2762
2763/* Update statistics.
2764 Unlike with the EL3 we need not worry about interrupts changing
2765 the window setting from underneath us, but we must still guard
2766 against a race condition with a StatsUpdate interrupt updating the
2767 table. This is done by checking that the ASM (!) code generated uses
2768 atomic updates with '+='.
2769 */
62afe595 2770static void update_stats(void __iomem *ioaddr, struct net_device *dev)
1da177e4
LT
2771{
2772 struct vortex_private *vp = netdev_priv(dev);
1da177e4 2773
1da177e4
LT
2774 /* Unlike the 3c5x9 we need not turn off stats updates while reading. */
2775 /* Switch to the stats window, and read everything. */
a095cfc4
BH
2776 dev->stats.tx_carrier_errors += window_read8(vp, 6, 0);
2777 dev->stats.tx_heartbeat_errors += window_read8(vp, 6, 1);
2778 dev->stats.tx_window_errors += window_read8(vp, 6, 4);
2779 dev->stats.rx_fifo_errors += window_read8(vp, 6, 5);
2780 dev->stats.tx_packets += window_read8(vp, 6, 6);
2781 dev->stats.tx_packets += (window_read8(vp, 6, 9) &
2782 0x30) << 4;
2783 /* Rx packets */ window_read8(vp, 6, 7); /* Must read to clear */
1da177e4
LT
2784 /* Don't bother with register 9, an extension of registers 6&7.
2785 If we do use the 6&7 values the atomic update assumption above
2786 is invalid. */
a095cfc4
BH
2787 dev->stats.rx_bytes += window_read16(vp, 6, 10);
2788 dev->stats.tx_bytes += window_read16(vp, 6, 12);
1da177e4 2789 /* Extra stats for get_ethtool_stats() */
a095cfc4
BH
2790 vp->xstats.tx_multiple_collisions += window_read8(vp, 6, 2);
2791 vp->xstats.tx_single_collisions += window_read8(vp, 6, 3);
2792 vp->xstats.tx_deferred += window_read8(vp, 6, 8);
2793 vp->xstats.rx_bad_ssd += window_read8(vp, 4, 12);
1da177e4 2794
1daad055 2795 dev->stats.collisions = vp->xstats.tx_multiple_collisions
2796 + vp->xstats.tx_single_collisions
2797 + vp->xstats.tx_max_collisions;
2798
1da177e4 2799 {
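		/* Window 4, register 13 packs the top nibbles of the byte
		 * counters: bits 3:0 extend rx_bytes and bits 7:4 extend
		 * tx_bytes, both landing in bits 19:16 of the totals. */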
a095cfc4 2800 u8 up = window_read8(vp, 4, 13);
2801 dev->stats.rx_bytes += (up & 0x0f) << 16;
2802 dev->stats.tx_bytes += (up & 0xf0) << 12;
1da177e4 2803 }
2804}
2805
2806static int vortex_nway_reset(struct net_device *dev)
2807{
2808 struct vortex_private *vp = netdev_priv(dev);
1da177e4
LT
2809 unsigned long flags;
2810 int rc;
2811
2812 spin_lock_irqsave(&vp->lock, flags);
1da177e4
LT
2813 rc = mii_nway_restart(&vp->mii);
2814 spin_unlock_irqrestore(&vp->lock, flags);
2815 return rc;
2816}
2817
1da177e4
LT
2818static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2819{
2820 struct vortex_private *vp = netdev_priv(dev);
1da177e4
LT
2821 unsigned long flags;
2822 int rc;
2823
2824 spin_lock_irqsave(&vp->lock, flags);
1da177e4
LT
2825 rc = mii_ethtool_gset(&vp->mii, cmd);
2826 spin_unlock_irqrestore(&vp->lock, flags);
2827 return rc;
2828}
2829
2830static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2831{
2832 struct vortex_private *vp = netdev_priv(dev);
1da177e4
LT
2833 unsigned long flags;
2834 int rc;
2835
2836 spin_lock_irqsave(&vp->lock, flags);
1da177e4
LT
2837 rc = mii_ethtool_sset(&vp->mii, cmd);
2838 spin_unlock_irqrestore(&vp->lock, flags);
2839 return rc;
2840}
2841
2842static u32 vortex_get_msglevel(struct net_device *dev)
2843{
2844 return vortex_debug;
2845}
2846
2847static void vortex_set_msglevel(struct net_device *dev, u32 dbg)
2848{
2849 vortex_debug = dbg;
2850}
2851
b9f2c044 2852static int vortex_get_sset_count(struct net_device *dev, int sset)
1da177e4 2853{
b9f2c044
JG
2854 switch (sset) {
2855 case ETH_SS_STATS:
2856 return VORTEX_NUM_STATS;
2857 default:
2858 return -EOPNOTSUPP;
2859 }
1da177e4
LT
2860}
2861
2862static void vortex_get_ethtool_stats(struct net_device *dev,
2863 struct ethtool_stats *stats, u64 *data)
2864{
2865 struct vortex_private *vp = netdev_priv(dev);
62afe595 2866 void __iomem *ioaddr = vp->ioaddr;
1da177e4
LT
2867 unsigned long flags;
2868
2869 spin_lock_irqsave(&vp->lock, flags);
62afe595 2870 update_stats(ioaddr, dev);
1da177e4
LT
2871 spin_unlock_irqrestore(&vp->lock, flags);
2872
2873 data[0] = vp->xstats.tx_deferred;
8d1d0340
SK
2874 data[1] = vp->xstats.tx_max_collisions;
2875 data[2] = vp->xstats.tx_multiple_collisions;
2876 data[3] = vp->xstats.tx_single_collisions;
2877 data[4] = vp->xstats.rx_bad_ssd;
1da177e4
LT
2878}
2879
2880
2881static void vortex_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2882{
2883 switch (stringset) {
2884 case ETH_SS_STATS:
2885 memcpy(data, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
2886 break;
2887 default:
2888 WARN_ON(1);
2889 break;
2890 }
2891}
2892
2893static void vortex_get_drvinfo(struct net_device *dev,
2894 struct ethtool_drvinfo *info)
2895{
2896 struct vortex_private *vp = netdev_priv(dev);
2897
2898 strcpy(info->driver, DRV_NAME);
1da177e4
LT
2899 if (VORTEX_PCI(vp)) {
2900 strcpy(info->bus_info, pci_name(VORTEX_PCI(vp)));
2901 } else {
2902 if (VORTEX_EISA(vp))
86de79b6 2903 strcpy(info->bus_info, dev_name(vp->gendev));
1da177e4
LT
2904 else
2905 sprintf(info->bus_info, "EISA 0x%lx %d",
2906 dev->base_addr, dev->irq);
2907 }
2908}
2909
7282d491 2910static const struct ethtool_ops vortex_ethtool_ops = {
1da177e4
LT
2911 .get_drvinfo = vortex_get_drvinfo,
2912 .get_strings = vortex_get_strings,
2913 .get_msglevel = vortex_get_msglevel,
2914 .set_msglevel = vortex_set_msglevel,
2915 .get_ethtool_stats = vortex_get_ethtool_stats,
b9f2c044 2916 .get_sset_count = vortex_get_sset_count,
1da177e4
LT
2917 .get_settings = vortex_get_settings,
2918 .set_settings = vortex_set_settings,
373a6887 2919 .get_link = ethtool_op_get_link,
1da177e4
LT
2920 .nway_reset = vortex_nway_reset,
2921};
2922
2923#ifdef CONFIG_PCI
2924/*
2925 * Must power the device up to do MDIO operations
2926 */
2927static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2928{
2929 int err;
2930 struct vortex_private *vp = netdev_priv(dev);
1da177e4 2931 unsigned long flags;
cc2d6596 2932 pci_power_t state = 0;
1da177e4
LT
2933
2934 if(VORTEX_PCI(vp))
2935 state = VORTEX_PCI(vp)->current_state;
2936
2937 /* The kernel core really should have pci_get_power_state() */
2938
2939 if(state != 0)
2940 pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
2941 spin_lock_irqsave(&vp->lock, flags);
1da177e4
LT
2942 err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
2943 spin_unlock_irqrestore(&vp->lock, flags);
2944 if(state != 0)
2945 pci_set_power_state(VORTEX_PCI(vp), state);
2946
2947 return err;
2948}
2949#endif
2950
2951
2952/* Pre-Cyclone chips have no documented multicast filter, so the only
2953 multicast setting is to receive all multicast frames. At least
2954 the chip has a very clean way to set the mode, unlike many others. */
2955static void set_rx_mode(struct net_device *dev)
2956{
62afe595
JL
2957 struct vortex_private *vp = netdev_priv(dev);
2958 void __iomem *ioaddr = vp->ioaddr;
1da177e4
LT
2959 int new_mode;
2960
2961 if (dev->flags & IFF_PROMISC) {
d5b20697 2962 if (vortex_debug > 3)
39738e16 2963 pr_notice("%s: Setting promiscuous mode.\n", dev->name);
1da177e4 2964 new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
59ce25d9 2965 } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
1da177e4
LT
2966 new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
2967 } else
2968 new_mode = SetRxFilter | RxStation | RxBroadcast;
2969
62afe595 2970 iowrite16(new_mode, ioaddr + EL3_CMD);
1da177e4
LT
2971}
2972
2973#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
2974/* Setup the card so that it can receive frames with an 802.1q VLAN tag.
2975 Note that this must be done after each RxReset due to some backwards
2976 compatibility logic in the Cyclone and Tornado ASICs */
2977
2978/* The Ethernet Type used for 802.1q tagged frames */
2979#define VLAN_ETHER_TYPE 0x8100
2980
2981static void set_8021q_mode(struct net_device *dev, int enable)
2982{
2983 struct vortex_private *vp = netdev_priv(dev);
1da177e4
LT
2984 int mac_ctrl;
2985
2986 if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) {
2987 /* cyclone and tornado chipsets can recognize 802.1q
2988 * tagged frames and treat them correctly */
2989
2990 int max_pkt_size = dev->mtu+14; /* MTU+Ethernet header */
2991 if (enable)
2992 max_pkt_size += 4; /* 802.1Q VLAN tag */
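		/* e.g. with the default 1500-byte MTU this programs 1514,
		 * or 1518 once the 4-byte VLAN tag is allowed for. */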
2993
a095cfc4 2994 window_write16(vp, max_pkt_size, 3, Wn3_MaxPktSize);
2995
2996 /* set VlanEtherType to let the hardware checksumming
2997 treat tagged frames correctly */
a095cfc4 2998 window_write16(vp, VLAN_ETHER_TYPE, 7, Wn7_VlanEtherType);
2999 } else {
3000 /* on older cards we have to enable large frames */
3001
3002 vp->large_frames = dev->mtu > 1500 || enable;
3003
a095cfc4 3004 mac_ctrl = window_read16(vp, 3, Wn3_MAC_Ctrl);
1da177e4
LT
3005 if (vp->large_frames)
3006 mac_ctrl |= 0x40;
3007 else
3008 mac_ctrl &= ~0x40;
a095cfc4 3009 window_write16(vp, mac_ctrl, 3, Wn3_MAC_Ctrl);
1da177e4 3010 }
1da177e4
LT
3011}
3012#else
3013
3014static void set_8021q_mode(struct net_device *dev, int enable)
3015{
3016}
3017
3018
3019#endif
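/*
 * Worked example (illustrative, not in the original file): what the window-3
 * MaxPktSize programming above comes out to for the default 1500-byte MTU.
 */
enum vortex_maxpkt_example {
	VORTEX_EXAMPLE_MTU	= 1500,
	VORTEX_EXAMPLE_UNTAGGED	= VORTEX_EXAMPLE_MTU + 14,	/* 1514: MTU + Ethernet header */
	VORTEX_EXAMPLE_TAGGED	= VORTEX_EXAMPLE_MTU + 14 + 4,	/* 1518: plus the 802.1Q tag */
};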
3020
3021/* MII transceiver control section.
3022 Read and write the MII registers using software-generated serial
3023 MDIO protocol. See the MII specifications or DP83840A data sheet
3024 for details. */
3025
3026/* The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
3027 met by back-to-back PCI I/O cycles, but we insert a delay to avoid
3028 "overclocking" issues. */
a095cfc4
BH
3029static void mdio_delay(struct vortex_private *vp)
3030{
3031 window_read32(vp, 4, Wn4_PhysicalMgmt);
3032}
1da177e4
LT
3033
3034#define MDIO_SHIFT_CLK 0x01
3035#define MDIO_DIR_WRITE 0x04
3036#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
3037#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
3038#define MDIO_DATA_READ 0x02
3039#define MDIO_ENB_IN 0x00
3040
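/*
 * Minimal sketch (hypothetical helper, not in the driver): how the routines
 * below clock a single bit onto the MDIO wire -- present the data bit with
 * the clock low, wait, then raise MDIO_SHIFT_CLK to latch it at the PHY.
 */
static void mdio_clock_bit_example(struct vortex_private *vp, int bit)
{
	int dataval = bit ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;

	window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
	mdio_delay(vp);
	window_write16(vp, dataval | MDIO_SHIFT_CLK, 4, Wn4_PhysicalMgmt);
	mdio_delay(vp);
}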
3041/* Generate the preamble required for initial synchronization and
3042 by a few older transceivers. */
a095cfc4 3043static void mdio_sync(struct vortex_private *vp, int bits)
1da177e4 3044{
1da177e4
LT
3045 /* Establish sync by sending at least 32 logic ones. */
3046	while (--bits >= 0) {
a095cfc4
BH
3047 window_write16(vp, MDIO_DATA_WRITE1, 4, Wn4_PhysicalMgmt);
3048 mdio_delay(vp);
3049 window_write16(vp, MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK,
3050 4, Wn4_PhysicalMgmt);
3051 mdio_delay(vp);
1da177e4
LT
3052 }
3053}
3054
3055static int mdio_read(struct net_device *dev, int phy_id, int location)
3056{
3057 int i;
62afe595 3058 struct vortex_private *vp = netdev_priv(dev);
1da177e4
LT
3059 int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
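	/* The 0xf6 << 10 term above supplies the frame's fixed bits as they are
	 * shifted out MSB-first below: a leading idle 1, the 01 start pattern
	 * and the 10 read opcode; phy_id and location fill bits 9..0. */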
3060 unsigned int retval = 0;
1da177e4
LT
3061
3062 if (mii_preamble_required)
a095cfc4 3063 mdio_sync(vp, 32);
1da177e4
LT
3064
3065 /* Shift the read command bits out. */
3066 for (i = 14; i >= 0; i--) {
3067		int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
a095cfc4
BH
3068 window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
3069 mdio_delay(vp);
3070 window_write16(vp, dataval | MDIO_SHIFT_CLK,
3071 4, Wn4_PhysicalMgmt);
3072 mdio_delay(vp);
1da177e4
LT
3073 }
3074	/* Read the two transition bits, 16 data bits, and the wire-idle bit. */
3075 for (i = 19; i > 0; i--) {
a095cfc4
BH
3076 window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
3077 mdio_delay(vp);
3078 retval = (retval << 1) |
3079 ((window_read16(vp, 4, Wn4_PhysicalMgmt) &
3080 MDIO_DATA_READ) ? 1 : 0);
3081 window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
3082 4, Wn4_PhysicalMgmt);
3083 mdio_delay(vp);
1da177e4
LT
3084 }
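	/* Of the 19 bits shifted in, bit 17 is the turnaround bit a responding
	 * PHY drives low; if it reads back as 1, no PHY answered and the
	 * all-ones "no device" value is reported.  Otherwise bits 16..1 hold
	 * the 16-bit register value. */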
3085	return (retval & 0x20000) ? 0xffff : (retval >> 1) & 0xffff;
3086}
3087
3088static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
3089{
62afe595 3090 struct vortex_private *vp = netdev_priv(dev);
1da177e4 3091 int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
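	/* 0x50020000 supplies the fixed bits of the 32-bit MDIO write frame:
	 * start (01) and write opcode (01) in bits 31..28 and the 10 turnaround
	 * pattern in bits 17..16; phy_id, location and value fill the rest. */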
1da177e4
LT
3092 int i;
3093
3094 if (mii_preamble_required)
a095cfc4 3095 mdio_sync(vp, 32);
1da177e4
LT
3096
3097 /* Shift the command bits out. */
3098 for (i = 31; i >= 0; i--) {
3099		int dataval = (write_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
a095cfc4
BH
3100 window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
3101 mdio_delay(vp);
3102 window_write16(vp, dataval | MDIO_SHIFT_CLK,
3103 4, Wn4_PhysicalMgmt);
3104 mdio_delay(vp);
1da177e4
LT
3105 }
3106 /* Leave the interface idle. */
3107 for (i = 1; i >= 0; i--) {
a095cfc4
BH
3108 window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
3109 mdio_delay(vp);
3110 window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
3111 4, Wn4_PhysicalMgmt);
3112 mdio_delay(vp);
1da177e4 3113 }
1da177e4 3114}
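/*
 * Usage sketch (hypothetical function, not in the driver): with mdio_read()
 * and mdio_write() defined above, restarting PHY autonegotiation by hand
 * would look roughly like this.  The driver itself normally reaches these
 * routines through the generic MII library, with vp->lock held as in the
 * other callers.
 */
static void vortex_restart_aneg_example(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	int bmcr = mdio_read(dev, vp->phys[0], MII_BMCR);

	mdio_write(dev, vp->phys[0], MII_BMCR,
		   bmcr | BMCR_ANENABLE | BMCR_ANRESTART);
}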
a880c4cd 3115
1da177e4
LT
3116/* ACPI: Advanced Configuration and Power Interface. */
3117/* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */
3118static void acpi_set_WOL(struct net_device *dev)
3119{
3120 struct vortex_private *vp = netdev_priv(dev);
62afe595 3121 void __iomem *ioaddr = vp->ioaddr;
1da177e4 3122
c17931c5
SK
3123 device_set_wakeup_enable(vp->gendev, vp->enable_wol);
3124
1da177e4
LT
3125 if (vp->enable_wol) {
3126 /* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
a095cfc4 3127 window_write16(vp, 2, 7, 0x0c);
1da177e4 3128 /* The RxFilter must accept the WOL frames. */
62afe595
JL
3129 iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
3130 iowrite16(RxEnable, ioaddr + EL3_CMD);
1da177e4 3131
1a1769f3 3132 if (pci_enable_wake(VORTEX_PCI(vp), PCI_D3hot, 1)) {
39738e16 3133 pr_info("%s: WOL not supported.\n", pci_name(VORTEX_PCI(vp)));
1a1769f3
SK
3134
3135 vp->enable_wol = 0;
3136 return;
3137 }
3c8fad18
DR
3138
3139 /* Change the power state to D3; RxEnable doesn't take effect. */
3140 pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
1da177e4 3141 }
1da177e4
LT
3142}
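/*
 * Sketch only (hypothetical helper): a plausible resume-side counterpart to
 * acpi_set_WOL() -- bring the card back to D0 and disarm the wake source
 * before touching its registers.  The driver's real resume path is handled
 * through VORTEX_PM_OPS and may differ in detail.
 */
static void vortex_wol_wakeup_example(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);

	if (VORTEX_PCI(vp)) {
		pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
		pci_enable_wake(VORTEX_PCI(vp), PCI_D0, 0);
	}
}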
3143
3144
a880c4cd 3145static void __devexit vortex_remove_one(struct pci_dev *pdev)
1da177e4
LT
3146{
3147 struct net_device *dev = pci_get_drvdata(pdev);
3148 struct vortex_private *vp;
3149
3150 if (!dev) {
39738e16 3151 pr_err("vortex_remove_one called for Compaq device!\n");
1da177e4
LT
3152 BUG();
3153 }
3154
3155 vp = netdev_priv(dev);
3156
62afe595
JL
3157 if (vp->cb_fn_base)
3158 pci_iounmap(VORTEX_PCI(vp), vp->cb_fn_base);
3159
1da177e4
LT
3160 unregister_netdev(dev);
3161
3162 if (VORTEX_PCI(vp)) {
3163 pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */
3164 if (vp->pm_state_valid)
3165 pci_restore_state(VORTEX_PCI(vp));
3166 pci_disable_device(VORTEX_PCI(vp));
3167 }
3168 /* Should really use issue_and_wait() here */
62afe595
JL
3169 iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14),
3170 vp->ioaddr + EL3_CMD);
3171
3172 pci_iounmap(VORTEX_PCI(vp), vp->ioaddr);
1da177e4
LT
3173
3174 pci_free_consistent(pdev,
3175 sizeof(struct boom_rx_desc) * RX_RING_SIZE
3176 + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
3177 vp->rx_ring,
3178 vp->rx_ring_dma);
3179 if (vp->must_free_region)
3180 release_region(dev->base_addr, vp->io_size);
3181 free_netdev(dev);
3182}
3183
3184
3185static struct pci_driver vortex_driver = {
3186 .name = "3c59x",
3187 .probe = vortex_init_one,
3188 .remove = __devexit_p(vortex_remove_one),
3189 .id_table = vortex_pci_tbl,
7bfc4ab5 3190 .driver.pm = VORTEX_PM_OPS,
1da177e4
LT
3191};
3192
3193
3194static int vortex_have_pci;
3195static int vortex_have_eisa;
3196
3197
a880c4cd 3198static int __init vortex_init(void)
1da177e4
LT
3199{
3200 int pci_rc, eisa_rc;
3201
29917620 3202 pci_rc = pci_register_driver(&vortex_driver);
1da177e4
LT
3203 eisa_rc = vortex_eisa_init();
3204
3205 if (pci_rc == 0)
3206 vortex_have_pci = 1;
3207 if (eisa_rc > 0)
3208 vortex_have_eisa = 1;
3209
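	/*
	 * Example of the outcome below (illustrative): on a typical PCI system
	 * pci_rc == 0, so vortex_have_pci is set and 0 is returned even before
	 * any card is probed; only when the PCI registration fails and no EISA
	 * card is found does this return -ENODEV.
	 */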
3210 return (vortex_have_pci + vortex_have_eisa) ? 0 : -ENODEV;
3211}
3212
3213
a880c4cd 3214static void __exit vortex_eisa_cleanup(void)
1da177e4
LT
3215{
3216 struct vortex_private *vp;
62afe595 3217 void __iomem *ioaddr;
1da177e4
LT
3218
3219#ifdef CONFIG_EISA
3220 /* Take care of the EISA devices */
a880c4cd 3221 eisa_driver_unregister(&vortex_eisa_driver);
1da177e4 3222#endif
6aa20a22 3223
1da177e4 3224 if (compaq_net_device) {
454d7c9b 3225 vp = netdev_priv(compaq_net_device);
62afe595
JL
3226 ioaddr = ioport_map(compaq_net_device->base_addr,
3227 VORTEX_TOTAL_SIZE);
1da177e4 3228
a880c4cd
SK
3229 unregister_netdev(compaq_net_device);
3230 iowrite16(TotalReset, ioaddr + EL3_CMD);
62afe595
JL
3231 release_region(compaq_net_device->base_addr,
3232 VORTEX_TOTAL_SIZE);
1da177e4 3233
a880c4cd 3234 free_netdev(compaq_net_device);
1da177e4
LT
3235 }
3236}
3237
3238
a880c4cd 3239static void __exit vortex_cleanup(void)
1da177e4
LT
3240{
3241 if (vortex_have_pci)
a880c4cd 3242 pci_unregister_driver(&vortex_driver);
1da177e4 3243 if (vortex_have_eisa)
a880c4cd 3244 vortex_eisa_cleanup();
1da177e4
LT
3245}
3246
3247
3248module_init(vortex_init);
3249module_exit(vortex_cleanup);