]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/typhoon.c
net: trans_start cleanups
[net-next-2.6.git] / drivers / net / typhoon.c
CommitLineData
1da177e4
LT
1/* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2/*
3 Written 2002-2004 by David Dillow <dave@thedillows.org>
4 Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5 Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
6
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
13
14 This software is available on a public web site. It may enable
15 cryptographic capabilities of the 3Com hardware, and may be
16 exported from the United States under License Exception "TSU"
17 pursuant to 15 C.F.R. Section 740.13(e).
18
19 This work was funded by the National Library of Medicine under
20 the Department of Energy project number 0274DD06D1 and NLM project
21 number Y1-LM-2015-01.
22
23 This driver is designed for the 3Com 3CR990 Family of cards with the
24 3XP Processor. It has been tested on x86 and sparc64.
25
26 KNOWN ISSUES:
27 *) The current firmware always strips the VLAN tag off, even if
28 we tell it not to. You should filter VLANs at the switch
29 as a workaround (good practice in any event) until we can
30 get this fixed.
31 *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32 issue. Hopefully 3Com will fix it.
33 *) Waiting for a command response takes 8ms due to non-preemptable
34 polling. Only significant for getting stats and creating
35 SAs, but an ugly wart never the less.
36
37 TODO:
38 *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
39 *) Add more support for ethtool (especially for NIC stats)
40 *) Allow disabling of RX checksum offloading
41 *) Fix MAC changing to work while the interface is up
42 (Need to put commands on the TX ring, which changes
43 the locking)
44 *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
45 http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
46*/
47
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 */
static int rx_copybreak = 200;

/* Should we use MMIO or Port IO?
 * 0: Port IO
 * 1: MMIO
 * 2: Try MMIO, fallback to Port IO
 */
static unsigned int use_mmio = 2;

/* end user-configurable values */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
 */
static const int multicast_filter_limit = 32;

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
 * Making the Tx ring too large decreases the effectiveness of channel
 * bonding and packet priority.
 * There are no ill effects from too-large receive rings.
 *
 * We don't currently use the Hi Tx ring so, don't make it very big.
 *
 * Beware that if we start using the Hi Tx ring, we will need to change
 * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
 */
#define TXHI_ENTRIES		2
#define TXLO_ENTRIES		128
#define RX_ENTRIES		32
#define COMMAND_ENTRIES		16
#define RESPONSE_ENTRIES	32

/* Ring sizes in bytes -- cmd and resp descriptors are the same size. */
#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))

/* The 3XP will preload and remove 64 entries from the free buffer
 * list, and we need one entry to keep the ring from wrapping, so
 * to keep this a power of two, we use 128 entries.
 */
#define RXFREE_ENTRIES		128
#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

/* Rx buffer size; large enough for a full Ethernet frame. */
#define PKT_BUF_SZ		1536
#define FIRMWARE_NAME		"3com/typhoon.bin"
1da177e4 102
0bc88e4a
JP
103#define pr_fmt(fmt) KBUILD_MODNAME " " fmt
104
1da177e4
LT
105#include <linux/module.h>
106#include <linux/kernel.h>
d43c36dc 107#include <linux/sched.h>
1da177e4
LT
108#include <linux/string.h>
109#include <linux/timer.h>
110#include <linux/errno.h>
111#include <linux/ioport.h>
1da177e4
LT
112#include <linux/interrupt.h>
113#include <linux/pci.h>
114#include <linux/netdevice.h>
115#include <linux/etherdevice.h>
116#include <linux/skbuff.h>
d7fe0f24 117#include <linux/mm.h>
1da177e4
LT
118#include <linux/init.h>
119#include <linux/delay.h>
120#include <linux/ethtool.h>
121#include <linux/if_vlan.h>
122#include <linux/crc32.h>
123#include <linux/bitops.h>
124#include <asm/processor.h>
125#include <asm/io.h>
126#include <asm/uaccess.h>
127#include <linux/in6.h>
1da177e4 128#include <linux/dma-mapping.h>
b775a750 129#include <linux/firmware.h>
0bc88e4a 130#include <generated/utsrelease.h>
1da177e4
LT
131
132#include "typhoon.h"
1da177e4 133
MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
MODULE_VERSION(UTS_RELEASE);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_NAME);
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
	       "the buffer given back to the NIC. Default "
	       "is 200.");
MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
	       "Default is to try MMIO and fallback to PIO.");
module_param(rx_copybreak, int, 0);
module_param(use_mmio, int, 0);

/* The 3XP's TSO engine only has room for 32 scatter/gather entries, so
 * disable TSO entirely if the kernel allows more fragments than that.
 */
#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
#undef NETIF_F_TSO
#endif

/* A single worst-case packet must always fit in the Tx ring. */
#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
#error TX ring too small!
#endif
155
/* Per-model card description: marketing name plus a bitmask of the
 * TYPHOON_* capability flags defined below.
 */
struct typhoon_card_info {
	const char *name;
	const int capabilities;
};
160
/* Capability flags for typhoon_card_info.capabilities */
#define TYPHOON_CRYPTO_NONE		0x00
#define TYPHOON_CRYPTO_DES		0x01
#define TYPHOON_CRYPTO_3DES		0x02
#define	TYPHOON_CRYPTO_VARIABLE		0x04
#define TYPHOON_FIBER			0x08
#define TYPHOON_WAKEUP_NEEDS_RESET	0x10

/* Card model identifiers; used as indices into typhoon_card_info[] and
 * as driver_data in the PCI device table.
 */
enum typhoon_cards {
	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
	TYPHOON_FXM,
};
174
/* directly indexed by enum typhoon_cards, above */
static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
	{ "3Com Typhoon (3C990-TX)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990-TX-95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990-TX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon (3C990SVR)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990SVR95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990SVR97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon2 (3C990B-TX-M)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon2 (3C990BSVR)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon (3CR990-FX-95)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-95 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon2 (3C990B-FX-97)",
		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
};
204
/* Notes on the new subsystem numbering scheme:
 * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
 * bit 4 indicates if this card has secured firmware (we don't support it)
 * bit 8 indicates if this is a (0) copper or (1) fiber card
 * bits 12-16 indicate card type: (0) client and (1) server
 */
static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
	/* 3CR990B variants are distinguished by subsystem ID */
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
	/* 3CR990_FX variants likewise keyed on subsystem ID */
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
241
/* Define the shared memory area
 * Align everything the 3XP will normally be using.
 * We'll need to move/align txHi if we start using that ring.
 *
 * This layout is shared with the NIC over DMA, so field order and the
 * packed attribute are part of the hardware contract -- do not reorder.
 */
#define __3xp_aligned	____cacheline_aligned
struct typhoon_shared {
	struct typhoon_interface	iface;
	struct typhoon_indexes		indexes			__3xp_aligned;
	struct tx_desc			txLo[TXLO_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxLo[RX_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxHi[RX_ENTRIES]	__3xp_aligned;
	struct cmd_desc			cmd[COMMAND_ENTRIES]	__3xp_aligned;
	struct resp_desc		resp[RESPONSE_ENTRIES]	__3xp_aligned;
	struct rx_free			rxBuff[RXFREE_ENTRIES]	__3xp_aligned;
	u32				zeroWord;
	struct tx_desc			txHi[TXHI_ENTRIES];
} __attribute__ ((packed));
259
/* Host-side bookkeeping for one Rx buffer: the skb it lives in and the
 * DMA address handed to the NIC.
 */
struct rxbuff_ent {
	struct sk_buff *skb;
	dma_addr_t	dma_addr;
};
264
/* Per-adapter private state.  Fields are grouped into cachelines by the
 * context that touches them (Tx path, Irq/Rx path, slow path) -- keep
 * that grouping when adding fields.
 */
struct typhoon {
	/* Tx cache line section */
	struct transmit_ring	txLoRing	____cacheline_aligned;
	struct pci_dev *	tx_pdev;
	void __iomem		*tx_ioaddr;
	u32			txlo_dma_addr;

	/* Irq/Rx cache line section */
	void __iomem		*ioaddr		____cacheline_aligned;
	struct typhoon_indexes *indexes;
	u8			awaiting_resp;	/* set while a command waits for a response */
	u8			duplex;
	u8			speed;
	u8			card_state;	/* enum state_values: Sleeping or Running */
	struct basic_ring	rxLoRing;
	struct pci_dev *	pdev;
	struct net_device *	dev;
	struct napi_struct	napi;
	spinlock_t		state_lock;
	struct vlan_group *	vlgrp;
	struct basic_ring	rxHiRing;
	struct basic_ring	rxBuffRing;
	struct rxbuff_ent	rxbuffers[RXENT_ENTRIES];

	/* general section */
	spinlock_t		command_lock	____cacheline_aligned;
	struct basic_ring	cmdRing;
	struct basic_ring	respRing;
	struct net_device_stats	stats;
	struct net_device_stats	stats_saved;
	struct typhoon_shared *	shared;
	dma_addr_t		shared_dma;
	__le16			xcvr_select;
	__le16			wol_events;
	__le32			offload;

	/* unused stuff (future use) */
	int			capabilities;
	struct transmit_ring	txHiRing;
};
305
/* How typhoon_reset() should wait for completion: not at all, busy-poll,
 * or poll with the ability to sleep between checks.
 */
enum completion_wait_values {
	NoWait = 0, WaitNoSleep, WaitSleep,
};
309
/* These are the values for the typhoon.card_state variable.
 * These determine where the statistics will come from in get_stats().
 * The sleep image does not support the statistics we need.
 */
enum state_values {
	Sleeping = 0, Running,
};
317
/* PCI writes are not guaranteed to be posted in order, but outstanding writes
 * cannot pass a read, so this forces current writes to post.
 */
#define typhoon_post_pci_writes(x) \
	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)

/* We'll wait up to six seconds for a reset, and half a second normally.
 */
#define TYPHOON_UDELAY			50
#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)

/* TSO helpers: when the kernel lacks NETIF_F_TSO these collapse to
 * zeros so the Tx path compiles away the segmentation support.
 */
#if defined(NETIF_F_TSO)
#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
#define TSO_NUM_DESCRIPTORS	2
#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
#else
#define NETIF_F_TSO		0
#define skb_tso_size(x)		0
#define TSO_NUM_DESCRIPTORS	0
#define TSO_OFFLOAD_ON		0
#endif
341
342static inline void
343typhoon_inc_index(u32 *index, const int count, const int num_entries)
344{
345 /* Increment a ring index -- we can use this for all rings execept
346 * the Rx rings, as they use different size descriptors
347 * otherwise, everything is the same size as a cmd_desc
348 */
349 *index += count * sizeof(struct cmd_desc);
350 *index %= num_entries * sizeof(struct cmd_desc);
351}
352
static inline void
typhoon_inc_cmd_index(u32 *index, const int count)
{
	/* Advance an index within the host->card command ring. */
	typhoon_inc_index(index, count, COMMAND_ENTRIES);
}
358
static inline void
typhoon_inc_resp_index(u32 *index, const int count)
{
	/* Advance an index within the card->host response ring. */
	typhoon_inc_index(index, count, RESPONSE_ENTRIES);
}
364
static inline void
typhoon_inc_rxfree_index(u32 *index, const int count)
{
	/* Advance an index within the free Rx buffer ring. */
	typhoon_inc_index(index, count, RXFREE_ENTRIES);
}
370
static inline void
typhoon_inc_tx_index(u32 *index, const int count)
{
	/* Advance an index within the low-priority Tx ring.
	 * if we start using the Hi Tx ring, this needs updating
	 */
	typhoon_inc_index(index, count, TXLO_ENTRIES);
}
377
378static inline void
379typhoon_inc_rx_index(u32 *index, const int count)
380{
381 /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
382 *index += count * sizeof(struct rx_desc);
383 *index %= RX_ENTRIES * sizeof(struct rx_desc);
384}
385
/* Soft-reset the 3XP and optionally wait for it to come back up.
 *
 * @ioaddr: mapped register base
 * @wait_type: NoWait (fire and forget), WaitNoSleep (busy-poll), or
 *             WaitSleep (poll, sleeping between checks)
 *
 * Returns 0 on success, -ETIMEDOUT if the card never reported
 * TYPHOON_STATUS_WAITING_FOR_HOST within the timeout.
 */
static int
typhoon_reset(void __iomem *ioaddr, int wait_type)
{
	int i, err = 0;
	int timeout;

	if(wait_type == WaitNoSleep)
		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
	else
		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;

	/* mask and ack all interrupts before touching the reset register */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* pulse the reset: assert, make sure the write posted, deassert */
	iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
	typhoon_post_pci_writes(ioaddr);
	udelay(1);
	iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);

	if(wait_type != NoWait) {
		for(i = 0; i < timeout; i++) {
			if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
			   TYPHOON_STATUS_WAITING_FOR_HOST)
				goto out;

			if(wait_type == WaitSleep)
				schedule_timeout_uninterruptible(1);
			else
				udelay(TYPHOON_UDELAY);
		}

		err = -ETIMEDOUT;
	}

out:
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* The 3XP seems to need a little extra time to complete the load
	 * of the sleep image before we can reliably boot it. Failure to
	 * do this occasionally results in a hung adapter after boot in
	 * typhoon_init_one() while trying to read the MAC address or
	 * putting the card to sleep. 3Com's driver waits 5ms, but
	 * that seems to be overkill. However, if we can sleep, we might
	 * as well give it that much time. Otherwise, we'll give it 500us,
	 * which should be enough (I've see it work well at 100us, but still
	 * saw occasional problems.)
	 */
	if(wait_type == WaitSleep)
		msleep(5);
	else
		udelay(500);
	return err;
}
440
441static int
442typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
443{
444 int i, err = 0;
445
446 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
447 if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
448 goto out;
449 udelay(TYPHOON_UDELAY);
450 }
451
452 err = -ETIMEDOUT;
453
454out:
455 return err;
456}
457
458static inline void
459typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
460{
461 if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
462 netif_carrier_off(dev);
463 else
464 netif_carrier_on(dev);
465}
466
/* Answer a "hello" request from the card by queueing a HELLO_RESP
 * command.  Called from response processing; must not block.
 */
static inline void
typhoon_hello(struct typhoon *tp)
{
	struct basic_ring *ring = &tp->cmdRing;
	struct cmd_desc *cmd;

	/* We only get a hello request if we've not sent anything to the
	 * card in a long while. If the lock is held, then we're in the
	 * process of issuing a command, so we don't need to respond.
	 */
	if(spin_trylock(&tp->command_lock)) {
		cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
		typhoon_inc_cmd_index(&ring->lastWrite, 1);

		INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
		/* make sure the descriptor is visible before telling the
		 * card about it */
		wmb();
		iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
		spin_unlock(&tp->command_lock);
	}
}
487
/* Drain the card->host response ring.
 *
 * @resp_size: capacity (in resp_desc entries) of @resp_save
 * @resp_save: if non-NULL, the first sequenced response is copied here
 *             (handling ring wrap); unsolicited responses (media status,
 *             hello) are dispatched, anything else is logged and dropped.
 *
 * Returns nonzero once the awaited response has been captured (or if
 * none was requested), zero if it has not arrived yet.
 */
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
			 struct resp_desc *resp_save)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct resp_desc *resp;
	u8 *base = tp->respRing.ringBase;
	int count, len, wrap_len;
	u32 cleared;
	u32 ready;

	cleared = le32_to_cpu(indexes->respCleared);
	ready = le32_to_cpu(indexes->respReady);
	while(cleared != ready) {
		resp = (struct resp_desc *)(base + cleared);
		count = resp->numDesc + 1;
		if(resp_save && resp->seqNo) {
			/* caller's buffer too small for this response */
			if(count > resp_size) {
				resp_save->flags = TYPHOON_RESP_ERROR;
				goto cleanup;
			}

			/* copy out, splitting at the ring boundary if the
			 * response wraps around the end of the ring */
			wrap_len = 0;
			len = count * sizeof(*resp);
			if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
				wrap_len = cleared + len - RESPONSE_RING_SIZE;
				len = RESPONSE_RING_SIZE - cleared;
			}

			memcpy(resp_save, resp, len);
			if(unlikely(wrap_len)) {
				resp_save += len / sizeof(*resp);
				memcpy(resp_save, base, wrap_len);
			}

			/* NULL marks "got it" for the return value below */
			resp_save = NULL;
		} else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
			typhoon_media_status(tp->dev, resp);
		} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
			typhoon_hello(tp);
		} else {
			netdev_err(tp->dev,
				   "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
				   le16_to_cpu(resp->cmd),
				   resp->numDesc, resp->flags,
				   le16_to_cpu(resp->parm1),
				   le32_to_cpu(resp->parm2),
				   le32_to_cpu(resp->parm3));
		}

cleanup:
		typhoon_inc_resp_index(&cleared, count);
	}

	/* publish our progress to the card before returning */
	indexes->respCleared = cpu_to_le32(cleared);
	wmb();
	return (resp_save == NULL);
}
546
547static inline int
548typhoon_num_free(int lastWrite, int lastRead, int ringSize)
549{
550 /* this works for all descriptors but rx_desc, as they are a
551 * different size than the cmd_desc -- everyone else is the same
552 */
553 lastWrite /= sizeof(struct cmd_desc);
554 lastRead /= sizeof(struct cmd_desc);
555 return (ringSize + lastRead - lastWrite - 1) % ringSize;
556}
557
558static inline int
559typhoon_num_free_cmd(struct typhoon *tp)
560{
561 int lastWrite = tp->cmdRing.lastWrite;
562 int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
563
564 return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
565}
566
567static inline int
568typhoon_num_free_resp(struct typhoon *tp)
569{
570 int respReady = le32_to_cpu(tp->indexes->respReady);
571 int respCleared = le32_to_cpu(tp->indexes->respCleared);
572
573 return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
574}
575
/* Free slots in the low-priority Tx ring. */
static inline int
typhoon_num_free_tx(struct transmit_ring *ring)
{
	/* if we start using the Hi Tx ring, this needs updating */
	return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
}
582
/* Queue @num_cmd command descriptors to the 3XP and, if the command
 * expects a response, busy-wait for it.
 *
 * @num_resp/@resp: caller-supplied response buffer; if the command
 * responds but @resp is NULL, a local single-entry buffer is used so
 * the error flag can still be checked.
 *
 * Returns 0 on success, -ENOMEM if the rings lack space, -ETIMEDOUT if
 * no response arrived, -EIO if the card flagged an error.  Takes
 * tp->command_lock; callers must be able to spin ~8ms worst case.
 */
static int
typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
		      int num_resp, struct resp_desc *resp)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct basic_ring *ring = &tp->cmdRing;
	struct resp_desc local_resp;
	int i, err = 0;
	int got_resp;
	int freeCmd, freeResp;
	int len, wrap_len;

	spin_lock(&tp->command_lock);

	freeCmd = typhoon_num_free_cmd(tp);
	freeResp = typhoon_num_free_resp(tp);

	if(freeCmd < num_cmd || freeResp < num_resp) {
		netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
			   freeCmd, num_cmd, freeResp, num_resp);
		err = -ENOMEM;
		goto out;
	}

	if(cmd->flags & TYPHOON_CMD_RESPOND) {
		/* If we're expecting a response, but the caller hasn't given
		 * us a place to put it, we'll provide one.
		 */
		tp->awaiting_resp = 1;
		if(resp == NULL) {
			resp = &local_resp;
			num_resp = 1;
		}
	}

	/* copy the command(s) into the ring, splitting at the ring
	 * boundary if they wrap */
	wrap_len = 0;
	len = num_cmd * sizeof(*cmd);
	if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
		wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
		len = COMMAND_RING_SIZE - ring->lastWrite;
	}

	memcpy(ring->ringBase + ring->lastWrite, cmd, len);
	if(unlikely(wrap_len)) {
		struct cmd_desc *wrap_ptr = cmd;
		wrap_ptr += len / sizeof(*cmd);
		memcpy(ring->ringBase, wrap_ptr, wrap_len);
	}

	typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);

	/* "I feel a presence... another warrior is on the mesa."
	 */
	wmb();
	iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
	typhoon_post_pci_writes(tp->ioaddr);

	if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
		goto out;

	/* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
	 * preempt or do anything other than take interrupts. So, don't
	 * wait for a response unless you have to.
	 *
	 * I've thought about trying to sleep here, but we're called
	 * from many contexts that don't allow that. Also, given the way
	 * 3Com has implemented irq coalescing, we would likely timeout --
	 * this has been observed in real life!
	 *
	 * The big killer is we have to wait to get stats from the card,
	 * though we could go to a periodic refresh of those if we don't
	 * mind them getting somewhat stale. The rest of the waiting
	 * commands occur during open/close/suspend/resume, so they aren't
	 * time critical. Creating SAs in the future will also have to
	 * wait here.
	 */
	got_resp = 0;
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
		if(indexes->respCleared != indexes->respReady)
			got_resp = typhoon_process_response(tp, num_resp,
								resp);
		udelay(TYPHOON_UDELAY);
	}

	if(!got_resp) {
		err = -ETIMEDOUT;
		goto out;
	}

	/* Collect the error response even if we don't care about the
	 * rest of the response
	 */
	if(resp->flags & TYPHOON_RESP_ERROR)
		err = -EIO;

out:
	if(tp->awaiting_resp) {
		tp->awaiting_resp = 0;
		smp_wmb();

		/* Ugh. If a response was added to the ring between
		 * the call to typhoon_process_response() and the clearing
		 * of tp->awaiting_resp, we could have missed the interrupt
		 * and it could hang in the ring an indeterminate amount of
		 * time. So, check for it, and interrupt ourselves if this
		 * is the case.
		 */
		if(indexes->respCleared != indexes->respReady)
			iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
	}

	spin_unlock(&tp->command_lock);
	return err;
}
697
/* vlan_rx_register hook: record the new vlan_group and, when VLAN
 * support is being turned on or off, update the 3XP's offload tasks.
 * The command is issued with state_lock dropped because it is slow.
 */
static void
typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	int err;

	spin_lock_bh(&tp->state_lock);
	if(!tp->vlgrp != !grp) {
		/* We've either been turned on for the first time, or we've
		 * been turned off. Update the 3XP.
		 */
		if(grp)
			tp->offload |= TYPHOON_OFFLOAD_VLAN;
		else
			tp->offload &= ~TYPHOON_OFFLOAD_VLAN;

		/* If the interface is up, the runtime is running -- and we
		 * must be up for the vlan core to call us.
		 *
		 * Do the command outside of the spin lock, as it is slow.
		 */
		INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
					TYPHOON_CMD_SET_OFFLOAD_TASKS);
		xp_cmd.parm2 = tp->offload;
		xp_cmd.parm3 = tp->offload;
		spin_unlock_bh(&tp->state_lock);
		err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
		if(err < 0)
			netdev_err(tp->dev, "vlan offload error %d\n", -err);
		spin_lock_bh(&tp->state_lock);
	}

	/* now make the change visible */
	tp->vlgrp = grp;
	spin_unlock_bh(&tp->state_lock);
}
735
1da177e4
LT
/* Write a TSO option descriptor for @skb into the Tx ring.  @ring_dma
 * is the bus address of the ring base, used so the card can DMA its
 * completion status back into this descriptor's bytesTx field.
 * Advances txRing->lastWrite by one descriptor.
 */
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
		 u32 ring_dma)
{
	struct tcpopt_desc *tcpd;
	u32 tcpd_offset = ring_dma;

	tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
	tcpd_offset += txRing->lastWrite;
	tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
	tcpd->numDesc = 1;
	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
	tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
	tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
	tcpd->bytesTx = cpu_to_le32(skb->len);
	tcpd->status = 0;
}
756
/* ndo_start_xmit handler: build descriptors for @skb on the low-priority
 * Tx ring (header desc, optional TSO desc, one fragment desc per DMA
 * segment), kick the card, and stop the queue if a worst-case packet
 * would no longer fit.  Always returns NETDEV_TX_OK.
 */
static netdev_tx_t
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* we have two rings to choose from, but we only use txLo for now
	 * If we start using the Hi ring as well, we'll need to update
	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
	 * and TXHI_ENTRIES to match, as well as update the TSO code below
	 * to get the right DMA address
	 */
	txRing = &tp->txLoRing;

	/* We need one descriptor for each fragment of the sk_buff, plus the
	 * one for the ->data area of it.
	 *
	 * The docs say a maximum of 16 fragment descriptors per TCP option
	 * descriptor, then make a new packet descriptor and option descriptor
	 * for the next 16 fragments. The engineers say just an option
	 * descriptor is needed. I've tested up to 26 fragments with a single
	 * packet descriptor/option descriptor combo, so I use that for now.
	 *
	 * If problems develop with TSO, check this first.
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if (skb_is_gso(skb))
		numDesc++;

	/* When checking for free space in the ring, we need to also
	 * account for the initial Tx descriptor, and we always must leave
	 * at least one descriptor unused in the ring so that it doesn't
	 * wrap and look empty.
	 *
	 * The only time we should loop here is when we hit the race
	 * between marking the queue awake and updating the cleared index.
	 * Just loop and it will appear. This comes from the acenic driver.
	 */
	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	/* header descriptor: records the skb pointer for Tx completion */
	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	first_txd->tx_addr = (u64)((unsigned long) skb);
	first_txd->processFlags = 0;

	if(skb->ip_summed == CHECKSUM_PARTIAL) {
		/* The 3XP will figure out if this is UDP/TCP */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if(vlan_tx_tag_present(skb)) {
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		    cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if (skb_is_gso(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* No need to worry about padding packet -- the firmware pads
	 * it with zeros to ETH_ZLEN for us.
	 */
	if(skb_shinfo(skb)->nr_frags == 0) {
		/* linear skb: a single fragment descriptor suffices */
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		/* head area first, then one descriptor per page fragment */
		len = skb_headlen(skb);
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;

		for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = frag->size;
			frag_addr = (void *) page_address(frag->page) +
						frag->page_offset;
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					 PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->frag.addr = cpu_to_le32(skb_dma);
			txd->frag.addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* Kick the 3XP
	 */
	wmb();
	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	/* If we don't have room to put the worst case packet on the
	 * queue, then we must stop the queue. We need 2 extra
	 * descriptors -- one to prevent ring wrap, and one for the
	 * Tx header.
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		/* A Tx complete IRQ could have gotten inbetween, making
		 * the ring free again. Only need to recheck here, since
		 * Tx is serialized.
		 */
		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
904
/* ndo_set_rx_mode handler: program the 3XP's Rx filter (and, when
 * needed, its 64-bit multicast hash) to match dev->flags and the
 * device's multicast list.
 */
static void
typhoon_set_rx_mode(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	u32 mc_filter[2];
	__le16 filter;

	filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(dev->flags & IFF_PROMISC) {
		filter |= TYPHOON_RX_FILTER_PROMISCOUS;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		  (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		filter |= TYPHOON_RX_FILTER_ALL_MCAST;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;

		/* build the 64-bit CRC hash from the multicast list */
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
			mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
		}

		INIT_COMMAND_NO_RESPONSE(&xp_cmd,
					 TYPHOON_CMD_SET_MULTICAST_HASH);
		xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
		xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
		xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
		typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

		filter |= TYPHOON_RX_FILTER_MCAST_HASH;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = filter;
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}
943
944static int
945typhoon_do_get_stats(struct typhoon *tp)
946{
947 struct net_device_stats *stats = &tp->stats;
948 struct net_device_stats *saved = &tp->stats_saved;
949 struct cmd_desc xp_cmd;
950 struct resp_desc xp_resp[7];
951 struct stats_resp *s = (struct stats_resp *) xp_resp;
952 int err;
953
954 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
955 err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
956 if(err < 0)
957 return err;
958
959 /* 3Com's Linux driver uses txMultipleCollisions as it's
960 * collisions value, but there is some other collision info as well...
961 *
962 * The extra status reported would be a good candidate for
963 * ethtool_ops->get_{strings,stats}()
964 */
965 stats->tx_packets = le32_to_cpu(s->txPackets);
73eac064 966 stats->tx_bytes = le64_to_cpu(s->txBytes);
1da177e4
LT
967 stats->tx_errors = le32_to_cpu(s->txCarrierLost);
968 stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
969 stats->collisions = le32_to_cpu(s->txMultipleCollisions);
970 stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
73eac064 971 stats->rx_bytes = le64_to_cpu(s->rxBytesGood);
1da177e4
LT
972 stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
973 stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
974 le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
975 stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
976 stats->rx_length_errors = le32_to_cpu(s->rxOversized);
977 tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
978 SPEED_100 : SPEED_10;
979 tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
980 DUPLEX_FULL : DUPLEX_HALF;
981
982 /* add in the saved statistics
983 */
984 stats->tx_packets += saved->tx_packets;
985 stats->tx_bytes += saved->tx_bytes;
986 stats->tx_errors += saved->tx_errors;
987 stats->collisions += saved->collisions;
988 stats->rx_packets += saved->rx_packets;
989 stats->rx_bytes += saved->rx_bytes;
990 stats->rx_fifo_errors += saved->rx_fifo_errors;
991 stats->rx_errors += saved->rx_errors;
992 stats->rx_crc_errors += saved->rx_crc_errors;
993 stats->rx_length_errors += saved->rx_length_errors;
994
995 return 0;
996}
997
998static struct net_device_stats *
999typhoon_get_stats(struct net_device *dev)
1000{
1001 struct typhoon *tp = netdev_priv(dev);
1002 struct net_device_stats *stats = &tp->stats;
1003 struct net_device_stats *saved = &tp->stats_saved;
1004
1005 smp_rmb();
1006 if(tp->card_state == Sleeping)
1007 return saved;
1008
1009 if(typhoon_do_get_stats(tp) < 0) {
0bc88e4a 1010 netdev_err(dev, "error getting stats\n");
1da177e4
LT
1011 return saved;
1012 }
1013
1014 return stats;
1015}
1016
1017static int
1018typhoon_set_mac_address(struct net_device *dev, void *addr)
1019{
1020 struct sockaddr *saddr = (struct sockaddr *) addr;
1021
1022 if(netif_running(dev))
1023 return -EBUSY;
1024
1025 memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
1026 return 0;
1027}
1028
1029static void
1030typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1031{
1032 struct typhoon *tp = netdev_priv(dev);
1033 struct pci_dev *pci_dev = tp->pdev;
1034 struct cmd_desc xp_cmd;
1035 struct resp_desc xp_resp[3];
1036
1037 smp_rmb();
1038 if(tp->card_state == Sleeping) {
1039 strcpy(info->fw_version, "Sleep image");
1040 } else {
1041 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1042 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1043 strcpy(info->fw_version, "Unknown runtime");
1044 } else {
fdcfd77c 1045 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
1da177e4 1046 snprintf(info->fw_version, 32, "%02x.%03x.%03x",
6aa20a22 1047 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
1da177e4
LT
1048 sleep_ver & 0xfff);
1049 }
1050 }
1051
0bc88e4a
JP
1052 strcpy(info->driver, KBUILD_MODNAME);
1053 strcpy(info->version, UTS_RELEASE);
1da177e4
LT
1054 strcpy(info->bus_info, pci_name(pci_dev));
1055}
1056
/* ethtool get_settings hook: report supported/advertised link modes,
 * current speed/duplex and port type.
 *
 * Speed and duplex are only updated by READ_STATS responses, so a stats
 * refresh is forced before reporting them.  Always returns 0.
 */
static int
typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct typhoon *tp = netdev_priv(dev);

	cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
				SUPPORTED_Autoneg;

	/* advertise exactly what the selected transceiver mode forces */
	switch (tp->xcvr_select) {
	case TYPHOON_XCVR_10HALF:
		cmd->advertising = ADVERTISED_10baseT_Half;
		break;
	case TYPHOON_XCVR_10FULL:
		cmd->advertising = ADVERTISED_10baseT_Full;
		break;
	case TYPHOON_XCVR_100HALF:
		cmd->advertising = ADVERTISED_100baseT_Half;
		break;
	case TYPHOON_XCVR_100FULL:
		cmd->advertising = ADVERTISED_100baseT_Full;
		break;
	case TYPHOON_XCVR_AUTONEG:
		cmd->advertising = ADVERTISED_10baseT_Half |
				    ADVERTISED_10baseT_Full |
				    ADVERTISED_100baseT_Half |
				    ADVERTISED_100baseT_Full |
				    ADVERTISED_Autoneg;
		break;
	}

	if(tp->capabilities & TYPHOON_FIBER) {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;
		cmd->port = PORT_FIBRE;
	} else {
		cmd->supported |= SUPPORTED_10baseT_Half |
		    			SUPPORTED_10baseT_Full |
					SUPPORTED_TP;
		cmd->advertising |= ADVERTISED_TP;
		cmd->port = PORT_TP;
	}

	/* need to get stats to make these link speed/duplex valid */
	typhoon_do_get_stats(tp);
	cmd->speed = tp->speed;
	cmd->duplex = tp->duplex;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}
1114
/* ethtool set_settings hook: map the requested autoneg/speed/duplex to
 * one of the card's fixed transceiver modes and issue XCVR_SELECT.
 *
 * Only 10/100 half/full and autoneg are accepted; anything else returns
 * -EINVAL.  Cached speed/duplex are invalidated under autoneg since the
 * real values only become known from a later stats read.
 */
static int
typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	__le16 xcvr;
	int err;

	err = -EINVAL;
	if(cmd->autoneg == AUTONEG_ENABLE) {
		xcvr = TYPHOON_XCVR_AUTONEG;
	} else {
		if(cmd->duplex == DUPLEX_HALF) {
			if(cmd->speed == SPEED_10)
				xcvr = TYPHOON_XCVR_10HALF;
			else if(cmd->speed == SPEED_100)
				xcvr = TYPHOON_XCVR_100HALF;
			else
				goto out;
		} else if(cmd->duplex == DUPLEX_FULL) {
			if(cmd->speed == SPEED_10)
				xcvr = TYPHOON_XCVR_10FULL;
			else if(cmd->speed == SPEED_100)
				xcvr = TYPHOON_XCVR_100FULL;
			else
				goto out;
		} else
			goto out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = xcvr;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto out;

	/* command accepted -- remember the new selection */
	tp->xcvr_select = xcvr;
	if(cmd->autoneg == AUTONEG_ENABLE) {
		tp->speed = 0xff;	/* invalid */
		tp->duplex = 0xff;	/* invalid */
	} else {
		tp->speed = cmd->speed;
		tp->duplex = cmd->duplex;
	}

out:
	return err;
}
1163
1164static void
1165typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1166{
1167 struct typhoon *tp = netdev_priv(dev);
1168
1169 wol->supported = WAKE_PHY | WAKE_MAGIC;
1170 wol->wolopts = 0;
1171 if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1172 wol->wolopts |= WAKE_PHY;
1173 if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1174 wol->wolopts |= WAKE_MAGIC;
1175 memset(&wol->sopass, 0, sizeof(wol->sopass));
1176}
1177
1178static int
1179typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1180{
1181 struct typhoon *tp = netdev_priv(dev);
1182
1183 if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1184 return -EINVAL;
1185
1186 tp->wol_events = 0;
1187 if(wol->wolopts & WAKE_PHY)
1188 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1189 if(wol->wolopts & WAKE_MAGIC)
1190 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1191
1192 return 0;
1193}
1194
/* ethtool get_rx_csum hook: the driver never disables the card's RX
 * checksum offload, so always report it as enabled.
 */
static u32
typhoon_get_rx_csum(struct net_device *dev)
{
	/* For now, we don't allow turning off RX checksums.
	 */
	return 1;
}
1202
1203static void
1204typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1205{
1206 ering->rx_max_pending = RXENT_ENTRIES;
1207 ering->rx_mini_max_pending = 0;
1208 ering->rx_jumbo_max_pending = 0;
1209 ering->tx_max_pending = TXLO_ENTRIES - 1;
1210
1211 ering->rx_pending = RXENT_ENTRIES;
1212 ering->rx_mini_pending = 0;
1213 ering->rx_jumbo_pending = 0;
1214 ering->tx_pending = TXLO_ENTRIES - 1;
1215}
1216
7282d491 1217static const struct ethtool_ops typhoon_ethtool_ops = {
1da177e4
LT
1218 .get_settings = typhoon_get_settings,
1219 .set_settings = typhoon_set_settings,
1220 .get_drvinfo = typhoon_get_drvinfo,
1221 .get_wol = typhoon_get_wol,
1222 .set_wol = typhoon_set_wol,
1223 .get_link = ethtool_op_get_link,
1224 .get_rx_csum = typhoon_get_rx_csum,
1da177e4 1225 .set_tx_csum = ethtool_op_set_tx_csum,
1da177e4 1226 .set_sg = ethtool_op_set_sg,
1da177e4
LT
1227 .set_tso = ethtool_op_set_tso,
1228 .get_ringparam = typhoon_get_ringparam,
1229};
1230
1231static int
1232typhoon_wait_interrupt(void __iomem *ioaddr)
1233{
1234 int i, err = 0;
1235
1236 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1237 if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1238 TYPHOON_INTR_BOOTCMD)
1239 goto out;
1240 udelay(TYPHOON_UDELAY);
1241 }
1242
1243 err = -ETIMEDOUT;
1244
1245out:
1246 iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1247 return err;
1248}
1249
1250#define shared_offset(x) offsetof(struct typhoon_shared, x)
1251
/* Build the host/card shared interface block: zero the whole shared
 * area, fill in the DMA address and size of every ring, and initialize
 * the host-side ring bookkeeping and locks.
 *
 * Called once at probe time; the shared block is later handed to the
 * card as its boot record (typhoon_boot_3XP()).
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* The *Hi members of iface are all init'd to zero by the memset().
	 */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* host-side (virtual) views of the same rings */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
	tp->card_state = Sleeping;

	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;

	spin_lock_init(&tp->command_lock);
	spin_lock_init(&tp->state_lock);

	/* Force the writes to the shared memory area out before continuing. */
	wmb();
}
1321
1322static void
1323typhoon_init_rings(struct typhoon *tp)
1324{
1325 memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1326
1327 tp->txLoRing.lastWrite = 0;
1328 tp->txHiRing.lastWrite = 0;
1329 tp->rxLoRing.lastWrite = 0;
1330 tp->rxHiRing.lastWrite = 0;
1331 tp->rxBuffRing.lastWrite = 0;
1332 tp->cmdRing.lastWrite = 0;
1333 tp->cmdRing.lastWrite = 0;
1334
1335 tp->txLoRing.lastRead = 0;
1336 tp->txHiRing.lastRead = 0;
1337}
1338
b775a750
BH
1339static const struct firmware *typhoon_fw;
1340
1341static int
1342typhoon_request_firmware(struct typhoon *tp)
1343{
a8c9a53c
DD
1344 const struct typhoon_file_header *fHdr;
1345 const struct typhoon_section_header *sHdr;
1346 const u8 *image_data;
1347 u32 numSections;
1348 u32 section_len;
1349 u32 remaining;
b775a750
BH
1350 int err;
1351
1352 if (typhoon_fw)
1353 return 0;
1354
1355 err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1356 if (err) {
0bc88e4a
JP
1357 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
1358 FIRMWARE_NAME);
b775a750
BH
1359 return err;
1360 }
1361
a8c9a53c
DD
1362 image_data = (u8 *) typhoon_fw->data;
1363 remaining = typhoon_fw->size;
1364 if (remaining < sizeof(struct typhoon_file_header))
1365 goto invalid_fw;
d517c4a1 1366
a8c9a53c
DD
1367 fHdr = (struct typhoon_file_header *) image_data;
1368 if (memcmp(fHdr->tag, "TYPHOON", 8))
1369 goto invalid_fw;
1370
1371 numSections = le32_to_cpu(fHdr->numSections);
1372 image_data += sizeof(struct typhoon_file_header);
1373 remaining -= sizeof(struct typhoon_file_header);
1374
1375 while (numSections--) {
1376 if (remaining < sizeof(struct typhoon_section_header))
1377 goto invalid_fw;
1378
1379 sHdr = (struct typhoon_section_header *) image_data;
1380 image_data += sizeof(struct typhoon_section_header);
1381 section_len = le32_to_cpu(sHdr->len);
1382
1383 if (remaining < section_len)
1384 goto invalid_fw;
1385
1386 image_data += section_len;
1387 remaining -= section_len;
b775a750
BH
1388 }
1389
1390 return 0;
d517c4a1 1391
a8c9a53c 1392invalid_fw:
0bc88e4a 1393 netdev_err(tp->dev, "Invalid firmware image\n");
d517c4a1
DM
1394 release_firmware(typhoon_fw);
1395 typhoon_fw = NULL;
a8c9a53c 1396 return -EINVAL;
b775a750
BH
1397}
1398
1da177e4
LT
/* Download the (already validated) runtime image to the 3XP.
 *
 * The image is streamed section by section through a single page of
 * consistent DMA memory: for each chunk the card is handed the chunk
 * length, an IPv4-style checksum, the target load address and the DMA
 * address, then told a segment is available.  Boot-command interrupts
 * are temporarily enabled so typhoon_wait_interrupt() can observe the
 * card's per-segment handshake; the original interrupt masks are
 * restored on all exit paths.
 *
 * Returns 0 on success, -ENOMEM if the DMA page cannot be allocated, or
 * -ETIMEDOUT if the card misses any handshake.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	const struct typhoon_file_header *fHdr;
	const struct typhoon_section_header *sHdr;
	const u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	__sum16 csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	image_data = (u8 *) typhoon_fw->data;
	fHdr = (struct typhoon_file_header *) image_data;

	/* Cannot just map the firmware image using pci_map_single() as
	 * the firmware is vmalloc()'d and may not be physically contiguous,
	 * so we allocate some consistent memory to copy the sections into.
	 */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if(!dpage) {
		netdev_err(tp->dev, "no DMA mem for firmware\n");
		goto err_out;
	}

	/* enable + unmask the boot-command interrupt for the handshake */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(tp->dev, "card ready timeout\n");
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* hand the card the entry point and the image's HMAC digest */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The ioread32() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		while(section_len) {
			/* at most one DMA page per handshake round */
			len = min_t(u32, section_len, PAGE_SIZE);

			if(typhoon_wait_interrupt(ioaddr) < 0 ||
			   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				netdev_err(tp->dev, "segment ready timeout\n");
				goto err_out_irq;
			}

			/* Do an pseudo IPv4 checksum on the data -- first
			 * need to convert each u16 to cpu order before
			 * summing. Fortunately, due to the properties of
			 * the checksum, we can do this once, at the end.
			 */
			csum = csum_fold(csum_partial_copy_nocheck(image_data,
							   dpage, len,
							   0));

			iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			iowrite32(le16_to_cpu((__force __le16)csum),
					ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			iowrite32(load_addr,
					ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
					ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	/* wait for the card to finish consuming the last segment */
	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		netdev_err(tp->dev, "final segment ready timeout\n");
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* restore the interrupt state we found on entry */
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
1539
/* Boot the 3XP once the runtime image has been downloaded.
 *
 * Waits for the card to reach 'initial_status', hands it the DMA
 * address of the shared boot record, waits for it to report RUNNING,
 * then clears the doorbell registers and issues the final BOOT command.
 *
 * Returns 0 on success, -ETIMEDOUT if either status wait fails.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	void __iomem *ioaddr = tp->ioaddr;

	if(typhoon_wait_status(ioaddr, initial_status) < 0) {
		netdev_err(tp->dev, "boot ready timeout\n");
		goto out_timeout;
	}

	iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
				ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Clear the Transmit and Command ready registers
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
1575
/* Walk a Tx ring from its last-read cursor up to the card's cleared
 * index, releasing completed work: packet descriptors carry the skb
 * pointer (stashed in tx_addr at transmit time) which is freed here;
 * fragment descriptors carry a DMA mapping which is unmapped.
 *
 * Returns the new last-read offset; the caller stores it back into the
 * ring (see typhoon_tx_complete()).
 */
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
			volatile __le32 * index)
{
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	while(lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if(type == TYPHOON_TX_DESC) {
			/* This tx_desc describes a packet.
			 */
			unsigned long ptr = tx->tx_addr;
			struct sk_buff *skb = (struct sk_buff *) ptr;
			dev_kfree_skb_irq(skb);
		} else if(type == TYPHOON_FRAG_DESC) {
			/* This tx_desc describes a memory mapping. Free it.
			 */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
			dma_len = le16_to_cpu(tx->len);
			pci_unmap_single(tp->pdev, skb_dma, dma_len,
				       PCI_DMA_TODEVICE);
		}

		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
1611
1612static void
1613typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
03a710ff 1614 volatile __le32 * index)
1da177e4
LT
1615{
1616 u32 lastRead;
1617 int numDesc = MAX_SKB_FRAGS + 1;
1618
1619 /* This will need changing if we start to use the Hi Tx ring. */
1620 lastRead = typhoon_clean_tx(tp, txRing, index);
1621 if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1622 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1623 netif_wake_queue(tp->dev);
1624
1625 txRing->lastRead = lastRead;
1626 smp_wmb();
1627}
1628
/* Return an already-mapped receive buffer to the card's free-buffer
 * ring without remapping it.  If the free ring is full the skb is
 * simply dropped; typhoon_fill_free_ring() will replenish later.
 */
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;

	/* full when advancing lastWrite would collide with the card's
	 * cleared index
	 */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared)) {
		/* no room in ring, just drop the skb
		 */
		dev_kfree_skb_any(rxb->skb);
		rxb->skb = NULL;
		return;
	}

	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(rxb->dma_addr);

	/* Tell the card about it */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
1655
/* Allocate, DMA-map and post a fresh receive buffer for slot 'idx' on
 * the free-buffer ring.
 *
 * Returns 0 on success, -ENOMEM if the free ring is full or the skb
 * allocation fails (rxb->skb is left NULL in that case).
 */
static int
typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	rxb->skb = NULL;

	/* bail if posting one more entry would collide with the card's
	 * cleared index
	 */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared))
		return -ENOMEM;

	skb = dev_alloc_skb(PKT_BUF_SZ);
	if(!skb)
		return -ENOMEM;

#if 0
	/* Please, 3com, fix the firmware to allow DMA to a unaligned
	 * address! Pretty please?
	 */
	skb_reserve(skb, 2);
#endif

	skb->dev = tp->dev;
	dma_addr = pci_map_single(tp->pdev, skb->data,
				  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

	/* Since no card does 64 bit DAC, the high bits will never
	 * change from zero.
	 */
	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(dma_addr);
	rxb->skb = skb;
	rxb->dma_addr = dma_addr;

	/* Tell the card about it */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
	return 0;
}
1702
/* Process received packets on one Rx ring, up to 'budget' packets.
 *
 * Small packets (< rx_copybreak) are copied into a fresh skb so the
 * original DMA buffer can be recycled cheaply; larger packets are
 * passed up directly and a replacement buffer is allocated.  Hardware
 * checksum results are honoured for IP+TCP/UDP-good frames, and VLAN
 * tagged frames go through the VLAN acceleration path when a group is
 * registered.
 *
 * Returns the number of packets delivered to the stack; updates
 * *cleared so the card knows how far we have consumed.
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
	   volatile __le32 * cleared, int budget)
{
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	__le32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while(rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		typhoon_inc_rx_index(&rxaddr, 1);

		if(rx->flags & TYPHOON_RX_ERROR) {
			/* errored frame: give the buffer straight back */
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if(pkt_len < rx_copybreak &&
		   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			/* copybreak path: copy out, keep the DMA buffer */
			skb_reserve(new_skb, 2);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
						    PKT_BUF_SZ,
						    PCI_DMA_FROMDEVICE);
			skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       PKT_BUF_SZ,
						       PCI_DMA_FROMDEVICE);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			/* hand the mapped buffer up; allocate a new one */
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if(csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
		   csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			new_skb->ip_summed = CHECKSUM_NONE;

		/* state_lock protects tp->vlgrp against deregistration */
		spin_lock(&tp->state_lock);
		if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
			vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
						 ntohl(rx->vlanTag) & 0xffff);
		else
			netif_receive_skb(new_skb);
		spin_unlock(&tp->state_lock);

		received++;
		budget--;
	}
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1782
1783static void
1784typhoon_fill_free_ring(struct typhoon *tp)
1785{
1786 u32 i;
1787
1788 for(i = 0; i < RXENT_ENTRIES; i++) {
1789 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1790 if(rxb->skb)
1791 continue;
1792 if(typhoon_alloc_rx_skb(tp, i) < 0)
1793 break;
1794 }
1795}
1796
/* NAPI poll handler: drain command responses, reap completed Tx work,
 * then process both Rx rings (high priority first) within 'budget'.
 * Refills the free-buffer ring when the card has emptied it, and
 * re-enables interrupts once all pending work is done.
 *
 * Returns the number of Rx packets processed.
 */
static int
typhoon_poll(struct napi_struct *napi, int budget)
{
	struct typhoon *tp = container_of(napi, struct typhoon, napi);
	struct typhoon_indexes *indexes = tp->indexes;
	int work_done;

	rmb();	/* pick up index updates written by the card */
	if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
		typhoon_process_response(tp, 0, NULL);

	if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	work_done = 0;

	if(indexes->rxHiCleared != indexes->rxHiReady) {
		work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
			   		&indexes->rxHiCleared, budget);
	}

	if(indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
					&indexes->rxLoCleared, budget - work_done);
	}

	if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* rxBuff ring is empty, try to fill it. */
		typhoon_fill_free_ring(tp);
	}

	if (work_done < budget) {
		/* all caught up: leave polling mode and unmask interrupts */
		napi_complete(napi);
		iowrite32(TYPHOON_INTR_NONE,
				tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return work_done;
}
1837
/* Interrupt handler (shared line capable): acknowledge the card's
 * interrupt, mask further interrupts and hand the real work off to the
 * NAPI poll routine.
 *
 * Returns IRQ_NONE if this device did not raise the interrupt.
 */
static irqreturn_t
typhoon_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct typhoon *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->ioaddr;
	u32 intr_status;

	intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if(!(intr_status & TYPHOON_INTR_HOST_INT))
		return IRQ_NONE;

	/* ack everything we saw */
	iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);

	if (napi_schedule_prep(&tp->napi)) {
		/* mask until typhoon_poll() finishes */
		iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(ioaddr);
		__napi_schedule(&tp->napi);
	} else {
		netdev_err(dev, "Error, poll already scheduled\n");
	}
	return IRQ_HANDLED;
}
1861
1862static void
1863typhoon_free_rx_rings(struct typhoon *tp)
1864{
1865 u32 i;
1866
1867 for(i = 0; i < RXENT_ENTRIES; i++) {
1868 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1869 if(rxb->skb) {
1870 pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1871 PCI_DMA_FROMDEVICE);
1872 dev_kfree_skb(rxb->skb);
1873 rxb->skb = NULL;
1874 }
1875 }
1876}
1877
/* Put the 3XP to sleep and the PCI device into the given power state.
 *
 * 'events' selects which wake sources (link change / magic packet) stay
 * armed while asleep.  Returns 0 on success, a command error, or
 * -ETIMEDOUT if the card never reports SLEEPING.
 */
static int
typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
	xp_cmd.parm1 = events;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
			   err);
		return err;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
		return err;
	}

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
		return -ETIMEDOUT;

	/* Since we cannot monitor the status of the link while sleeping,
	 * tell the world it went away.
	 */
	netif_carrier_off(tp->dev);

	pci_enable_wake(tp->pdev, state, 1);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, state);
}
1914
/* Wake the 3XP from sleep back into the "waiting for host" state.
 *
 * @tp:        driver private data
 * @wait_type: how to wait during a potential reset (WaitSleep/WaitNoSleep/
 *             NoWait), passed through to typhoon_reset()
 *
 * Restores PCI power state and saved config space, then pokes the WAKEUP
 * boot command. If the chip does not come back, or this firmware is known
 * to require a reset after wakeup (TYPHOON_WAKEUP_NEEDS_RESET), fall back
 * to a full reset.
 *
 * Returns 0 on success or the result of typhoon_reset() on the fallback
 * path.
 */
static int
typhoon_wakeup(struct typhoon *tp, int wait_type)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Post 2.x.x versions of the Sleep Image require a reset before
	 * we can download the Runtime Image. But let's not make users of
	 * the old firmware pay for the reset.
	 */
	iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
	   (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
		return typhoon_reset(ioaddr, wait_type);

	return 0;
}
1935
/* Bring the adapter from the sleep image up to the full running state.
 *
 * Initializes the host rings, downloads and boots the runtime firmware,
 * then issues the configuration command sequence (max packet size, MAC
 * address, IRQ coalescing off, transceiver selection, VLAN ethertype,
 * offload tasks, rx mode) before finally enabling Tx, Rx and interrupts.
 * The ordering of these commands follows the hardware's expectations and
 * should not be rearranged.
 *
 * On any failure the chip is reset and the rings are reinitialized so a
 * later attempt starts from a clean slate. Returns 0 or a negative errno.
 */
static int
typhoon_start_runtime(struct typhoon *tp)
{
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	typhoon_init_rings(tp);
	typhoon_fill_free_ring(tp);

	err = typhoon_download_firmware(tp);
	if(err < 0) {
		netdev_err(tp->dev, "cannot load runtime on 3XP\n");
		goto error_out;
	}

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "cannot boot 3XP\n");
		err = -EIO;
		goto error_out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
	xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	/* MAC is passed as two big-endian chunks repacked into the
	 * little-endian command parameters.
	 */
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Disable IRQ coalescing -- we can reenable it when 3Com gives
	 * us some more information on how to control it.
	 */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
	xp_cmd.parm1 = 0;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = tp->xcvr_select;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
	xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
	/* state_lock protects tp->offload against concurrent updates */
	spin_lock_bh(&tp->state_lock);
	xp_cmd.parm2 = tp->offload;
	xp_cmd.parm3 = tp->offload;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	spin_unlock_bh(&tp->state_lock);
	if(err < 0)
		goto error_out;

	typhoon_set_rx_mode(dev);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Publish Running before unmasking interrupts so other CPUs see
	 * the state change first.
	 */
	tp->card_state = Running;
	smp_wmb();

	iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
	typhoon_post_pci_writes(ioaddr);

	return 0;

error_out:
	typhoon_reset(ioaddr, WaitNoSleep);
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);
	return err;
}
2029
/* Shut the running firmware down and leave the chip halted/reset.
 *
 * @tp:        driver private data
 * @wait_type: wait style forwarded to typhoon_reset()
 *
 * Disables interrupts and Rx, waits up to TYPHOON_WAIT_TIMEOUT *
 * TYPHOON_UDELAY for outstanding transmits to drain, snapshots the
 * statistics (so counters survive a down/up cycle), halts the 3XP and
 * resets it. Any Tx descriptors still pending after the reset are
 * reclaimed by hand.
 *
 * Returns 0 on success or -ETIMEDOUT if the reset fails.
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* Disable interrupts early, since we can't schedule a poll
	 * when called with !netif_running(). This will be posted
	 * when we force the posting of the command.
	 */
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Wait 1/2 sec for any outstanding transmits to occur
	 * We'll cleanup after the reset if this times out.
	 */
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if(i == TYPHOON_WAIT_TIMEOUT)
		netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* save the statistics so when we bring the interface up again,
	 * the values reported to userspace are correct.
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");

	if(typhoon_reset(ioaddr, wait_type) < 0) {
		netdev_err(tp->dev, "unable to reset 3XP\n");
		return -ETIMEDOUT;
	}

	/* cleanup any outstanding Tx packets */
	if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2090
/* Netdev watchdog callback: the Tx queue stalled past watchdog_timeo.
 *
 * Attempt a full recovery: reset the chip, reclaim pending Tx and Rx
 * buffers, and restart the runtime firmware. If either the reset or the
 * restart fails, give up -- reset the hardware once more and drop carrier
 * so the watchdog does not keep firing against a dead card.
 */
static void
typhoon_tx_timeout(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);

	if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
		netdev_warn(dev, "could not reset in tx timeout\n");
		goto truly_dead;
	}

	/* If we ever start using the Hi ring, it will need cleaning too */
	typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
	typhoon_free_rx_rings(tp);

	if(typhoon_start_runtime(tp) < 0) {
		netdev_err(dev, "could not start runtime in tx timeout\n");
		goto truly_dead;
	}

	netif_wake_queue(dev);
	return;

truly_dead:
	/* Reset the hardware, and turn off carrier to avoid more timeouts */
	typhoon_reset(tp->ioaddr, NoWait);
	netif_carrier_off(dev);
}
2118
/* ndo_open: bring the interface up.
 *
 * Loads the firmware image (if not already cached), wakes the sleeping
 * chip, grabs the shared IRQ, enables NAPI and starts the runtime.
 * Unwinds in reverse on failure; the error paths try to put the card
 * back into its sleep image so a later open can succeed.
 *
 * Returns 0 on success or a negative errno.
 */
static int
typhoon_open(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	int err;

	err = typhoon_request_firmware(tp);
	if (err)
		goto out;

	err = typhoon_wakeup(tp, WaitSleep);
	if(err < 0) {
		netdev_err(dev, "unable to wakeup device\n");
		goto out_sleep;
	}

	err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if(err < 0)
		goto out_sleep;

	/* NAPI must be enabled before the runtime can raise interrupts */
	napi_enable(&tp->napi);

	err = typhoon_start_runtime(tp);
	if(err < 0) {
		napi_disable(&tp->napi);
		goto out_irq;
	}

	netif_start_queue(dev);
	return 0;

out_irq:
	free_irq(dev->irq, dev);

out_sleep:
	/* Best effort: reboot into the sleep image and go back to sleep.
	 * If even that fails, leave the chip held in reset.
	 */
	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to reboot into sleep img\n");
		typhoon_reset(tp->ioaddr, NoWait);
		goto out;
	}

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to go back to sleep\n");

out:
	return err;
}
2167
/* ndo_stop: take the interface down.
 *
 * Stops the queue and NAPI, halts the runtime firmware, releases the
 * IRQ, reclaims the rings and puts the card back to sleep in its sleep
 * image. Failures are logged but ignored -- close always succeeds.
 */
static int
typhoon_close(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&tp->napi);

	if(typhoon_stop_runtime(tp, WaitSleep) < 0)
		netdev_err(dev, "unable to stop runtime\n");

	/* Make sure there is no irq handler running on a different CPU. */
	free_irq(dev->irq, dev);

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		netdev_err(dev, "unable to boot sleep image\n");

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to put card to sleep\n");

	return 0;
}
2193
2194#ifdef CONFIG_PM
/* PCI resume handler (CONFIG_PM).
 *
 * A closed interface was never suspended, so there is nothing to do for
 * it. Otherwise wake the chip and restart the runtime; on failure leave
 * the hardware in reset and report -EBUSY.
 */
static int
typhoon_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	/* If we're down, resume when we are upped.
	 */
	if(!netif_running(dev))
		return 0;

	if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "critical: could not wake up in resume\n");
		goto reset;
	}

	if(typhoon_start_runtime(tp) < 0) {
		netdev_err(dev, "critical: could not start runtime in resume\n");
		goto reset;
	}

	netif_device_attach(dev);
	return 0;

reset:
	typhoon_reset(tp->ioaddr, NoWait);
	return -EBUSY;
}
2223
/* PCI suspend handler (CONFIG_PM).
 *
 * Refuses to suspend when both VLAN acceleration and magic-packet wake
 * are in use (the firmware cannot match magic packets inside VLAN
 * frames -- see the vlgrp check below). Otherwise stops the runtime,
 * boots back into the sleep image, reprograms the MAC address and a
 * minimal directed+broadcast rx filter for wake matching, and puts the
 * card to sleep armed with tp->wol_events.
 *
 * Any failure rolls the device forward again via typhoon_resume() and
 * returns -EBUSY so the suspend is aborted.
 */
static int
typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;

	/* If we're down, we're already suspended.
	 */
	if(!netif_running(dev))
		return 0;

	spin_lock_bh(&tp->state_lock);
	if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
		spin_unlock_bh(&tp->state_lock);
		netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
		return -EBUSY;
	}
	spin_unlock_bh(&tp->state_lock);

	netif_device_detach(dev);

	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "unable to stop runtime\n");
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to boot sleep image\n");
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	/* repack the big-endian MAC into the little-endian command parms */
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set mac address in suspend\n");
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set rx filter in suspend\n");
		goto need_resume;
	}

	if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
		netdev_err(dev, "unable to put card to sleep\n");
		goto need_resume;
	}

	return 0;

need_resume:
	typhoon_resume(pdev);
	return -EBUSY;
}
1da177e4
LT
2285#endif
2286
/* Probe whether MMIO register access works on this adapter.
 *
 * Maps BAR 1 and tries to observe a self-raised interrupt through the
 * MMIO interrupt status register. If the self-interrupt bit becomes
 * visible, MMIO is trusted; otherwise the driver falls back to port IO.
 *
 * Returns 1 if MMIO works, 0 to use port IO (also on any mapping or
 * status-check failure).
 */
static int __devinit
typhoon_test_mmio(struct pci_dev *pdev)
{
	void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
	int mode = 0;
	u32 val;

	if(!ioaddr)
		goto out;

	/* Only safe to poke the chip while it is idle waiting for host */
	if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_HOST)
		goto out_unmap;

	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);

	/* Ok, see if we can change our interrupt status register by
	 * sending ourselves an interrupt. If so, then MMIO works.
	 * The 50usec delay is arbitrary -- it could probably be smaller.
	 */
	val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if((val & TYPHOON_INTR_SELF) == 0) {
		iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
		ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		udelay(50);
		val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		if(val & TYPHOON_INTR_SELF)
			mode = 1;
	}

	/* restore masked/acked interrupt state before handing off */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
	ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);

out_unmap:
	pci_iounmap(pdev, ioaddr);

out:
	if(!mode)
		pr_info("%s: falling back to port IO\n", pci_name(pdev));
	return mode;
}
2332
/* net_device callbacks for typhoon interfaces */
static const struct net_device_ops typhoon_netdev_ops = {
	.ndo_open		= typhoon_open,
	.ndo_stop		= typhoon_close,
	.ndo_start_xmit		= typhoon_start_tx,
	.ndo_set_multicast_list	= typhoon_set_rx_mode,
	.ndo_tx_timeout		= typhoon_tx_timeout,
	.ndo_get_stats		= typhoon_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= typhoon_set_mac_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_vlan_rx_register	= typhoon_vlan_rx_register,
};
2345
1da177e4
LT
2346static int __devinit
2347typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2348{
1da177e4
LT
2349 struct net_device *dev;
2350 struct typhoon *tp;
2351 int card_id = (int) ent->driver_data;
2352 void __iomem *ioaddr;
2353 void *shared;
2354 dma_addr_t shared_dma;
2355 struct cmd_desc xp_cmd;
2356 struct resp_desc xp_resp[3];
1da177e4 2357 int err = 0;
0bc88e4a 2358 const char *err_msg;
1da177e4
LT
2359
2360 dev = alloc_etherdev(sizeof(*tp));
2361 if(dev == NULL) {
0bc88e4a 2362 err_msg = "unable to alloc new net device";
1da177e4
LT
2363 err = -ENOMEM;
2364 goto error_out;
2365 }
1da177e4
LT
2366 SET_NETDEV_DEV(dev, &pdev->dev);
2367
2368 err = pci_enable_device(pdev);
2369 if(err < 0) {
0bc88e4a 2370 err_msg = "unable to enable device";
1da177e4
LT
2371 goto error_out_dev;
2372 }
2373
2374 err = pci_set_mwi(pdev);
2375 if(err < 0) {
0bc88e4a 2376 err_msg = "unable to set MWI";
1da177e4
LT
2377 goto error_out_disable;
2378 }
2379
284901a9 2380 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1da177e4 2381 if(err < 0) {
0bc88e4a 2382 err_msg = "No usable DMA configuration";
1da177e4
LT
2383 goto error_out_mwi;
2384 }
2385
2386 /* sanity checks on IO and MMIO BARs
2387 */
2388 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
0bc88e4a 2389 err_msg = "region #1 not a PCI IO resource, aborting";
1da177e4
LT
2390 err = -ENODEV;
2391 goto error_out_mwi;
2392 }
2393 if(pci_resource_len(pdev, 0) < 128) {
0bc88e4a 2394 err_msg = "Invalid PCI IO region size, aborting";
1da177e4
LT
2395 err = -ENODEV;
2396 goto error_out_mwi;
2397 }
2398 if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
0bc88e4a 2399 err_msg = "region #1 not a PCI MMIO resource, aborting";
1da177e4
LT
2400 err = -ENODEV;
2401 goto error_out_mwi;
2402 }
2403 if(pci_resource_len(pdev, 1) < 128) {
0bc88e4a 2404 err_msg = "Invalid PCI MMIO region size, aborting";
1da177e4
LT
2405 err = -ENODEV;
2406 goto error_out_mwi;
2407 }
2408
0bc88e4a 2409 err = pci_request_regions(pdev, KBUILD_MODNAME);
1da177e4 2410 if(err < 0) {
0bc88e4a 2411 err_msg = "could not request regions";
1da177e4
LT
2412 goto error_out_mwi;
2413 }
2414
2415 /* map our registers
2416 */
2417 if(use_mmio != 0 && use_mmio != 1)
2418 use_mmio = typhoon_test_mmio(pdev);
2419
2420 ioaddr = pci_iomap(pdev, use_mmio, 128);
2421 if (!ioaddr) {
0bc88e4a 2422 err_msg = "cannot remap registers, aborting";
1da177e4
LT
2423 err = -EIO;
2424 goto error_out_regions;
2425 }
2426
2427 /* allocate pci dma space for rx and tx descriptor rings
2428 */
2429 shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2430 &shared_dma);
2431 if(!shared) {
0bc88e4a 2432 err_msg = "could not allocate DMA memory";
1da177e4
LT
2433 err = -ENOMEM;
2434 goto error_out_remap;
2435 }
2436
2437 dev->irq = pdev->irq;
2438 tp = netdev_priv(dev);
2439 tp->shared = (struct typhoon_shared *) shared;
2440 tp->shared_dma = shared_dma;
2441 tp->pdev = pdev;
2442 tp->tx_pdev = pdev;
2443 tp->ioaddr = ioaddr;
2444 tp->tx_ioaddr = ioaddr;
2445 tp->dev = dev;
2446
2447 /* Init sequence:
2448 * 1) Reset the adapter to clear any bad juju
2449 * 2) Reload the sleep image
2450 * 3) Boot the sleep image
2451 * 4) Get the hardware address.
2452 * 5) Put the card to sleep.
2453 */
2454 if (typhoon_reset(ioaddr, WaitSleep) < 0) {
0bc88e4a 2455 err_msg = "could not reset 3XP";
1da177e4
LT
2456 err = -EIO;
2457 goto error_out_dma;
2458 }
2459
2460 /* Now that we've reset the 3XP and are sure it's not going to
2461 * write all over memory, enable bus mastering, and save our
2462 * state for resuming after a suspend.
2463 */
2464 pci_set_master(pdev);
2465 pci_save_state(pdev);
2466
1da177e4
LT
2467 typhoon_init_interface(tp);
2468 typhoon_init_rings(tp);
2469
2470 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
0bc88e4a 2471 err_msg = "cannot boot 3XP sleep image";
1da177e4
LT
2472 err = -EIO;
2473 goto error_out_reset;
2474 }
2475
2476 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2477 if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
0bc88e4a 2478 err_msg = "cannot read MAC address";
1da177e4
LT
2479 err = -EIO;
2480 goto error_out_reset;
2481 }
2482
03a710ff
AV
2483 *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2484 *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
1da177e4
LT
2485
2486 if(!is_valid_ether_addr(dev->dev_addr)) {
0bc88e4a 2487 err_msg = "Could not obtain valid ethernet address, aborting";
1da177e4
LT
2488 goto error_out_reset;
2489 }
2490
2491 /* Read the Sleep Image version last, so the response is valid
2492 * later when we print out the version reported.
2493 */
2494 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2495 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
0bc88e4a 2496 err_msg = "Could not get Sleep Image version";
1da177e4
LT
2497 goto error_out_reset;
2498 }
2499
2500 tp->capabilities = typhoon_card_info[card_id].capabilities;
2501 tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2502
2503 /* Typhoon 1.0 Sleep Images return one response descriptor to the
2504 * READ_VERSIONS command. Those versions are OK after waking up
2505 * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2506 * seem to need a little extra help to get started. Since we don't
2507 * know how to nudge it along, just kick it.
2508 */
2509 if(xp_resp[0].numDesc != 0)
2510 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2511
2512 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
0bc88e4a 2513 err_msg = "cannot put adapter to sleep";
1da177e4
LT
2514 err = -EIO;
2515 goto error_out_reset;
2516 }
2517
2518 /* The chip-specific entries in the device structure. */
8bdd5553 2519 dev->netdev_ops = &typhoon_netdev_ops;
bea3348e 2520 netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
1da177e4 2521 dev->watchdog_timeo = TX_TIMEOUT;
25805dcf 2522
1da177e4
LT
2523 SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2524
2525 /* We can handle scatter gather, up to 16 entries, and
2526 * we can do IP checksumming (only version 4, doh...)
2527 */
2528 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2529 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2530 dev->features |= NETIF_F_TSO;
2531
0bc88e4a
JP
2532 if(register_netdev(dev) < 0) {
2533 err_msg = "unable to register netdev";
1da177e4 2534 goto error_out_reset;
0bc88e4a 2535 }
1da177e4
LT
2536
2537 pci_set_drvdata(pdev, dev);
2538
0bc88e4a
JP
2539 netdev_info(dev, "%s at %s 0x%llx, %pM\n",
2540 typhoon_card_info[card_id].name,
2541 use_mmio ? "MMIO" : "IO",
2542 (unsigned long long)pci_resource_start(pdev, use_mmio),
2543 dev->dev_addr);
1da177e4
LT
2544
2545 /* xp_resp still contains the response to the READ_VERSIONS command.
2546 * For debugging, let the user know what version he has.
2547 */
2548 if(xp_resp[0].numDesc == 0) {
2549 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2550 * of version is Month/Day of build.
2551 */
2552 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
0bc88e4a
JP
2553 netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
2554 monthday >> 8, monthday & 0xff);
1da177e4
LT
2555 } else if(xp_resp[0].numDesc == 2) {
2556 /* This is the Typhoon 1.1+ type Sleep Image
2557 */
2558 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2559 u8 *ver_string = (u8 *) &xp_resp[1];
2560 ver_string[25] = 0;
0bc88e4a
JP
2561 netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
2562 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
2563 sleep_ver & 0xfff, ver_string);
1da177e4 2564 } else {
0bc88e4a
JP
2565 netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
2566 xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
1da177e4 2567 }
6aa20a22 2568
1da177e4
LT
2569 return 0;
2570
2571error_out_reset:
2572 typhoon_reset(ioaddr, NoWait);
2573
2574error_out_dma:
2575 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2576 shared, shared_dma);
2577error_out_remap:
2578 pci_iounmap(pdev, ioaddr);
2579error_out_regions:
2580 pci_release_regions(pdev);
2581error_out_mwi:
2582 pci_clear_mwi(pdev);
2583error_out_disable:
2584 pci_disable_device(pdev);
2585error_out_dev:
2586 free_netdev(dev);
2587error_out:
0bc88e4a 2588 pr_err("%s: %s\n", pci_name(pdev), err_msg);
1da177e4
LT
2589 return err;
2590}
2591
/* PCI remove handler: tear down one adapter.
 *
 * Unregisters the net device first so no new opens can race, powers the
 * device up (so register access is safe), resets the 3XP, then releases
 * the mapping, DMA area, regions and the device itself -- the exact
 * reverse of the probe path.
 */
static void __devexit
typhoon_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	unregister_netdev(dev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	typhoon_reset(tp->ioaddr, NoWait);
	pci_iounmap(pdev, tp->ioaddr);
	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
			    tp->shared, tp->shared_dma);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}
2611
/* PCI driver glue; suspend/resume only built with CONFIG_PM */
static struct pci_driver typhoon_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= typhoon_pci_tbl,
	.probe		= typhoon_init_one,
	.remove		= __devexit_p(typhoon_remove_one),
#ifdef CONFIG_PM
	.suspend	= typhoon_suspend,
	.resume		= typhoon_resume,
#endif
};
2622
/* Module entry point: register the PCI driver; per-adapter setup
 * happens in typhoon_init_one().
 */
static int __init
typhoon_init(void)
{
	return pci_register_driver(&typhoon_driver);
}
2628
2629static void __exit
2630typhoon_cleanup(void)
2631{
a8c9a53c 2632 if (typhoon_fw)
b775a750 2633 release_firmware(typhoon_fw);
1da177e4
LT
2634 pci_unregister_driver(&typhoon_driver);
2635}
2636
/* Hook the module load/unload entry points */
module_init(typhoon_init);
module_exit(typhoon_cleanup);