/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
/*
	Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>

	Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
	Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
	Copyright 2001 Manfred Spraul [natsemi.c]
	Copyright 1999-2001 by Donald Becker. [natsemi.c]
	Written 1997-2001 by Donald Becker. [8139too.c]
	Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	See the file COPYING in this distribution for more information.

	Contributors:

		Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
		PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
		LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>

	TODO:
	* Test Tx checksumming thoroughly
	* Implement dev->tx_timeout

	Low priority TODO:
	* Complete reset on PciErr
	* Consider Rx interrupt mitigation using TimerIntr
	* Investigate using skb->priority with h/w VLAN priority
	* Investigate using High Priority Tx Queue with skb->priority
	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
	* Implement Tx software interrupt mitigation via
	  Tx descriptor bit
	* The real minimum of CP_MIN_MTU is 4 bytes.  However,
	  for this to be supported, one must(?) turn on packet padding.
	* Support external MII transceivers (patch available)

	NOTES:
	* TX checksumming is considered experimental.  It is off by
	  default, use ethtool to turn it on.

 */
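
/* Illustrative only (the exact command syntax depends on the ethtool
 * version and interface name): "ethtool -K eth0 tx on" would enable Tx
 * checksum offload on eth0, and "ethtool -k eth0" shows the current
 * offload settings.
 */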

#define DRV_NAME		"8139cp"
#define DRV_VERSION		"1.2"
#define DRV_RELDATE		"Mar 22, 2004"


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/cache.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* VLAN tagging feature enable/disable */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define CP_VLAN_TAG_USED 1
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
	do { (tx_desc)->opts2 = (vlan_tag_value); } while (0)
#else
#define CP_VLAN_TAG_USED 0
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
	do { (tx_desc)->opts2 = 0; } while (0)
#endif

/* These identify the driver base version and may not be removed. */
static char version[] =
KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");

#define PFX			DRV_NAME ": "

#ifndef TRUE
#define FALSE 0
#define TRUE (!FALSE)
#endif

#define CP_DEF_MSG_ENABLE	(NETIF_MSG_DRV | \
				 NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK)
#define CP_NUM_STATS		14	/* struct cp_dma_stats, plus one */
#define CP_STATS_SIZE		64	/* size in bytes of DMA stats block */
#define CP_REGS_SIZE		(0xff + 1)
#define CP_REGS_VER		1	/* version 1 */
#define CP_RX_RING_SIZE		64
#define CP_TX_RING_SIZE		64
#define CP_RING_BYTES		\
		((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +	\
		 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +	\
		 CP_STATS_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (CP_TX_RING_SIZE - 1))
#define NEXT_RX(N)		(((N) + 1) & (CP_RX_RING_SIZE - 1))
#define TX_BUFFS_AVAIL(CP)					\
	(((CP)->tx_tail <= (CP)->tx_head) ?			\
	  (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
	  (CP)->tx_tail - (CP)->tx_head - 1)
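
/* A worked example of the ring arithmetic above (illustrative only): with
 * CP_TX_RING_SIZE = 64, NEXT_TX(63) wraps to 0 because the ring size is a
 * power of two.  With tx_head = 63 and tx_tail = 0, TX_BUFFS_AVAIL() yields
 * 0 + 63 - 63 = 0: one descriptor is always left unused so that a full
 * ring can be told apart from an empty one.
 */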

#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer. */
#define RX_OFFSET		2
#define CP_INTERNAL_PHY		32

/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH		5	/* Rx buffer level before first PCI xfer. */
#define RX_DMA_BURST		4	/* Maximum PCI burst, '4' is 256 */
#define TX_DMA_BURST		6	/* Maximum PCI burst, '6' is 1024 */
#define TX_EARLY_THRESH		256	/* Early Tx threshold, in bytes */
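
/* Decoding the log_2(bytes)-4 scheme above, for illustration: a setting of
 * N selects 16 << N bytes, so RX_FIFO_THRESH = 5 means 512 bytes must be in
 * the Rx FIFO before the first PCI transfer, RX_DMA_BURST = 4 caps Rx
 * bursts at 256 bytes, and TX_DMA_BURST = 6 caps Tx bursts at 1024 bytes.
 */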

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(6*HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define CP_MIN_MTU		60	/* TODO: allow lower, but pad */
#define CP_MAX_MTU		4096

enum {
	/* NIC register offsets */
	MAC0		= 0x00,	/* Ethernet hardware address. */
	MAR0		= 0x08,	/* Multicast filter. */
	StatsAddr	= 0x10,	/* 64-bit start addr of 64-byte DMA stats blk */
	TxRingAddr	= 0x20,	/* 64-bit start addr of Tx ring */
	HiTxRingAddr	= 0x28,	/* 64-bit start addr of high priority Tx ring */
	Cmd		= 0x37,	/* Command register */
	IntrMask	= 0x3C,	/* Interrupt mask */
	IntrStatus	= 0x3E,	/* Interrupt status */
	TxConfig	= 0x40,	/* Tx configuration */
	ChipVersion	= 0x43,	/* 8-bit chip version, inside TxConfig */
	RxConfig	= 0x44,	/* Rx configuration */
	RxMissed	= 0x4C,	/* 24 bits valid, write clears */
	Cfg9346		= 0x50,	/* EEPROM select/control; Cfg reg [un]lock */
	Config1		= 0x52,	/* Config1 */
	Config3		= 0x59,	/* Config3 */
	Config4		= 0x5A,	/* Config4 */
	MultiIntr	= 0x5C,	/* Multiple interrupt select */
	BasicModeCtrl	= 0x62,	/* MII BMCR */
	BasicModeStatus	= 0x64,	/* MII BMSR */
	NWayAdvert	= 0x66,	/* MII ADVERTISE */
	NWayLPAR	= 0x68,	/* MII LPA */
	NWayExpansion	= 0x6A,	/* MII Expansion */
	Config5		= 0xD8,	/* Config5 */
	TxPoll		= 0xD9,	/* Tell chip to check Tx descriptors for work */
	RxMaxSize	= 0xDA,	/* Max size of an Rx packet (8169 only) */
	CpCmd		= 0xE0,	/* C+ Command register (C+ mode only) */
	IntrMitigate	= 0xE2,	/* rx/tx interrupt mitigation control */
	RxRingAddr	= 0xE4,	/* 64-bit start addr of Rx ring */
	TxThresh	= 0xEC,	/* Early Tx threshold */
	OldRxBufAddr	= 0x30,	/* DMA address of Rx ring buffer (C mode) */
	OldTSD0		= 0x10,	/* DMA address of first Tx desc (C mode) */

	/* Tx and Rx status descriptors */
	DescOwn		= (1 << 31),	/* Descriptor is owned by NIC */
	RingEnd		= (1 << 30),	/* End of descriptor ring */
	FirstFrag	= (1 << 29),	/* First segment of a packet */
	LastFrag	= (1 << 28),	/* Final segment of a packet */
	LargeSend	= (1 << 27),	/* TCP Large Send Offload (TSO) */
	MSSShift	= 16,		/* MSS value position */
	MSSMask		= 0xfff,	/* MSS value: 11 bits */
	TxError		= (1 << 23),	/* Tx error summary */
	RxError		= (1 << 20),	/* Rx error summary */
	IPCS		= (1 << 18),	/* Calculate IP checksum */
	UDPCS		= (1 << 17),	/* Calculate UDP/IP checksum */
	TCPCS		= (1 << 16),	/* Calculate TCP/IP checksum */
	TxVlanTag	= (1 << 17),	/* Add VLAN tag */
	RxVlanTagged	= (1 << 16),	/* Rx VLAN tag available */
	IPFail		= (1 << 15),	/* IP checksum failed */
	UDPFail		= (1 << 14),	/* UDP/IP checksum failed */
	TCPFail		= (1 << 13),	/* TCP/IP checksum failed */
	NormalTxPoll	= (1 << 6),	/* One or more normal Tx packets to send */
	PID1		= (1 << 17),	/* 2 protocol id bits:  0==non-IP, */
	PID0		= (1 << 16),	/* 1==UDP/IP, 2==TCP/IP, 3==IP */
	RxProtoTCP	= 1,
	RxProtoUDP	= 2,
	RxProtoIP	= 3,
	TxFIFOUnder	= (1 << 25),	/* Tx FIFO underrun */
	TxOWC		= (1 << 22),	/* Tx Out-of-window collision */
	TxLinkFail	= (1 << 21),	/* Link failed during Tx of packet */
	TxMaxCol	= (1 << 20),	/* Tx aborted due to excessive collisions */
	TxColCntShift	= 16,		/* Shift, to get 4-bit Tx collision cnt */
	TxColCntMask	= 0x01 | 0x02 | 0x04 | 0x08,	/* 4-bit collision count */
	RxErrFrame	= (1 << 27),	/* Rx frame alignment error */
	RxMcast		= (1 << 26),	/* Rx multicast packet rcv'd */
	RxErrCRC	= (1 << 18),	/* Rx CRC error */
	RxErrRunt	= (1 << 19),	/* Rx error, packet < 64 bytes */
	RxErrLong	= (1 << 21),	/* Rx error, packet > 4096 bytes */
	RxErrFIFO	= (1 << 22),	/* Rx error, FIFO overflowed, pkt bad */

	/* StatsAddr register */
	DumpStats	= (1 << 3),	/* Begin stats dump */

	/* RxConfig register */
	RxCfgFIFOShift	= 13,		/* Shift, to get Rx FIFO thresh value */
	RxCfgDMAShift	= 8,		/* Shift, to get Rx Max DMA value */
	AcceptErr	= 0x20,		/* Accept packets with CRC errors */
	AcceptRunt	= 0x10,		/* Accept runt (<64 bytes) packets */
	AcceptBroadcast	= 0x08,		/* Accept broadcast packets */
	AcceptMulticast	= 0x04,		/* Accept multicast packets */
	AcceptMyPhys	= 0x02,		/* Accept pkts with our MAC as dest */
	AcceptAllPhys	= 0x01,		/* Accept all pkts w/ physical dest */

	/* IntrMask / IntrStatus registers */
	PciErr		= (1 << 15),	/* System error on the PCI bus */
	TimerIntr	= (1 << 14),	/* Asserted when TCTR reaches TimerInt value */
	LenChg		= (1 << 13),	/* Cable length change */
	SWInt		= (1 << 8),	/* Software-requested interrupt */
	TxEmpty		= (1 << 7),	/* No Tx descriptors available */
	RxFIFOOvr	= (1 << 6),	/* Rx FIFO Overflow */
	LinkChg		= (1 << 5),	/* Packet underrun, or link change */
	RxEmpty		= (1 << 4),	/* No Rx descriptors available */
	TxErr		= (1 << 3),	/* Tx error */
	TxOK		= (1 << 2),	/* Tx packet sent */
	RxErr		= (1 << 1),	/* Rx error */
	RxOK		= (1 << 0),	/* Rx packet received */
	IntrResvd	= (1 << 10),	/* reserved, according to RealTek engineers,
					   but hardware likes to raise it */

	IntrAll		= PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
			  RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
			  RxErr | RxOK | IntrResvd,

	/* C mode command register */
	CmdReset	= (1 << 4),	/* Enable to reset; self-clearing */
	RxOn		= (1 << 3),	/* Rx mode enable */
	TxOn		= (1 << 2),	/* Tx mode enable */

	/* C+ mode command register */
	RxVlanOn	= (1 << 6),	/* Rx VLAN de-tagging enable */
	RxChkSum	= (1 << 5),	/* Rx checksum offload enable */
	PCIDAC		= (1 << 4),	/* PCI Dual Address Cycle (64-bit PCI) */
	PCIMulRW	= (1 << 3),	/* Enable PCI read/write multiple */
	CpRxOn		= (1 << 1),	/* Rx mode enable */
	CpTxOn		= (1 << 0),	/* Tx mode enable */

	/* Cfg9346 EEPROM control register */
	Cfg9346_Lock	= 0x00,		/* Lock ConfigX/MII register access */
	Cfg9346_Unlock	= 0xC0,		/* Unlock ConfigX/MII register access */

	/* TxConfig register */
	IFG		= (1 << 25) | (1 << 24),	/* standard IEEE interframe gap */
	TxDMAShift	= 8,		/* DMA burst value (0-7) is shifted this many bits */

	/* Early Tx Threshold register */
	TxThreshMask	= 0x3f,		/* Mask bits 5-0 */
	TxThreshMax	= 2048,		/* Max early Tx threshold */

	/* Config1 register */
	DriverLoaded	= (1 << 5),	/* Software marker, driver is loaded */
	LWACT		= (1 << 4),	/* LWAKE active mode */
	PMEnable	= (1 << 0),	/* Enable various PM features of chip */

	/* Config3 register */
	PARMEnable	= (1 << 6),	/* Enable auto-loading of PHY parms */
	MagicPacket	= (1 << 5),	/* Wake up when receives a Magic Packet */
	LinkUp		= (1 << 4),	/* Wake up when the cable connection is re-established */

	/* Config4 register */
	LWPTN		= (1 << 1),	/* LWAKE Pattern */
	LWPME		= (1 << 4),	/* LANWAKE vs PMEB */

	/* Config5 register */
	BWF		= (1 << 6),	/* Accept Broadcast wakeup frame */
	MWF		= (1 << 5),	/* Accept Multicast wakeup frame */
	UWF		= (1 << 4),	/* Accept Unicast wakeup frame */
	LANWake		= (1 << 1),	/* Enable LANWake signal */
	PMEStatus	= (1 << 0),	/* PME status can be reset by PCI RST# */

	cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
	cp_rx_intr_mask	  = RxOK | RxErr | RxEmpty | RxFIFOOvr,
	cp_intr_mask	  = cp_rx_intr_mask | cp_norx_intr_mask,
};

static const unsigned int cp_rx_config =
	(RX_FIFO_THRESH << RxCfgFIFOShift) |
	(RX_DMA_BURST << RxCfgDMAShift);

struct cp_desc {
	u32		opts1;
	u32		opts2;
	u64		addr;
};

struct ring_info {
	struct sk_buff	*skb;
	u32		len;
};

struct cp_dma_stats {
	u64		tx_ok;
	u64		rx_ok;
	u64		tx_err;
	u32		rx_err;
	u16		rx_fifo;
	u16		frame_align;
	u32		tx_ok_1col;
	u32		tx_ok_mcol;
	u64		rx_ok_phys;
	u64		rx_ok_bcast;
	u32		rx_ok_mcast;
	u16		tx_abort;
	u16		tx_underrun;
} __attribute__((packed));

struct cp_extra_stats {
	unsigned long	rx_frags;
};

struct cp_private {
	void			__iomem *regs;
	struct net_device	*dev;
	spinlock_t		lock;
	u32			msg_enable;

	struct pci_dev		*pdev;
	u32			rx_config;
	u16			cpcmd;

	struct net_device_stats net_stats;
	struct cp_extra_stats	cp_stats;

	unsigned		rx_tail		____cacheline_aligned;
	struct cp_desc		*rx_ring;
	struct ring_info	rx_skb[CP_RX_RING_SIZE];
	unsigned		rx_buf_sz;

	unsigned		tx_head		____cacheline_aligned;
	unsigned		tx_tail;

	struct cp_desc		*tx_ring;
	struct ring_info	tx_skb[CP_TX_RING_SIZE];
	dma_addr_t		ring_dma;

#if CP_VLAN_TAG_USED
	struct vlan_group	*vlgrp;
#endif

	unsigned int		wol_enabled : 1; /* Is Wake-on-LAN enabled? */

	struct mii_if_info	mii_if;
};

#define cpr8(reg)	readb(cp->regs + (reg))
#define cpr16(reg)	readw(cp->regs + (reg))
#define cpr32(reg)	readl(cp->regs + (reg))
#define cpw8(reg,val)	writeb((val), cp->regs + (reg))
#define cpw16(reg,val)	writew((val), cp->regs + (reg))
#define cpw32(reg,val)	writel((val), cp->regs + (reg))
#define cpw8_f(reg,val) do {			\
	writeb((val), cp->regs + (reg));	\
	readb(cp->regs + (reg));		\
	} while (0)
#define cpw16_f(reg,val) do {			\
	writew((val), cp->regs + (reg));	\
	readw(cp->regs + (reg));		\
	} while (0)
#define cpw32_f(reg,val) do {			\
	writel((val), cp->regs + (reg));	\
	readl(cp->regs + (reg));		\
	} while (0)
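
/* The _f ("flush") variants above read the register back immediately after
 * writing it.  MMIO writes over PCI are posted and may sit in bridge
 * buffers; the read back forces the write to actually reach the chip
 * before the macro returns.
 */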


static void __cp_set_rx_mode (struct net_device *dev);
static void cp_tx (struct cp_private *cp);
static void cp_clean_rings (struct cp_private *cp);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cp_poll_controller(struct net_device *dev);
#endif
static int cp_get_eeprom_len(struct net_device *dev);
static int cp_get_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data);
static int cp_set_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data);

static struct pci_device_id cp_pci_tbl[] = {
	{ PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ },
};
MODULE_DEVICE_TABLE(pci, cp_pci_tbl);

static struct {
	const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "tx_ok" },
	{ "rx_ok" },
	{ "tx_err" },
	{ "rx_err" },
	{ "rx_fifo" },
	{ "frame_align" },
	{ "tx_ok_1col" },
	{ "tx_ok_mcol" },
	{ "rx_ok_phys" },
	{ "rx_ok_bcast" },
	{ "rx_ok_mcast" },
	{ "tx_abort" },
	{ "tx_underrun" },
	{ "rx_frags" },
};


#if CP_VLAN_TAG_USED
static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	cp->vlgrp = grp;
	cp->cpcmd |= RxVlanOn;
	cpw16(CpCmd, cp->cpcmd);
	spin_unlock_irqrestore(&cp->lock, flags);
}

static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	cp->cpcmd &= ~RxVlanOn;
	cpw16(CpCmd, cp->cpcmd);
	if (cp->vlgrp)
		cp->vlgrp->vlan_devices[vid] = NULL;
	spin_unlock_irqrestore(&cp->lock, flags);
}
#endif /* CP_VLAN_TAG_USED */

static inline void cp_set_rxbufsize (struct cp_private *cp)
{
	unsigned int mtu = cp->dev->mtu;

	if (mtu > ETH_DATA_LEN)
		/* MTU + ethernet header + FCS + optional VLAN tag */
		cp->rx_buf_sz = mtu + ETH_HLEN + 8;
	else
		cp->rx_buf_sz = PKT_BUF_SZ;
}
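
/* For example: at the default MTU of 1500 the fixed PKT_BUF_SZ of 1536 is
 * used, while a jumbo MTU of 4000 would size each Rx buffer as
 * 4000 + 14 (Ethernet header) + 8 (4-byte FCS + 4-byte VLAN tag) = 4022.
 */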

static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
			      struct cp_desc *desc)
{
	skb->protocol = eth_type_trans (skb, cp->dev);

	cp->net_stats.rx_packets++;
	cp->net_stats.rx_bytes += skb->len;
	cp->dev->last_rx = jiffies;

#if CP_VLAN_TAG_USED
	if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) {
		vlan_hwaccel_receive_skb(skb, cp->vlgrp,
					 be16_to_cpu(desc->opts2 & 0xffff));
	} else
#endif
		netif_receive_skb(skb);
}

static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
			    u32 status, u32 len)
{
	if (netif_msg_rx_err (cp))
		printk (KERN_DEBUG
			"%s: rx err, slot %d status 0x%x len %d\n",
			cp->dev->name, rx_tail, status, len);
	cp->net_stats.rx_errors++;
	if (status & RxErrFrame)
		cp->net_stats.rx_frame_errors++;
	if (status & RxErrCRC)
		cp->net_stats.rx_crc_errors++;
	if ((status & RxErrRunt) || (status & RxErrLong))
		cp->net_stats.rx_length_errors++;
	if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
		cp->net_stats.rx_length_errors++;
	if (status & RxErrFIFO)
		cp->net_stats.rx_fifo_errors++;
}

static inline unsigned int cp_rx_csum_ok (u32 status)
{
	unsigned int protocol = (status >> 16) & 0x3;

	if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
		return 1;
	else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
		return 1;
	else if ((protocol == RxProtoIP) && (!(status & IPFail)))
		return 1;
	return 0;
}
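
/* The two bits extracted above are the PID1:PID0 protocol-id field of the
 * Rx status word.  Per the RxProto* constants used here, 1 = TCP, 2 = UDP,
 * 3 = IP and 0 = non-IP (note the PID0/PID1 comment in the enum lists UDP
 * and TCP the other way around); each protocol is checked against its
 * matching *Fail checksum bit.
 */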

static int cp_rx_poll (struct net_device *dev, int *budget)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned rx_tail = cp->rx_tail;
	unsigned rx_work = dev->quota;
	unsigned rx;

rx_status_loop:
	rx = 0;
	cpw16(IntrStatus, cp_rx_intr_mask);

	while (1) {
		u32 status, len;
		dma_addr_t mapping;
		struct sk_buff *skb, *new_skb;
		struct cp_desc *desc;
		unsigned buflen;

		skb = cp->rx_skb[rx_tail].skb;
		BUG_ON(!skb);

		desc = &cp->rx_ring[rx_tail];
		status = le32_to_cpu(desc->opts1);
		if (status & DescOwn)
			break;

		len = (status & 0x1fff) - 4;
		mapping = le64_to_cpu(desc->addr);

		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
			/* we don't support incoming fragmented frames.
			 * instead, we attempt to ensure that the
			 * pre-allocated RX skbs are properly sized such
			 * that RX fragments are never encountered
			 */
			cp_rx_err_acct(cp, rx_tail, status, len);
			cp->net_stats.rx_dropped++;
			cp->cp_stats.rx_frags++;
			goto rx_next;
		}

		if (status & (RxError | RxErrFIFO)) {
			cp_rx_err_acct(cp, rx_tail, status, len);
			goto rx_next;
		}

		if (netif_msg_rx_status(cp))
			printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
			       dev->name, rx_tail, status, len);

		buflen = cp->rx_buf_sz + RX_OFFSET;
		new_skb = dev_alloc_skb (buflen);
		if (!new_skb) {
			cp->net_stats.rx_dropped++;
			goto rx_next;
		}

		skb_reserve(new_skb, RX_OFFSET);
		new_skb->dev = dev;

		pci_unmap_single(cp->pdev, mapping,
				 buflen, PCI_DMA_FROMDEVICE);

		/* Handle checksum offloading for incoming packets. */
		if (cp_rx_csum_ok(status))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb_put(skb, len);

		mapping = pci_map_single(cp->pdev, new_skb->data, buflen,
					 PCI_DMA_FROMDEVICE);
		cp->rx_skb[rx_tail].skb = new_skb;

		cp_rx_skb(cp, skb, desc);
		rx++;

rx_next:
		cp->rx_ring[rx_tail].opts2 = 0;
		cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
		if (rx_tail == (CP_RX_RING_SIZE - 1))
			desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
						  cp->rx_buf_sz);
		else
			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
		rx_tail = NEXT_RX(rx_tail);

		if (!rx_work--)
			break;
	}

	cp->rx_tail = rx_tail;

	dev->quota -= rx;
	*budget -= rx;

	/* if we did not reach work limit, then we're done with
	 * this round of polling
	 */
	if (rx_work) {
		if (cpr16(IntrStatus) & cp_rx_intr_mask)
			goto rx_status_loop;

		local_irq_disable();
		cpw16_f(IntrMask, cp_intr_mask);
		__netif_rx_complete(dev);
		local_irq_enable();

		return 0;	/* done */
	}

	return 1;		/* not done */
}
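
/* Note on the rx_status_loop retry above: before re-enabling Rx interrupts
 * the poll routine re-reads IntrStatus, so packets that arrived after the
 * ring was drained but before the full interrupt mask was restored are
 * picked up by looping again instead of being stranded until the next
 * interrupt.
 */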

static irqreturn_t
cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct cp_private *cp;
	u16 status;

	if (unlikely(dev == NULL))
		return IRQ_NONE;
	cp = netdev_priv(dev);

	status = cpr16(IntrStatus);
	if (!status || (status == 0xFFFF))
		return IRQ_NONE;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n",
		       dev->name, status, cpr8(Cmd), cpr16(CpCmd));

	cpw16(IntrStatus, status & ~cp_rx_intr_mask);

	spin_lock(&cp->lock);

	/* close possible races with dev_close */
	if (unlikely(!netif_running(dev))) {
		cpw16(IntrMask, 0);
		spin_unlock(&cp->lock);
		return IRQ_HANDLED;
	}

	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
		if (netif_rx_schedule_prep(dev)) {
			cpw16_f(IntrMask, cp_norx_intr_mask);
			__netif_rx_schedule(dev);
		}

	if (status & (TxOK | TxErr | TxEmpty | SWInt))
		cp_tx(cp);
	if (status & LinkChg)
		mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);

	spin_unlock(&cp->lock);

	if (status & PciErr) {
		u16 pci_status;

		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
		printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
		       dev->name, status, pci_status);

		/* TODO: reset hardware */
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void cp_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	cp_interrupt(dev->irq, dev, NULL);
	enable_irq(dev->irq);
}
#endif

static void cp_tx (struct cp_private *cp)
{
	unsigned tx_head = cp->tx_head;
	unsigned tx_tail = cp->tx_tail;

	while (tx_tail != tx_head) {
		struct cp_desc *txd = cp->tx_ring + tx_tail;
		struct sk_buff *skb;
		u32 status;

		rmb();
		status = le32_to_cpu(txd->opts1);
		if (status & DescOwn)
			break;

		skb = cp->tx_skb[tx_tail].skb;
		BUG_ON(!skb);

		pci_unmap_single(cp->pdev, le64_to_cpu(txd->addr),
				 cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);

		if (status & LastFrag) {
			if (status & (TxError | TxFIFOUnder)) {
				if (netif_msg_tx_err(cp))
					printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
					       cp->dev->name, status);
				cp->net_stats.tx_errors++;
				if (status & TxOWC)
					cp->net_stats.tx_window_errors++;
				if (status & TxMaxCol)
					cp->net_stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					cp->net_stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					cp->net_stats.tx_fifo_errors++;
			} else {
				cp->net_stats.collisions +=
					((status >> TxColCntShift) & TxColCntMask);
				cp->net_stats.tx_packets++;
				cp->net_stats.tx_bytes += skb->len;
				if (netif_msg_tx_done(cp))
					printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
			}
			dev_kfree_skb_irq(skb);
		}

		cp->tx_skb[tx_tail].skb = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	cp->tx_tail = tx_tail;

	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(cp->dev);
}
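
/* The wake threshold above mirrors the stop condition in cp_start_xmit():
 * the queue is restarted only once a worst-case skb (one head descriptor
 * plus up to MAX_SKB_FRAGS page fragments) is guaranteed to fit, so the
 * driver never has to fail a transmit after accepting it.
 */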

static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned entry;
	u32 eor, flags;
#if CP_VLAN_TAG_USED
	u32 vlan_tag = 0;
#endif
	int mss = 0;

	spin_lock_irq(&cp->lock);

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irq(&cp->lock);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return 1;
	}

#if CP_VLAN_TAG_USED
	if (cp->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb));
#endif

	entry = cp->tx_head;
	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
	if (dev->features & NETIF_F_TSO)
		mss = skb_shinfo(skb)->gso_size;

	if (skb_shinfo(skb)->nr_frags == 0) {
		struct cp_desc *txd = &cp->tx_ring[entry];
		u32 len;
		dma_addr_t mapping;

		len = skb->len;
		mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(mapping);
		wmb();

		flags = eor | len | DescOwn | FirstFrag | LastFrag;

		if (mss)
			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
		else if (skb->ip_summed == CHECKSUM_HW) {
			const struct iphdr *ip = skb->nh.iph;
			if (ip->protocol == IPPROTO_TCP)
				flags |= IPCS | TCPCS;
			else if (ip->protocol == IPPROTO_UDP)
				flags |= IPCS | UDPCS;
			else
				WARN_ON(1);	/* we need a WARN() */
		}

		txd->opts1 = cpu_to_le32(flags);
		wmb();

		cp->tx_skb[entry].skb = skb;
		cp->tx_skb[entry].len = len;
		entry = NEXT_TX(entry);
	} else {
		struct cp_desc *txd;
		u32 first_len, first_eor;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;
		const struct iphdr *ip = skb->nh.iph;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_eor = eor;
		first_len = skb_headlen(skb);
		first_mapping = pci_map_single(cp->pdev, skb->data,
					       first_len, PCI_DMA_TODEVICE);
		cp->tx_skb[entry].skb = skb;
		cp->tx_skb[entry].len = first_len;
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			u32 ctrl;
			dma_addr_t mapping;

			len = this_frag->size;
			mapping = pci_map_single(cp->pdev,
						 ((void *) page_address(this_frag->page) +
						  this_frag->page_offset),
						 len, PCI_DMA_TODEVICE);
			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

			ctrl = eor | len | DescOwn;

			if (mss)
				ctrl |= LargeSend |
					((mss & MSSMask) << MSSShift);
			else if (skb->ip_summed == CHECKSUM_HW) {
				if (ip->protocol == IPPROTO_TCP)
					ctrl |= IPCS | TCPCS;
				else if (ip->protocol == IPPROTO_UDP)
					ctrl |= IPCS | UDPCS;
				else
					BUG();
			}

			if (frag == skb_shinfo(skb)->nr_frags - 1)
				ctrl |= LastFrag;

			txd = &cp->tx_ring[entry];
			CP_VLAN_TX_TAG(txd, vlan_tag);
			txd->addr = cpu_to_le64(mapping);
			wmb();

			txd->opts1 = cpu_to_le32(ctrl);
			wmb();

			cp->tx_skb[entry].skb = skb;
			cp->tx_skb[entry].len = len;
			entry = NEXT_TX(entry);
		}

		txd = &cp->tx_ring[first_entry];
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(first_mapping);
		wmb();

		if (skb->ip_summed == CHECKSUM_HW) {
			if (ip->protocol == IPPROTO_TCP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | TCPCS);
			else if (ip->protocol == IPPROTO_UDP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | UDPCS);
			else
				BUG();
		} else
			txd->opts1 = cpu_to_le32(first_eor | first_len |
						 FirstFrag | DescOwn);
		wmb();
	}
	cp->tx_head = entry;
	if (netif_msg_tx_queued(cp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	spin_unlock_irq(&cp->lock);

	cpw8(TxPoll, NormalTxPoll);
	dev->trans_start = jiffies;

	return 0;
}
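
/* Ordering note for the descriptor writes above: the buffer address is
 * written and a wmb() issued before opts1 sets DescOwn, so by the time the
 * NIC sees itself as the owner of a descriptor, every other field of that
 * descriptor is already visible to it.  The same reasoning is why the head
 * descriptor of a fragmented packet is handed over last.
 */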

/* Set or clear the multicast filter for this adaptor.
   This routine is not state sensitive and need not be SMP locked. */

static void __cp_set_rx_mode (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	u32 mc_filter[2];	/* Multicast hash filter */
	int i, rx_mode;
	u32 tmp;

	/* Note: do not reorder, GCC is clever about common statements. */
	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
			dev->name);
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	/* We can safely update without stopping the chip. */
	tmp = cp_rx_config | rx_mode;
	if (cp->rx_config != tmp) {
		cpw32_f (RxConfig, tmp);
		cp->rx_config = tmp;
	}
	cpw32_f (MAR0 + 0, mc_filter[0]);
	cpw32_f (MAR0 + 4, mc_filter[1]);
}
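
/* The hash above takes the top 6 bits of the Ethernet CRC of each multicast
 * address (crc >> 26), giving an index 0-63 into the 64-bit MAR filter:
 * bit_nr >> 5 picks which of the two 32-bit MAR words to set, and
 * bit_nr & 31 picks the bit within that word.
 */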

static void cp_set_rx_mode (struct net_device *dev)
{
	unsigned long flags;
	struct cp_private *cp = netdev_priv(dev);

	spin_lock_irqsave (&cp->lock, flags);
	__cp_set_rx_mode(dev);
	spin_unlock_irqrestore (&cp->lock, flags);
}

static void __cp_get_stats(struct cp_private *cp)
{
	/* only lower 24 bits valid; write any value to clear */
	cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
	cpw32 (RxMissed, 0);
}

static struct net_device_stats *cp_get_stats(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	/* The chip only needs to report frames it silently dropped. */
	spin_lock_irqsave(&cp->lock, flags);
	if (netif_running(dev) && netif_device_present(dev))
		__cp_get_stats(cp);
	spin_unlock_irqrestore(&cp->lock, flags);

	return &cp->net_stats;
}

static void cp_stop_hw (struct cp_private *cp)
{
	cpw16(IntrStatus, ~(cpr16(IntrStatus)));
	cpw16_f(IntrMask, 0);
	cpw8(Cmd, 0);
	cpw16_f(CpCmd, 0);
	cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));

	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;
}

static void cp_reset_hw (struct cp_private *cp)
{
	unsigned work = 1000;

	cpw8(Cmd, CmdReset);

	while (work--) {
		if (!(cpr8(Cmd) & CmdReset))
			return;

		schedule_timeout_uninterruptible(10);
	}

	printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
}

static inline void cp_start_hw (struct cp_private *cp)
{
	cpw16(CpCmd, cp->cpcmd);
	cpw8(Cmd, RxOn | TxOn);
}

static void cp_init_hw (struct cp_private *cp)
{
	struct net_device *dev = cp->dev;
	dma_addr_t ring_dma;

	cp_reset_hw(cp);

	cpw8_f (Cfg9346, Cfg9346_Unlock);

	/* Restore our idea of the MAC address. */
	cpw32_f (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
	cpw32_f (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));

	cp_start_hw(cp);
	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */

	__cp_set_rx_mode(dev);
	cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));

	cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
	/* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
	cpw8(Config3, PARMEnable);
	cp->wol_enabled = 0;

	cpw8(Config5, cpr8(Config5) & PMEStatus);

	cpw32_f(HiTxRingAddr, 0);
	cpw32_f(HiTxRingAddr + 4, 0);

	ring_dma = cp->ring_dma;
	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);

	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);

	cpw16(MultiIntr, 0);

	cpw16_f(IntrMask, cp_intr_mask);

	cpw8_f(Cfg9346, Cfg9346_Lock);
}
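
/* The ring address writes above split the upper half of the DMA address as
 * (ring_dma >> 16) >> 16 rather than a single >> 32: when dma_addr_t is
 * only 32 bits wide, a direct 32-bit shift would be undefined behaviour in
 * C, while the two 16-bit shifts safely yield zero.
 */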

static int cp_refill_rx (struct cp_private *cp)
{
	unsigned i;

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		dma_addr_t mapping;

		skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
		if (!skb)
			goto err_out;

		skb->dev = cp->dev;
		skb_reserve(skb, RX_OFFSET);

		mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz,
					 PCI_DMA_FROMDEVICE);
		cp->rx_skb[i].skb = skb;

		cp->rx_ring[i].opts2 = 0;
		cp->rx_ring[i].addr = cpu_to_le64(mapping);
		if (i == (CP_RX_RING_SIZE - 1))
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
		else
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | cp->rx_buf_sz);
	}

	return 0;

err_out:
	cp_clean_rings(cp);
	return -ENOMEM;
}

static void cp_init_rings_index (struct cp_private *cp)
{
	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;
}

static int cp_init_rings (struct cp_private *cp)
{
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);

	cp_init_rings_index(cp);

	return cp_refill_rx (cp);
}

static int cp_alloc_rings (struct cp_private *cp)
{
	void *mem;

	mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma);
	if (!mem)
		return -ENOMEM;

	cp->rx_ring = mem;
	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];

	return cp_init_rings(cp);
}
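
/* A single coherent allocation of CP_RING_BYTES backs the descriptor
 * memory: the Rx descriptors come first and the Tx descriptors follow
 * immediately after (hence tx_ring = &rx_ring[CP_RX_RING_SIZE]), with the
 * final CP_STATS_SIZE bytes reserved for a hardware statistics block (see
 * the CP_RING_BYTES macro).
 */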

static void cp_clean_rings (struct cp_private *cp)
{
	struct cp_desc *desc;
	unsigned i;

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		if (cp->rx_skb[i].skb) {
			desc = cp->rx_ring + i;
			pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(cp->rx_skb[i].skb);
		}
	}

	for (i = 0; i < CP_TX_RING_SIZE; i++) {
		if (cp->tx_skb[i].skb) {
			struct sk_buff *skb = cp->tx_skb[i].skb;

			desc = cp->tx_ring + i;
			pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
					 cp->tx_skb[i].len, PCI_DMA_TODEVICE);
			if (le32_to_cpu(desc->opts1) & LastFrag)
				dev_kfree_skb(skb);
			cp->net_stats.tx_dropped++;
		}
	}

	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);

	memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE);
	memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
}

static void cp_free_rings (struct cp_private *cp)
{
	cp_clean_rings(cp);
	pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
	cp->rx_ring = NULL;
	cp->tx_ring = NULL;
}

static int cp_open (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;

	if (netif_msg_ifup(cp))
		printk(KERN_DEBUG "%s: enabling interface\n", dev->name);

	rc = cp_alloc_rings(cp);
	if (rc)
		return rc;

	cp_init_hw(cp);

	rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc)
		goto err_out_hw;

	netif_carrier_off(dev);
	mii_check_media(&cp->mii_if, netif_msg_link(cp), TRUE);
	netif_start_queue(dev);

	return 0;

err_out_hw:
	cp_stop_hw(cp);
	cp_free_rings(cp);
	return rc;
}

static int cp_close (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (netif_msg_ifdown(cp))
		printk(KERN_DEBUG "%s: disabling interface\n", dev->name);

	spin_lock_irqsave(&cp->lock, flags);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	cp_stop_hw(cp);

	spin_unlock_irqrestore(&cp->lock, flags);

	synchronize_irq(dev->irq);
	free_irq(dev->irq, dev);

	cp_free_rings(cp);
	return 0;
}

#ifdef BROKEN
static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	/* check for invalid MTU, according to hardware limits */
	if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
		return -EINVAL;

	/* if network interface not up, no need for complexity */
	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		cp_set_rxbufsize(cp);	/* set new rx buf size */
		return 0;
	}

	spin_lock_irqsave(&cp->lock, flags);

	cp_stop_hw(cp);			/* stop h/w and free rings */
	cp_clean_rings(cp);

	dev->mtu = new_mtu;
	cp_set_rxbufsize(cp);		/* set new rx buf size */

	rc = cp_init_rings(cp);		/* realloc and restart h/w */
	cp_start_hw(cp);

	spin_unlock_irqrestore(&cp->lock, flags);

	return rc;
}
#endif /* BROKEN */

static const char mii_2_8139_map[8] = {
	BasicModeCtrl,
	BasicModeStatus,
	0,
	0,
	NWayAdvert,
	NWayLPAR,
	NWayExpansion,
	0
};

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct cp_private *cp = netdev_priv(dev);

	return location < 8 && mii_2_8139_map[location] ?
	       readw(cp->regs + mii_2_8139_map[location]) : 0;
}


static void mdio_write(struct net_device *dev, int phy_id, int location,
		       int value)
{
	struct cp_private *cp = netdev_priv(dev);

	if (location == 0) {
		cpw8(Cfg9346, Cfg9346_Unlock);
		cpw16(BasicModeCtrl, value);
		cpw8(Cfg9346, Cfg9346_Lock);
	} else if (location < 8 && mii_2_8139_map[location])
		cpw16(mii_2_8139_map[location], value);
}
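
/* The internal PHY is not on a real MDIO bus; its standard MII registers
 * (BMCR, BMSR, ADVERTISE, LPA, Expansion) are exposed as chip MMIO
 * registers, so mii_2_8139_map[] translates MII register numbers into the
 * register offsets above, and unmapped MII registers simply read back as
 * zero.
 */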

/* Set the ethtool Wake-on-LAN settings */
static int netdev_set_wol (struct cp_private *cp,
			   const struct ethtool_wolinfo *wol)
{
	u8 options;

	options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
	/* If WOL is being disabled, no need for complexity */
	if (wol->wolopts) {
		if (wol->wolopts & WAKE_PHY)	options |= LinkUp;
		if (wol->wolopts & WAKE_MAGIC)	options |= MagicPacket;
	}

	cpw8 (Cfg9346, Cfg9346_Unlock);
	cpw8 (Config3, options);
	cpw8 (Cfg9346, Cfg9346_Lock);

	options = 0; /* Paranoia setting */
	options = cpr8 (Config5) & ~(UWF | MWF | BWF);
	/* If WOL is being disabled, no need for complexity */
	if (wol->wolopts) {
		if (wol->wolopts & WAKE_UCAST)	options |= UWF;
		if (wol->wolopts & WAKE_BCAST)	options |= BWF;
		if (wol->wolopts & WAKE_MCAST)	options |= MWF;
	}

	cpw8 (Config5, options);

	cp->wol_enabled = (wol->wolopts) ? 1 : 0;

	return 0;
}

/* Get the ethtool Wake-on-LAN settings */
static void netdev_get_wol (struct cp_private *cp,
			    struct ethtool_wolinfo *wol)
{
	u8 options;

	wol->wolopts = 0; /* Start from scratch */
	wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC |
			 WAKE_MCAST | WAKE_UCAST;
	/* We don't need to go on if WOL is disabled */
	if (!cp->wol_enabled) return;

	options = cpr8 (Config3);
	if (options & LinkUp)		wol->wolopts |= WAKE_PHY;
	if (options & MagicPacket)	wol->wolopts |= WAKE_MAGIC;

	options = 0; /* Paranoia setting */
	options = cpr8 (Config5);
	if (options & UWF)		wol->wolopts |= WAKE_UCAST;
	if (options & BWF)		wol->wolopts |= WAKE_BCAST;
	if (options & MWF)		wol->wolopts |= WAKE_MCAST;
}

static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct cp_private *cp = netdev_priv(dev);

	strcpy (info->driver, DRV_NAME);
	strcpy (info->version, DRV_VERSION);
	strcpy (info->bus_info, pci_name(cp->pdev));
}

static int cp_get_regs_len(struct net_device *dev)
{
	return CP_REGS_SIZE;
}

static int cp_get_stats_count (struct net_device *dev)
{
	return CP_NUM_STATS;
}

static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	rc = mii_ethtool_gset(&cp->mii_if, cmd);
	spin_unlock_irqrestore(&cp->lock, flags);

	return rc;
}

static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	rc = mii_ethtool_sset(&cp->mii_if, cmd);
	spin_unlock_irqrestore(&cp->lock, flags);

	return rc;
}

static int cp_nway_reset(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	return mii_nway_restart(&cp->mii_if);
}

static u32 cp_get_msglevel(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	return cp->msg_enable;
}

static void cp_set_msglevel(struct net_device *dev, u32 value)
{
	struct cp_private *cp = netdev_priv(dev);
	cp->msg_enable = value;
}

static u32 cp_get_rx_csum(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	return (cpr16(CpCmd) & RxChkSum) ? 1 : 0;
}

static int cp_set_rx_csum(struct net_device *dev, u32 data)
{
	struct cp_private *cp = netdev_priv(dev);
	u16 cmd = cp->cpcmd, newcmd;

	newcmd = cmd;

	if (data)
		newcmd |= RxChkSum;
	else
		newcmd &= ~RxChkSum;

	if (newcmd != cmd) {
		unsigned long flags;

		spin_lock_irqsave(&cp->lock, flags);
		cp->cpcmd = newcmd;
		cpw16_f(CpCmd, newcmd);
		spin_unlock_irqrestore(&cp->lock, flags);
	}

	return 0;
}

static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
		        void *p)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len < CP_REGS_SIZE)
		return /* -EINVAL */;

	regs->version = CP_REGS_VER;

	spin_lock_irqsave(&cp->lock, flags);
	memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
	spin_unlock_irqrestore(&cp->lock, flags);
}

static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave (&cp->lock, flags);
	netdev_get_wol (cp, wol);
	spin_unlock_irqrestore (&cp->lock, flags);
}

static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;
	int rc;

	spin_lock_irqsave (&cp->lock, flags);
	rc = netdev_set_wol (cp, wol);
	spin_unlock_irqrestore (&cp->lock, flags);

	return rc;
}

static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	default:
		BUG();
		break;
	}
}

static void cp_get_ethtool_stats (struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct cp_private *cp = netdev_priv(dev);
	struct cp_dma_stats *nic_stats;
	dma_addr_t dma;
	int i;

	nic_stats = pci_alloc_consistent(cp->pdev, sizeof(*nic_stats), &dma);
	if (!nic_stats)
		return;

	/* begin NIC statistics dump */
	cpw32(StatsAddr + 4, (u64)dma >> 32);
	cpw32(StatsAddr, ((u64)dma & DMA_32BIT_MASK) | DumpStats);
	cpr32(StatsAddr);

	for (i = 0; i < 1000; i++) {
		if ((cpr32(StatsAddr) & DumpStats) == 0)
			break;
		udelay(10);
	}
	cpw32(StatsAddr, 0);
	cpw32(StatsAddr + 4, 0);
	cpr32(StatsAddr);

	i = 0;
	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
	tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
	tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
	tmp_stats[i++] = cp->cp_stats.rx_frags;
	BUG_ON(i != CP_NUM_STATS);

	pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
}
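
/* The dump handshake above: writing the DMA address with the DumpStats bit
 * set asks the chip to copy its counters into the coherent buffer, and the
 * chip clears DumpStats once the copy is complete, so the loop polls for
 * that bit for at most 1000 * 10us before giving up.
 */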

static struct ethtool_ops cp_ethtool_ops = {
	.get_drvinfo		= cp_get_drvinfo,
	.get_regs_len		= cp_get_regs_len,
	.get_stats_count	= cp_get_stats_count,
	.get_settings		= cp_get_settings,
	.set_settings		= cp_set_settings,
	.nway_reset		= cp_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= cp_get_msglevel,
	.set_msglevel		= cp_set_msglevel,
	.get_rx_csum		= cp_get_rx_csum,
	.set_rx_csum		= cp_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum, /* local! */
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= ethtool_op_set_tso,
	.get_regs		= cp_get_regs,
	.get_wol		= cp_get_wol,
	.set_wol		= cp_set_wol,
	.get_strings		= cp_get_strings,
	.get_ethtool_stats	= cp_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
	.get_eeprom_len		= cp_get_eeprom_len,
	.get_eeprom		= cp_get_eeprom,
	.set_eeprom		= cp_set_eeprom,
};

static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irqsave(&cp->lock, flags);
	rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irqrestore(&cp->lock, flags);
	return rc;
}

/* Serial EEPROM section. */

/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS		0x08	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x02	/* EEPROM chip data in. */
#define EE_WRITE_0	0x00
#define EE_WRITE_1	0x02
#define EE_DATA_READ	0x01	/* EEPROM chip data out. */
#define EE_ENB		(0x80 | EE_CS)

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
 */

#define eeprom_delay()	readl(ee_addr)

/* The EEPROM commands include the always-set leading bit. */
#define EE_EXTEND_CMD	(4)
#define EE_WRITE_CMD	(5)
#define EE_READ_CMD	(6)
#define EE_ERASE_CMD	(7)

#define EE_EWDS_ADDR	(0)
#define EE_WRAL_ADDR	(1)
#define EE_ERAL_ADDR	(2)
#define EE_EWEN_ADDR	(3)

#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
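
/* Worked example of the command encoding used below (illustrative): an
 * EWEN (write-enable) extended command with addr_len = 6 is built in
 * eeprom_extend_cmd() as (EE_EXTEND_CMD << 6) | (EE_EWEN_ADDR << 4) =
 * 0x130, then clocked out MSB-first over 3 + addr_len = 9 bit times.
 */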

static void eeprom_cmd_start(void __iomem *ee_addr)
{
	writeb (EE_ENB & ~EE_CS, ee_addr);
	writeb (EE_ENB, ee_addr);
	eeprom_delay ();
}

static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
{
	int i;

	/* Shift the command bits out. */
	for (i = cmd_len - 1; i >= 0; i--) {
		int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
		writeb (EE_ENB | dataval, ee_addr);
		eeprom_delay ();
		writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		eeprom_delay ();
	}
	writeb (EE_ENB, ee_addr);
	eeprom_delay ();
}

static void eeprom_cmd_end(void __iomem *ee_addr)
{
	writeb (~EE_CS, ee_addr);
	eeprom_delay ();
}

static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
			      int addr_len)
{
	int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, cmd, 3 + addr_len);
	eeprom_cmd_end(ee_addr);
}

static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
{
	int i;
	u16 retval = 0;
	void __iomem *ee_addr = ioaddr + Cfg9346;
	int read_cmd = location | (EE_READ_CMD << addr_len);

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);

	for (i = 16; i > 0; i--) {
		writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
		eeprom_delay ();
		retval =
		    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
				     0);
		writeb (EE_ENB, ee_addr);
		eeprom_delay ();
	}

	eeprom_cmd_end(ee_addr);

	return retval;
}

static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
			 int addr_len)
{
	int i;
	void __iomem *ee_addr = ioaddr + Cfg9346;
	int write_cmd = location | (EE_WRITE_CMD << addr_len);

	eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
	eeprom_cmd(ee_addr, val, 16);
	eeprom_cmd_end(ee_addr);

	eeprom_cmd_start(ee_addr);
	for (i = 0; i < 20000; i++)
		if (readb(ee_addr) & EE_DATA_READ)
			break;
	eeprom_cmd_end(ee_addr);

	eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
}

static int cp_get_eeprom_len(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	int size;

	spin_lock_irq(&cp->lock);
	size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
	spin_unlock_irq(&cp->lock);

	return size;
}

static int cp_get_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned int addr_len;
	u16 val;
	u32 offset = eeprom->offset >> 1;
	u32 len = eeprom->len;
	u32 i = 0;

	eeprom->magic = CP_EEPROM_MAGIC;

	spin_lock_irq(&cp->lock);

	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;

	if (eeprom->offset & 1) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i++] = (u8)(val >> 8);
		offset++;
	}

	while (i < len - 1) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i++] = (u8)val;
		data[i++] = (u8)(val >> 8);
		offset++;
	}

	if (i < len) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i] = (u8)val;
	}

	spin_unlock_irq(&cp->lock);
	return 0;
}

static int cp_set_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned int addr_len;
	u16 val;
	u32 offset = eeprom->offset >> 1;
	u32 len = eeprom->len;
	u32 i = 0;

	if (eeprom->magic != CP_EEPROM_MAGIC)
		return -EINVAL;

	spin_lock_irq(&cp->lock);

	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;

	if (eeprom->offset & 1) {
		val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
		val |= (u16)data[i++] << 8;
		write_eeprom(cp->regs, offset, val, addr_len);
		offset++;
	}

	while (i < len - 1) {
		val = (u16)data[i++];
		val |= (u16)data[i++] << 8;
		write_eeprom(cp->regs, offset, val, addr_len);
		offset++;
	}

	if (i < len) {
		val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
		val |= (u16)data[i];
		write_eeprom(cp->regs, offset, val, addr_len);
	}

	spin_unlock_irq(&cp->lock);
	return 0;
}
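
/* The EEPROM is addressed in 16-bit words, so the byte-oriented ethtool
 * interface is bridged above by handling a leading odd byte, the aligned
 * word run, and a trailing odd byte separately; partial words on the write
 * path are read-modify-written to preserve the untouched byte.
 */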
1810
1da177e4
LT
1811/* Put the board into D3cold state and wait for WakeUp signal */
1812static void cp_set_d3_state (struct cp_private *cp)
1813{
1814 pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
1815 pci_set_power_state (cp->pdev, PCI_D3hot);
1816}
1817
1818static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1819{
1820 struct net_device *dev;
1821 struct cp_private *cp;
1822 int rc;
1823 void __iomem *regs;
2427ddd8 1824 resource_size_t pciaddr;
1da177e4
LT
1825 unsigned int addr_len, i, pci_using_dac;
1826 u8 pci_rev;
1827
1828#ifndef MODULE
1829 static int version_printed;
1830 if (version_printed++ == 0)
1831 printk("%s", version);
1832#endif
1833
1834 pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
1835
1836 if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1837 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
9b91cf9d 1838 dev_err(&pdev->dev,
2e8a538d
JG
1839 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
1840 pdev->vendor, pdev->device, pci_rev);
9b91cf9d 1841 dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n");
1da177e4
LT
1842 return -ENODEV;
1843 }

	dev = alloc_etherdev(sizeof(struct cp_private));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	cp = netdev_priv(dev);
	cp->pdev = pdev;
	cp->dev = dev;
	cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
	spin_lock_init (&cp->lock);
	cp->mii_if.dev = dev;
	cp->mii_if.mdio_read = mdio_read;
	cp->mii_if.mdio_write = mdio_write;
	cp->mii_if.phy_id = CP_INTERNAL_PHY;
	cp->mii_if.phy_id_mask = 0x1f;
	cp->mii_if.reg_num_mask = 0x1f;
	cp_set_rxbufsize(cp);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out_free;

	rc = pci_set_mwi(pdev);
	if (rc)
		goto err_out_disable;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_mwi;

	pciaddr = pci_resource_start(pdev, 1);
	if (!pciaddr) {
		rc = -EIO;
		dev_err(&pdev->dev, "no MMIO resource\n");
		goto err_out_res;
	}
	if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
		rc = -EIO;
		dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
			(unsigned long long)pci_resource_len(pdev, 1));
		goto err_out_res;
	}

	/* Configure DMA attributes. */
	if ((sizeof(dma_addr_t) > 4) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
	} else {
		pci_using_dac = 0;

		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting.\n");
			goto err_out_res;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_err(&pdev->dev,
				"No usable consistent DMA configuration, "
				"aborting.\n");
			goto err_out_res;
		}
	}

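	/* Precompute the C+ Command register image: Rx/Tx engines on,
	 * hardware Rx checksumming, PCI multiple read/write, plus 64-bit
	 * DMA addressing (PCIDAC) when the 64-bit mask was accepted.
	 */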
	cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
		    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;

	regs = ioremap(pciaddr, CP_REGS_SIZE);
	if (!regs) {
		rc = -EIO;
		dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
			(unsigned long long)pci_resource_len(pdev, 1),
			(unsigned long long)pciaddr);
		goto err_out_res;
	}
	dev->base_addr = (unsigned long) regs;
	cp->regs = regs;

	cp_stop_hw(cp);

	/* read MAC address from EEPROM */
	addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
	for (i = 0; i < 3; i++)
		((u16 *) (dev->dev_addr))[i] =
		    le16_to_cpu (read_eeprom (regs, i + 7, addr_len));
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

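	/* Wire up the net_device entry points; Rx is serviced through
	 * NAPI (cp_rx_poll) with the small polling weight set below.
	 */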
	dev->open = cp_open;
	dev->stop = cp_close;
	dev->set_multicast_list = cp_set_rx_mode;
	dev->hard_start_xmit = cp_start_xmit;
	dev->get_stats = cp_get_stats;
	dev->do_ioctl = cp_ioctl;
	dev->poll = cp_rx_poll;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = cp_poll_controller;
#endif
	dev->weight = 16;	/* arbitrary? from NAPI_HOWTO.txt. */
#ifdef BROKEN
	dev->change_mtu = cp_change_mtu;
#endif
	dev->ethtool_ops = &cp_ethtool_ops;
#if 0
	dev->tx_timeout = cp_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#endif

#if CP_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = cp_vlan_rx_register;
	dev->vlan_rx_kill_vid = cp_vlan_rx_kill_vid;
#endif

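	/* With a 64-bit DMA mask in place, the stack may hand us packet
	 * buffers located in high memory without bounce-buffering.
	 */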
	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

#if 0 /* disabled by default until verified */
	dev->features |= NETIF_F_TSO;
#endif

	dev->irq = pdev->irq;

	rc = register_netdev(dev);
	if (rc)
		goto err_out_iomap;

	printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, "
		"%02x:%02x:%02x:%02x:%02x:%02x, "
		"IRQ %d\n",
		dev->name,
		dev->base_addr,
		dev->dev_addr[0], dev->dev_addr[1],
		dev->dev_addr[2], dev->dev_addr[3],
		dev->dev_addr[4], dev->dev_addr[5],
		dev->irq);

	pci_set_drvdata(pdev, dev);

	/* enable busmastering and memory-write-invalidate */
	pci_set_master(pdev);

	if (cp->wol_enabled)
		cp_set_d3_state (cp);

	return 0;

err_out_iomap:
	iounmap(regs);
err_out_res:
	pci_release_regions(pdev);
err_out_mwi:
	pci_clear_mwi(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	free_netdev(dev);
	return rc;
}

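/* Undo cp_init_one in reverse order.  If Wake-on-LAN left the chip parked
 * in a low-power state at probe time, bring it back to D0 before the
 * resources are released.
 */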
static void cp_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cp_private *cp;

	BUG_ON(!dev);
	cp = netdev_priv(dev);
	unregister_netdev(dev);
	iounmap(cp->regs);
	if (cp->wol_enabled)
		pci_set_power_state (pdev, PCI_D0);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}

#ifdef CONFIG_PM
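/* Legacy PCI power management: mask interrupts and stop the Rx/Tx
 * engines, then save config space and, when Wake-on-LAN is enabled,
 * arm PME# for the target sleep state.
 */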
static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct cp_private *cp;
	unsigned long flags;

	if (!dev || !netif_running (dev))
		return 0;

	cp = netdev_priv(dev);

	netif_device_detach (dev);
	netif_stop_queue (dev);

	spin_lock_irqsave (&cp->lock, flags);

	/* Disable Rx and Tx */
	cpw16 (IntrMask, 0);
	cpw8 (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));

	spin_unlock_irqrestore (&cp->lock, flags);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

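/* Resume reverses cp_suspend: power up to D0, restore config space and
 * disarm wake, then rebuild the ring indices and reprogram the hardware
 * before traffic is allowed to flow again.
 */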
static int cp_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	netif_device_attach (dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	/* FIXME: sh*t may happen if the Rx ring buffer is depleted */
	cp_init_rings_index (cp);
	cp_init_hw (cp);
	netif_start_queue (dev);

	spin_lock_irqsave (&cp->lock, flags);

	mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);

	spin_unlock_irqrestore (&cp->lock, flags);

	return 0;
}
#endif /* CONFIG_PM */

static struct pci_driver cp_driver = {
	.name		= DRV_NAME,
	.id_table	= cp_pci_tbl,
	.probe		= cp_init_one,
	.remove		= cp_remove_one,
#ifdef CONFIG_PM
	.resume		= cp_resume,
	.suspend	= cp_suspend,
#endif
};

static int __init cp_init (void)
{
#ifdef MODULE
	printk("%s", version);
#endif
	return pci_module_init (&cp_driver);
}

static void __exit cp_exit (void)
{
	pci_unregister_driver (&cp_driver);
}

module_init(cp_init);
module_exit(cp_exit);