]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/r8169.c
r8169: populate the hw_start handler for the 8168
[net-next-2.6.git] / drivers / net / r8169.c
CommitLineData
1da177e4
LT
1/*
2=========================================================================
3 r8169.c: A RealTek RTL-8169 Gigabit Ethernet driver for Linux kernel 2.4.x.
4 --------------------------------------------------------------------
5
6 History:
7 Feb 4 2002 - created initially by ShuChen <shuchen@realtek.com.tw>.
8 May 20 2002 - Add link status force-mode and TBI mode support.
5b0384f4 9 2004 - Massive updates. See kernel SCM system for details.
1da177e4
LT
10=========================================================================
11 1. [DEPRECATED: use ethtool instead] The media can be forced in 5 modes.
12 Command: 'insmod r8169 media = SET_MEDIA'
13 Ex: 'insmod r8169 media = 0x04' will force PHY to operate in 100Mpbs Half-duplex.
5b0384f4 14
1da177e4
LT
15 SET_MEDIA can be:
16 _10_Half = 0x01
17 _10_Full = 0x02
18 _100_Half = 0x04
19 _100_Full = 0x08
20 _1000_Full = 0x10
5b0384f4 21
1da177e4
LT
22 2. Support TBI mode.
23=========================================================================
24VERSION 1.1 <2002/10/4>
25
26 The bit4:0 of MII register 4 is called "selector field", and have to be
27 00001b to indicate support of IEEE std 802.3 during NWay process of
5b0384f4 28 exchanging Link Code Word (FLP).
1da177e4
LT
29
30VERSION 1.2 <2002/11/30>
31
32 - Large style cleanup
33 - Use ether_crc in stock kernel (linux/crc32.h)
34 - Copy mc_filter setup code from 8139cp
35 (includes an optimization, and avoids set_bit use)
36
37VERSION 1.6LK <2004/04/14>
38
39 - Merge of Realtek's version 1.6
40 - Conversion to DMA API
41 - Suspend/resume
42 - Endianness
43 - Misc Rx/Tx bugs
44
45VERSION 2.2LK <2005/01/25>
46
47 - RX csum, TX csum/SG, TSO
48 - VLAN
49 - baby (< 7200) Jumbo frames support
50 - Merge of Realtek's version 2.2 (new phy)
51 */
52
53#include <linux/module.h>
54#include <linux/moduleparam.h>
55#include <linux/pci.h>
56#include <linux/netdevice.h>
57#include <linux/etherdevice.h>
58#include <linux/delay.h>
59#include <linux/ethtool.h>
60#include <linux/mii.h>
61#include <linux/if_vlan.h>
62#include <linux/crc32.h>
63#include <linux/in.h>
64#include <linux/ip.h>
65#include <linux/tcp.h>
66#include <linux/init.h>
67#include <linux/dma-mapping.h>
68
99f252b0 69#include <asm/system.h>
1da177e4
LT
70#include <asm/io.h>
71#include <asm/irq.h>
72
f7ccf420
SH
73#ifdef CONFIG_R8169_NAPI
74#define NAPI_SUFFIX "-NAPI"
75#else
76#define NAPI_SUFFIX ""
77#endif
78
79#define RTL8169_VERSION "2.2LK" NAPI_SUFFIX
1da177e4
LT
80#define MODULENAME "r8169"
81#define PFX MODULENAME ": "
82
83#ifdef RTL8169_DEBUG
84#define assert(expr) \
5b0384f4
FR
85 if (!(expr)) { \
86 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
87 #expr,__FILE__,__FUNCTION__,__LINE__); \
88 }
1da177e4
LT
89#define dprintk(fmt, args...) do { printk(PFX fmt, ## args); } while (0)
90#else
91#define assert(expr) do {} while (0)
92#define dprintk(fmt, args...) do {} while (0)
93#endif /* RTL8169_DEBUG */
94
b57b7e5a 95#define R8169_MSG_DEFAULT \
f0e837d9 96 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
b57b7e5a 97
1da177e4
LT
98#define TX_BUFFS_AVAIL(tp) \
99 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
100
101#ifdef CONFIG_R8169_NAPI
102#define rtl8169_rx_skb netif_receive_skb
0b50f81d 103#define rtl8169_rx_hwaccel_skb vlan_hwaccel_receive_skb
1da177e4
LT
104#define rtl8169_rx_quota(count, quota) min(count, quota)
105#else
106#define rtl8169_rx_skb netif_rx
0b50f81d 107#define rtl8169_rx_hwaccel_skb vlan_hwaccel_rx
1da177e4
LT
108#define rtl8169_rx_quota(count, quota) count
109#endif
110
111/* media options */
112#define MAX_UNITS 8
113static int media[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
114static int num_media = 0;
115
116/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
f71e1309 117static const int max_interrupt_work = 20;
1da177e4
LT
118
119/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
120 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
f71e1309 121static const int multicast_filter_limit = 32;
1da177e4
LT
122
123/* MAC address length */
124#define MAC_ADDR_LEN 6
125
126#define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */
127#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
128#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
129#define EarlyTxThld 0x3F /* 0x3F means NO early transmit */
130#define RxPacketMaxSize 0x3FE8 /* 16K - 1 - ETH_HLEN - VLAN - CRC... */
131#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
132#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
133
134#define R8169_REGS_SIZE 256
135#define R8169_NAPI_WEIGHT 64
136#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
137#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
138#define RX_BUF_SIZE 1536 /* Rx Buffer size */
139#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
140#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
141
142#define RTL8169_TX_TIMEOUT (6*HZ)
143#define RTL8169_PHY_TIMEOUT (10*HZ)
144
145/* write/read MMIO register */
146#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
147#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
148#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
149#define RTL_R8(reg) readb (ioaddr + (reg))
150#define RTL_R16(reg) readw (ioaddr + (reg))
151#define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg)))
152
153enum mac_version {
bcf0bf90
FR
154 RTL_GIGA_MAC_VER_01 = 0x00,
155 RTL_GIGA_MAC_VER_02 = 0x01,
156 RTL_GIGA_MAC_VER_03 = 0x02,
157 RTL_GIGA_MAC_VER_04 = 0x03,
158 RTL_GIGA_MAC_VER_05 = 0x04,
2dd99530
FR
159 RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb
160 RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be 8168Bf
bcf0bf90
FR
161 RTL_GIGA_MAC_VER_13 = 0x0d,
162 RTL_GIGA_MAC_VER_14 = 0x0e,
163 RTL_GIGA_MAC_VER_15 = 0x0f
1da177e4
LT
164};
165
166enum phy_version {
167 RTL_GIGA_PHY_VER_C = 0x03, /* PHY Reg 0x03 bit0-3 == 0x0000 */
168 RTL_GIGA_PHY_VER_D = 0x04, /* PHY Reg 0x03 bit0-3 == 0x0000 */
169 RTL_GIGA_PHY_VER_E = 0x05, /* PHY Reg 0x03 bit0-3 == 0x0000 */
170 RTL_GIGA_PHY_VER_F = 0x06, /* PHY Reg 0x03 bit0-3 == 0x0001 */
171 RTL_GIGA_PHY_VER_G = 0x07, /* PHY Reg 0x03 bit0-3 == 0x0002 */
172 RTL_GIGA_PHY_VER_H = 0x08, /* PHY Reg 0x03 bit0-3 == 0x0003 */
173};
174
1da177e4
LT
175#define _R(NAME,MAC,MASK) \
176 { .name = NAME, .mac_version = MAC, .RxConfigMask = MASK }
177
3c6bee1d 178static const struct {
1da177e4
LT
179 const char *name;
180 u8 mac_version;
181 u32 RxConfigMask; /* Clears the bits supported by this chip */
182} rtl_chip_info[] = {
bcf0bf90
FR
183 _R("RTL8169", RTL_GIGA_MAC_VER_01, 0xff7e1880),
184 _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_02, 0xff7e1880),
185 _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_03, 0xff7e1880),
186 _R("RTL8169sb/8110sb", RTL_GIGA_MAC_VER_04, 0xff7e1880),
187 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_05, 0xff7e1880),
188 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E
189 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
190 _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
191 _R("RTL8100e", RTL_GIGA_MAC_VER_14, 0xff7e1880), // PCI-E 8139
192 _R("RTL8100e", RTL_GIGA_MAC_VER_15, 0xff7e1880) // PCI-E 8139
1da177e4
LT
193};
194#undef _R
195
bcf0bf90
FR
196enum cfg_version {
197 RTL_CFG_0 = 0x00,
198 RTL_CFG_1,
199 RTL_CFG_2
200};
201
07ce4064
FR
202static void rtl_hw_start_8169(struct net_device *);
203static void rtl_hw_start_8168(struct net_device *);
204static void rtl_hw_start_8101(struct net_device *);
205
bcf0bf90 206static const struct {
07ce4064 207 void (*hw_start)(struct net_device *);
bcf0bf90
FR
208 unsigned int region;
209 unsigned int align;
210} rtl_cfg_info[] = {
07ce4064
FR
211 [RTL_CFG_0] = { rtl_hw_start_8169, 1, NET_IP_ALIGN },
212 [RTL_CFG_1] = { rtl_hw_start_8168, 2, 8 },
213 [RTL_CFG_2] = { rtl_hw_start_8101, 2, 8 }
bcf0bf90
FR
214};
215
1da177e4 216static struct pci_device_id rtl8169_pci_tbl[] = {
bcf0bf90 217 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
d2eed8cf 218 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
d81bf551 219 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
07ce4064 220 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
bcf0bf90
FR
221 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
222 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
73f5e28b 223 { PCI_DEVICE(0x1259, 0xc107), 0, 0, RTL_CFG_0 },
bcf0bf90
FR
224 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
225 { PCI_VENDOR_ID_LINKSYS, 0x1032,
226 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
1da177e4
LT
227 {0,},
228};
229
230MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
231
232static int rx_copybreak = 200;
233static int use_dac;
b57b7e5a
SH
234static struct {
235 u32 msg_enable;
236} debug = { -1 };
1da177e4
LT
237
238enum RTL8169_registers {
239 MAC0 = 0, /* Ethernet hardware address. */
240 MAR0 = 8, /* Multicast filter. */
d4a3a0fc
SH
241 CounterAddrLow = 0x10,
242 CounterAddrHigh = 0x14,
1da177e4
LT
243 TxDescStartAddrLow = 0x20,
244 TxDescStartAddrHigh = 0x24,
245 TxHDescStartAddrLow = 0x28,
246 TxHDescStartAddrHigh = 0x2c,
247 FLASH = 0x30,
248 ERSR = 0x36,
249 ChipCmd = 0x37,
250 TxPoll = 0x38,
251 IntrMask = 0x3C,
252 IntrStatus = 0x3E,
253 TxConfig = 0x40,
254 RxConfig = 0x44,
255 RxMissed = 0x4C,
256 Cfg9346 = 0x50,
257 Config0 = 0x51,
258 Config1 = 0x52,
259 Config2 = 0x53,
260 Config3 = 0x54,
261 Config4 = 0x55,
262 Config5 = 0x56,
263 MultiIntr = 0x5C,
264 PHYAR = 0x60,
265 TBICSR = 0x64,
266 TBI_ANAR = 0x68,
267 TBI_LPAR = 0x6A,
268 PHYstatus = 0x6C,
269 RxMaxSize = 0xDA,
270 CPlusCmd = 0xE0,
271 IntrMitigate = 0xE2,
272 RxDescAddrLow = 0xE4,
273 RxDescAddrHigh = 0xE8,
274 EarlyTxThres = 0xEC,
275 FuncEvent = 0xF0,
276 FuncEventMask = 0xF4,
277 FuncPresetState = 0xF8,
278 FuncForceEvent = 0xFC,
279};
280
281enum RTL8169_register_content {
282 /* InterruptStatusBits */
283 SYSErr = 0x8000,
284 PCSTimeout = 0x4000,
285 SWInt = 0x0100,
286 TxDescUnavail = 0x80,
287 RxFIFOOver = 0x40,
288 LinkChg = 0x20,
289 RxOverflow = 0x10,
290 TxErr = 0x08,
291 TxOK = 0x04,
292 RxErr = 0x02,
293 RxOK = 0x01,
294
295 /* RxStatusDesc */
9dccf611
FR
296 RxFOVF = (1 << 23),
297 RxRWT = (1 << 22),
298 RxRES = (1 << 21),
299 RxRUNT = (1 << 20),
300 RxCRC = (1 << 19),
1da177e4
LT
301
302 /* ChipCmdBits */
303 CmdReset = 0x10,
304 CmdRxEnb = 0x08,
305 CmdTxEnb = 0x04,
306 RxBufEmpty = 0x01,
307
308 /* Cfg9346Bits */
309 Cfg9346_Lock = 0x00,
310 Cfg9346_Unlock = 0xC0,
311
312 /* rx_mode_bits */
313 AcceptErr = 0x20,
314 AcceptRunt = 0x10,
315 AcceptBroadcast = 0x08,
316 AcceptMulticast = 0x04,
317 AcceptMyPhys = 0x02,
318 AcceptAllPhys = 0x01,
319
320 /* RxConfigBits */
321 RxCfgFIFOShift = 13,
322 RxCfgDMAShift = 8,
323
324 /* TxConfigBits */
325 TxInterFrameGapShift = 24,
326 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
327
5d06a99f
FR
328 /* Config1 register p.24 */
329 PMEnable = (1 << 0), /* Power Management Enable */
330
61a4dcc2
FR
331 /* Config3 register p.25 */
332 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
333 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
334
5d06a99f 335 /* Config5 register p.27 */
61a4dcc2
FR
336 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
337 MWF = (1 << 5), /* Accept Multicast wakeup frame */
338 UWF = (1 << 4), /* Accept Unicast wakeup frame */
339 LanWake = (1 << 1), /* LanWake enable/disable */
5d06a99f
FR
340 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
341
1da177e4
LT
342 /* TBICSR p.28 */
343 TBIReset = 0x80000000,
344 TBILoopback = 0x40000000,
345 TBINwEnable = 0x20000000,
346 TBINwRestart = 0x10000000,
347 TBILinkOk = 0x02000000,
348 TBINwComplete = 0x01000000,
349
350 /* CPlusCmd p.31 */
351 RxVlan = (1 << 6),
352 RxChkSum = (1 << 5),
353 PCIDAC = (1 << 4),
354 PCIMulRW = (1 << 3),
355
356 /* rtl8169_PHYstatus */
357 TBI_Enable = 0x80,
358 TxFlowCtrl = 0x40,
359 RxFlowCtrl = 0x20,
360 _1000bpsF = 0x10,
361 _100bps = 0x08,
362 _10bps = 0x04,
363 LinkStatus = 0x02,
364 FullDup = 0x01,
365
1da177e4
LT
366 /* _MediaType */
367 _10_Half = 0x01,
368 _10_Full = 0x02,
369 _100_Half = 0x04,
370 _100_Full = 0x08,
371 _1000_Full = 0x10,
372
373 /* _TBICSRBit */
374 TBILinkOK = 0x02000000,
d4a3a0fc
SH
375
376 /* DumpCounterCommand */
377 CounterDump = 0x8,
1da177e4
LT
378};
379
380enum _DescStatusBit {
381 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
382 RingEnd = (1 << 30), /* End of descriptor ring */
383 FirstFrag = (1 << 29), /* First segment of a packet */
384 LastFrag = (1 << 28), /* Final segment of a packet */
385
386 /* Tx private */
387 LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
388 MSSShift = 16, /* MSS value position */
389 MSSMask = 0xfff, /* MSS value + LargeSend bit: 12 bits */
390 IPCS = (1 << 18), /* Calculate IP checksum */
391 UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
392 TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
393 TxVlanTag = (1 << 17), /* Add VLAN tag */
394
395 /* Rx private */
396 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
397 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
398
399#define RxProtoUDP (PID1)
400#define RxProtoTCP (PID0)
401#define RxProtoIP (PID1 | PID0)
402#define RxProtoMask RxProtoIP
403
404 IPFail = (1 << 16), /* IP checksum failed */
405 UDPFail = (1 << 15), /* UDP/IP checksum failed */
406 TCPFail = (1 << 14), /* TCP/IP checksum failed */
407 RxVlanTag = (1 << 16), /* VLAN tag available */
408};
409
410#define RsvdMask 0x3fffc000
411
412struct TxDesc {
413 u32 opts1;
414 u32 opts2;
415 u64 addr;
416};
417
418struct RxDesc {
419 u32 opts1;
420 u32 opts2;
421 u64 addr;
422};
423
424struct ring_info {
425 struct sk_buff *skb;
426 u32 len;
427 u8 __pad[sizeof(void *) - sizeof(u32)];
428};
429
430struct rtl8169_private {
431 void __iomem *mmio_addr; /* memory map physical address */
432 struct pci_dev *pci_dev; /* Index of PCI device */
c4028958 433 struct net_device *dev;
1da177e4
LT
434 struct net_device_stats stats; /* statistics of net device */
435 spinlock_t lock; /* spin lock flag */
b57b7e5a 436 u32 msg_enable;
1da177e4
LT
437 int chipset;
438 int mac_version;
439 int phy_version;
440 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
441 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
442 u32 dirty_rx;
443 u32 dirty_tx;
444 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
445 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
446 dma_addr_t TxPhyAddr;
447 dma_addr_t RxPhyAddr;
448 struct sk_buff *Rx_skbuff[NUM_RX_DESC]; /* Rx data buffers */
449 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
bcf0bf90 450 unsigned align;
1da177e4
LT
451 unsigned rx_buf_sz;
452 struct timer_list timer;
453 u16 cp_cmd;
454 u16 intr_mask;
455 int phy_auto_nego_reg;
456 int phy_1000_ctrl_reg;
457#ifdef CONFIG_R8169_VLAN
458 struct vlan_group *vlgrp;
459#endif
460 int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
461 void (*get_settings)(struct net_device *, struct ethtool_cmd *);
462 void (*phy_reset_enable)(void __iomem *);
07ce4064 463 void (*hw_start)(struct net_device *);
1da177e4
LT
464 unsigned int (*phy_reset_pending)(void __iomem *);
465 unsigned int (*link_ok)(void __iomem *);
c4028958 466 struct delayed_work task;
61a4dcc2 467 unsigned wol_enabled : 1;
1da177e4
LT
468};
469
979b6c13 470MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
1da177e4
LT
471MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
472module_param_array(media, int, &num_media, 0);
df0a1bf6 473MODULE_PARM_DESC(media, "force phy operation. Deprecated by ethtool (8).");
1da177e4 474module_param(rx_copybreak, int, 0);
1b7efd58 475MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
1da177e4
LT
476module_param(use_dac, int, 0);
477MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
b57b7e5a
SH
478module_param_named(debug, debug.msg_enable, int, 0);
479MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
1da177e4
LT
480MODULE_LICENSE("GPL");
481MODULE_VERSION(RTL8169_VERSION);
482
483static int rtl8169_open(struct net_device *dev);
484static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev);
7d12e780 485static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance);
1da177e4 486static int rtl8169_init_ring(struct net_device *dev);
07ce4064 487static void rtl_hw_start(struct net_device *dev);
1da177e4 488static int rtl8169_close(struct net_device *dev);
07ce4064 489static void rtl_set_rx_mode(struct net_device *dev);
1da177e4 490static void rtl8169_tx_timeout(struct net_device *dev);
4dcb7d33 491static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
1da177e4
LT
492static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
493 void __iomem *);
4dcb7d33 494static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
1da177e4 495static void rtl8169_down(struct net_device *dev);
99f252b0 496static void rtl8169_rx_clear(struct rtl8169_private *tp);
1da177e4
LT
497
498#ifdef CONFIG_R8169_NAPI
499static int rtl8169_poll(struct net_device *dev, int *budget);
500#endif
501
502static const u16 rtl8169_intr_mask =
503 SYSErr | LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK;
504static const u16 rtl8169_napi_event =
505 RxOK | RxOverflow | RxFIFOOver | TxOK | TxErr;
506static const unsigned int rtl8169_rx_config =
5b0384f4 507 (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
1da177e4
LT
508
/*
 * Write @value to PHY register @RegAddr through the MAC's indirect
 * MII access register (PHYAR).  Setting bit 31 starts a write cycle;
 * the loop below polls for the MAC clearing that bit on completion.
 * The wait is bounded (20 * 25us); a timeout is not reported to the
 * caller.
 */
static void mdio_write(void __iomem *ioaddr, int RegAddr, int value)
{
	int i;

	RTL_W32(PHYAR, 0x80000000 | (RegAddr & 0xFF) << 16 | value);

	for (i = 20; i > 0; i--) {
		/* Check if the RTL8169 has completed writing to the specified MII register */
		if (!(RTL_R32(PHYAR) & 0x80000000))
			break;
		udelay(25);
	}
}
522
/*
 * Read PHY register @RegAddr through PHYAR.  A read cycle is started
 * with bit 31 clear; the MAC sets bit 31 when the low 16 bits hold
 * valid data.  Returns the 16-bit register value, or -1 if the cycle
 * did not complete within the bounded poll (20 * 25us).
 */
static int mdio_read(void __iomem *ioaddr, int RegAddr)
{
	int i, value = -1;

	RTL_W32(PHYAR, 0x0 | (RegAddr & 0xFF) << 16);

	for (i = 20; i > 0; i--) {
		/* Check if the RTL8169 has completed retrieving data from the specified MII register */
		if (RTL_R32(PHYAR) & 0x80000000) {
			value = (int) (RTL_R32(PHYAR) & 0xFFFF);
			break;
		}
		udelay(25);
	}
	return value;
}
539
/*
 * Mask every interrupt source, then acknowledge all pending status
 * bits (IntrStatus is write-1-to-clear, as used throughout this file).
 */
static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
{
	RTL_W16(IntrMask, 0x0000);

	RTL_W16(IntrStatus, 0xffff);
}
546
/*
 * Quiesce the chip: stop Rx/Tx (ChipCmd = 0) and mask/ack interrupts.
 * The trailing CPlusCmd read is presumably a posted-write flush so the
 * register writes reach the device before we return — TODO confirm.
 */
static void rtl8169_asic_down(void __iomem *ioaddr)
{
	RTL_W8(ChipCmd, 0x00);
	rtl8169_irq_mask_and_ack(ioaddr);
	RTL_R16(CPlusCmd);
}
553
/* TBI variant of tp->phy_reset_pending: reset still in progress? */
static unsigned int rtl8169_tbi_reset_pending(void __iomem *ioaddr)
{
	return RTL_R32(TBICSR) & TBIReset;
}
558
/* MII variant of tp->phy_reset_pending: BMCR_RESET self-clears when done. */
static unsigned int rtl8169_xmii_reset_pending(void __iomem *ioaddr)
{
	return mdio_read(ioaddr, MII_BMCR) & BMCR_RESET;
}
563
/* TBI variant of tp->link_ok: link state from TBICSR. */
static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
{
	return RTL_R32(TBICSR) & TBILinkOk;
}
568
/* MII variant of tp->link_ok: link state from the MAC's PHYstatus register. */
static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
{
	return RTL_R8(PHYstatus) & LinkStatus;
}
573
/* TBI variant of tp->phy_reset_enable: kick off a TBI reset. */
static void rtl8169_tbi_reset_enable(void __iomem *ioaddr)
{
	RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
}
578
/*
 * MII variant of tp->phy_reset_enable: set BMCR_RESET.  The PHY clears
 * the bit itself; completion is polled via rtl8169_xmii_reset_pending.
 */
static void rtl8169_xmii_reset_enable(void __iomem *ioaddr)
{
	unsigned int val;

	/* Mask to 16 bits: mdio_read returns an int (-1 on timeout). */
	val = mdio_read(ioaddr, MII_BMCR) | BMCR_RESET;
	mdio_write(ioaddr, MII_BMCR, val & 0xffff);
}
586
/*
 * Propagate the current link state (via the per-chip tp->link_ok hook)
 * to the network stack and optionally log it, gated by the
 * NETIF_MSG_IFUP/IFDOWN bits of tp->msg_enable.  tp->lock serializes
 * against the other users of the device lock in this driver.
 */
static void rtl8169_check_link_status(struct net_device *dev,
				      struct rtl8169_private *tp, void __iomem *ioaddr)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);
	if (tp->link_ok(ioaddr)) {
		netif_carrier_on(dev);
		if (netif_msg_ifup(tp))
			printk(KERN_INFO PFX "%s: link up\n", dev->name);
	} else {
		if (netif_msg_ifdown(tp))
			printk(KERN_INFO PFX "%s: link down\n", dev->name);
		netif_carrier_off(dev);
	}
	spin_unlock_irqrestore(&tp->lock, flags);
}
604
605static void rtl8169_link_option(int idx, u8 *autoneg, u16 *speed, u8 *duplex)
606{
607 struct {
608 u16 speed;
609 u8 duplex;
610 u8 autoneg;
611 u8 media;
612 } link_settings[] = {
613 { SPEED_10, DUPLEX_HALF, AUTONEG_DISABLE, _10_Half },
614 { SPEED_10, DUPLEX_FULL, AUTONEG_DISABLE, _10_Full },
615 { SPEED_100, DUPLEX_HALF, AUTONEG_DISABLE, _100_Half },
616 { SPEED_100, DUPLEX_FULL, AUTONEG_DISABLE, _100_Full },
617 { SPEED_1000, DUPLEX_FULL, AUTONEG_DISABLE, _1000_Full },
618 /* Make TBI happy */
619 { SPEED_1000, DUPLEX_FULL, AUTONEG_ENABLE, 0xff }
620 }, *p;
621 unsigned char option;
5b0384f4 622
1da177e4
LT
623 option = ((idx < MAX_UNITS) && (idx >= 0)) ? media[idx] : 0xff;
624
b57b7e5a 625 if ((option != 0xff) && !idx && netif_msg_drv(&debug))
1da177e4
LT
626 printk(KERN_WARNING PFX "media option is deprecated.\n");
627
628 for (p = link_settings; p->media != 0xff; p++) {
629 if (p->media == option)
630 break;
631 }
632 *autoneg = p->autoneg;
633 *speed = p->speed;
634 *duplex = p->duplex;
635}
636
61a4dcc2
FR
/*
 * ethtool get_wol: report the Wake-on-LAN configuration read back from
 * the chip's Config1/Config3/Config5 registers.  If power management
 * is disabled (PMEnable clear) no wake source is reported at all.
 */
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u8 options;

	wol->wolopts = 0;

#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
	wol->supported = WAKE_ANY;

	spin_lock_irq(&tp->lock);

	options = RTL_R8(Config1);
	if (!(options & PMEnable))
		goto out_unlock;

	options = RTL_R8(Config3);
	if (options & LinkUp)
		wol->wolopts |= WAKE_PHY;
	if (options & MagicPacket)
		wol->wolopts |= WAKE_MAGIC;

	options = RTL_R8(Config5);
	if (options & UWF)
		wol->wolopts |= WAKE_UCAST;
	if (options & BWF)
		wol->wolopts |= WAKE_BCAST;
	if (options & MWF)
		wol->wolopts |= WAKE_MCAST;

out_unlock:
	spin_unlock_irq(&tp->lock);
}
671
/*
 * ethtool set_wol: program the requested wake sources.  Each table row
 * maps one WAKE_* option to a bit in a Config register; the two
 * WAKE_ANY rows additionally set PMEnable and LanWake whenever any
 * source is enabled.  Config writes are bracketed by Cfg9346
 * unlock/lock (config write-enable).
 */
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int i;
	static struct {
		u32 opt;
		u16 reg;
		u8  mask;
	} cfg[] = {
		{ WAKE_ANY,   Config1, PMEnable },
		{ WAKE_PHY,   Config3, LinkUp },
		{ WAKE_MAGIC, Config3, MagicPacket },
		{ WAKE_UCAST, Config5, UWF },
		{ WAKE_BCAST, Config5, BWF },
		{ WAKE_MCAST, Config5, MWF },
		{ WAKE_ANY,   Config5, LanWake }
	};

	spin_lock_irq(&tp->lock);

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	for (i = 0; i < ARRAY_SIZE(cfg); i++) {
		/* Read-modify-write: only touch this row's mask bit. */
		u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
		if (wol->wolopts & cfg[i].opt)
			options |= cfg[i].mask;
		RTL_W8(cfg[i].reg, options);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Cached flag — presumably consulted by suspend/shutdown paths
	 * outside this chunk; confirm against the rest of the file. */
	tp->wol_enabled = (wol->wolopts) ? 1 : 0;

	spin_unlock_irq(&tp->lock);

	return 0;
}
710
1da177e4
LT
/*
 * ethtool get_drvinfo: driver name, version string and PCI bus id.
 * Sources are short constants / pci_name(), expected to fit the
 * fixed-size ethtool_drvinfo fields.
 */
static void rtl8169_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	strcpy(info->driver, MODULENAME);
	strcpy(info->version, RTL8169_VERSION);
	strcpy(info->bus_info, pci_name(tp->pci_dev));
}
720
/* ethtool get_regs_len: the full 256-byte MMIO window is dumped. */
static int rtl8169_get_regs_len(struct net_device *dev)
{
	return R8169_REGS_SIZE;
}
725
/*
 * tp->set_speed hook for TBI (fiber) mode.  Only two settings are
 * meaningful: forced 1000/full (clear the autoneg bits) or
 * autonegotiation (TBINwEnable | TBINwRestart).  Any other request is
 * refused with -EOPNOTSUPP.
 */
static int rtl8169_set_speed_tbi(struct net_device *dev,
				 u8 autoneg, u16 speed, u8 duplex)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int ret = 0;
	u32 reg;

	reg = RTL_R32(TBICSR);
	if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
	    (duplex == DUPLEX_FULL)) {
		RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
	} else if (autoneg == AUTONEG_ENABLE)
		RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
	else {
		if (netif_msg_link(tp)) {
			printk(KERN_WARNING "%s: "
			       "incorrect speed setting refused in TBI mode\n",
			       dev->name);
		}
		ret = -EOPNOTSUPP;
	}

	return ret;
}
751
/*
 * tp->set_speed hook for copper (MII) mode.  Builds the 10/100
 * advertisement (MII_ADVERTISE) and 1000Mbps advertisement
 * (MII_CTRL1000) from the requested autoneg/speed/duplex, caches both
 * in tp->phy_*_reg for later reporting (rtl8169_gset_xmii), then
 * writes them and restarts autonegotiation.  Note that even a "forced"
 * speed is implemented by restricting what is advertised, not by
 * disabling autoneg.
 */
static int rtl8169_set_speed_xmii(struct net_device *dev,
				  u8 autoneg, u16 speed, u8 duplex)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int auto_nego, giga_ctrl;

	/* Start from the current PHY state, clearing the speed bits we own. */
	auto_nego = mdio_read(ioaddr, MII_ADVERTISE);
	auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
		       ADVERTISE_100HALF | ADVERTISE_100FULL);
	giga_ctrl = mdio_read(ioaddr, MII_CTRL1000);
	giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);

	if (autoneg == AUTONEG_ENABLE) {
		/* Advertise everything. */
		auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
			      ADVERTISE_100HALF | ADVERTISE_100FULL);
		giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
	} else {
		/* Advertise only the requested speed... */
		if (speed == SPEED_10)
			auto_nego |= ADVERTISE_10HALF | ADVERTISE_10FULL;
		else if (speed == SPEED_100)
			auto_nego |= ADVERTISE_100HALF | ADVERTISE_100FULL;
		else if (speed == SPEED_1000)
			giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;

		/* ...and only the requested duplex. */
		if (duplex == DUPLEX_HALF)
			auto_nego &= ~(ADVERTISE_10FULL | ADVERTISE_100FULL);

		if (duplex == DUPLEX_FULL)
			auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_100HALF);

		/* This tweak comes straight from Realtek's driver. */
		if ((speed == SPEED_100) && (duplex == DUPLEX_HALF) &&
		    (tp->mac_version == RTL_GIGA_MAC_VER_13)) {
			auto_nego = ADVERTISE_100HALF | ADVERTISE_CSMA;
		}
	}

	/* The 8100e/8101e do Fast Ethernet only. */
	if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_15)) {
		if ((giga_ctrl & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) &&
		    netif_msg_link(tp)) {
			printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n",
			       dev->name);
		}
		giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
	}

	/* Always advertise both pause capabilities. */
	auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

	/* Cache for rtl8169_gset_xmii / rtl8169_set_speed. */
	tp->phy_auto_nego_reg = auto_nego;
	tp->phy_1000_ctrl_reg = giga_ctrl;

	mdio_write(ioaddr, MII_ADVERTISE, auto_nego);
	mdio_write(ioaddr, MII_CTRL1000, giga_ctrl);
	mdio_write(ioaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
	return 0;
}
812
/*
 * Common speed-setting entry point: dispatch to the TBI or XMII
 * implementation via tp->set_speed.  When the interface is running and
 * 1000FULL ended up advertised, re-arm the PHY timer (tp->timer —
 * its handler is outside this chunk; presumably it monitors/works
 * around gigabit link establishment).
 */
static int rtl8169_set_speed(struct net_device *dev,
			     u8 autoneg, u16 speed, u8 duplex)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret;

	ret = tp->set_speed(dev, autoneg, speed, duplex);

	if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
		mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);

	return ret;
}
826
/* ethtool set_settings: apply the requested link mode under tp->lock. */
static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tp->lock, flags);
	ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex);
	spin_unlock_irqrestore(&tp->lock, flags);

	return ret;
}
839
/* ethtool get_rx_csum: non-zero iff RxChkSum is set in the cached CPlusCmd. */
static u32 rtl8169_get_rx_csum(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return tp->cp_cmd & RxChkSum;
}
846
847static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
848{
849 struct rtl8169_private *tp = netdev_priv(dev);
850 void __iomem *ioaddr = tp->mmio_addr;
851 unsigned long flags;
852
853 spin_lock_irqsave(&tp->lock, flags);
854
855 if (data)
856 tp->cp_cmd |= RxChkSum;
857 else
858 tp->cp_cmd &= ~RxChkSum;
859
860 RTL_W16(CPlusCmd, tp->cp_cmd);
861 RTL_R16(CPlusCmd);
862
863 spin_unlock_irqrestore(&tp->lock, flags);
864
865 return 0;
866}
867
868#ifdef CONFIG_R8169_VLAN
869
/*
 * Build the opts2 VLAN field for a Tx descriptor: TxVlanTag plus the
 * byte-swapped tag when VLAN is active and the skb carries one,
 * otherwise 0.  (The hardware expects the tag swab16'd, matching the
 * swab16 on the Rx side in rtl8169_rx_vlan_skb.)
 */
static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
				      struct sk_buff *skb)
{
	return (tp->vlgrp && vlan_tx_tag_present(skb)) ?
		TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
}
876
/*
 * VLAN group registration hook: remember the group and enable/disable
 * hardware VLAN tag stripping (RxVlan in CPlusCmd) accordingly.
 */
static void rtl8169_vlan_rx_register(struct net_device *dev,
				     struct vlan_group *grp)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);
	tp->vlgrp = grp;
	if (tp->vlgrp)
		tp->cp_cmd |= RxVlan;
	else
		tp->cp_cmd &= ~RxVlan;
	RTL_W16(CPlusCmd, tp->cp_cmd);
	RTL_R16(CPlusCmd);
	spin_unlock_irqrestore(&tp->lock, flags);
}
894
1da177e4
LT
/*
 * Hand a received skb up through the VLAN layer if the descriptor
 * carries a (byte-swapped) tag and a vlan group is registered.
 * Returns 0 when consumed here, -1 when the caller must pass the skb
 * up itself.  opts2 is cleared so the recycled descriptor starts clean.
 */
static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
			       struct sk_buff *skb)
{
	u32 opts2 = le32_to_cpu(desc->opts2);
	int ret;

	if (tp->vlgrp && (opts2 & RxVlanTag)) {
		rtl8169_rx_hwaccel_skb(skb, tp->vlgrp,
				       swab16(opts2 & 0xffff));
		ret = 0;
	} else
		ret = -1;
	desc->opts2 = 0;
	return ret;
}
910
911#else /* !CONFIG_R8169_VLAN */
912
/* !CONFIG_R8169_VLAN stub: never add a VLAN tag. */
static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
				      struct sk_buff *skb)
{
	return 0;
}
918
/* !CONFIG_R8169_VLAN stub: always tell the caller to pass the skb up. */
static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
			       struct sk_buff *skb)
{
	return -1;
}
924
925#endif
926
/*
 * tp->get_settings hook for TBI (fiber) mode: TBI is always
 * 1000/full; only the autoneg flag is read back from TBICSR.
 */
static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 status;

	cmd->supported =
		SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
	cmd->port = PORT_FIBRE;
	cmd->transceiver = XCVR_INTERNAL;

	status = RTL_R32(TBICSR);
	cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
	cmd->autoneg = !!(status & TBINwEnable);

	cmd->speed = SPEED_1000;
	cmd->duplex = DUPLEX_FULL; /* Always set */
}
945
/*
 * tp->get_settings hook for copper (MII) mode.  The advertisement is
 * reconstructed from the values cached by rtl8169_set_speed_xmii
 * (tp->phy_auto_nego_reg / tp->phy_1000_ctrl_reg); the live
 * speed/duplex/pause state is read from the MAC's PHYstatus register.
 * Note cmd->autoneg is hard-coded to 1 — consistent with the driver
 * never disabling autoneg in rtl8169_set_speed_xmii.
 */
static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u8 status;

	cmd->supported = SUPPORTED_10baseT_Half |
			 SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half |
			 SUPPORTED_100baseT_Full |
			 SUPPORTED_1000baseT_Full |
			 SUPPORTED_Autoneg |
			 SUPPORTED_TP;

	cmd->autoneg = 1;
	cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;

	if (tp->phy_auto_nego_reg & ADVERTISE_10HALF)
		cmd->advertising |= ADVERTISED_10baseT_Half;
	if (tp->phy_auto_nego_reg & ADVERTISE_10FULL)
		cmd->advertising |= ADVERTISED_10baseT_Full;
	if (tp->phy_auto_nego_reg & ADVERTISE_100HALF)
		cmd->advertising |= ADVERTISED_100baseT_Half;
	if (tp->phy_auto_nego_reg & ADVERTISE_100FULL)
		cmd->advertising |= ADVERTISED_100baseT_Full;
	if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)
		cmd->advertising |= ADVERTISED_1000baseT_Full;

	status = RTL_R8(PHYstatus);

	if (status & _1000bpsF)
		cmd->speed = SPEED_1000;
	else if (status & _100bps)
		cmd->speed = SPEED_100;
	else if (status & _10bps)
		cmd->speed = SPEED_10;

	if (status & TxFlowCtrl)
		cmd->advertising |= ADVERTISED_Asym_Pause;
	if (status & RxFlowCtrl)
		cmd->advertising |= ADVERTISED_Pause;

	/* 1000Mbps is full-duplex only on this hardware. */
	cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ?
		      DUPLEX_FULL : DUPLEX_HALF;
}
991
992static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
993{
994 struct rtl8169_private *tp = netdev_priv(dev);
995 unsigned long flags;
996
997 spin_lock_irqsave(&tp->lock, flags);
998
999 tp->get_settings(dev, cmd);
1000
1001 spin_unlock_irqrestore(&tp->lock, flags);
1002 return 0;
1003}
1004
1005static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1006 void *p)
1007{
5b0384f4
FR
1008 struct rtl8169_private *tp = netdev_priv(dev);
1009 unsigned long flags;
1da177e4 1010
5b0384f4
FR
1011 if (regs->len > R8169_REGS_SIZE)
1012 regs->len = R8169_REGS_SIZE;
1da177e4 1013
5b0384f4
FR
1014 spin_lock_irqsave(&tp->lock, flags);
1015 memcpy_fromio(p, tp->mmio_addr, regs->len);
1016 spin_unlock_irqrestore(&tp->lock, flags);
1da177e4
LT
1017}
1018
b57b7e5a
SH
1019static u32 rtl8169_get_msglevel(struct net_device *dev)
1020{
1021 struct rtl8169_private *tp = netdev_priv(dev);
1022
1023 return tp->msg_enable;
1024}
1025
1026static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1027{
1028 struct rtl8169_private *tp = netdev_priv(dev);
1029
1030 tp->msg_enable = value;
1031}
1032
d4a3a0fc
SH
/*
 * ethtool statistics names, in the exact order the values are filled
 * in by rtl8169_get_ethtool_stats() (data[0]..data[12]) - keep the two
 * in sync when adding counters.
 */
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"unicast",
	"broadcast",
	"multicast",
	"tx_aborted",
	"tx_underrun",
};
1048
/*
 * Layout of the hardware statistics block the chip DMAs to host memory
 * on a CounterDump request (see rtl8169_get_ethtool_stats).  Field
 * widths mirror the on-chip counter sizes; values arrive little-endian
 * and are converted with le*_to_cpu by the reader.
 * NOTE(review): "tx_underun" misspelling is kept - the accessor below
 * uses this name, so renaming must be done in both places at once.
 */
struct rtl8169_counters {
	u64	tx_packets;
	u64	rx_packets;
	u64	tx_errors;
	u32	rx_errors;
	u16	rx_missed;
	u16	align_errors;
	u32	tx_one_collision;
	u32	tx_multi_collision;
	u64	rx_unicast;
	u64	rx_broadcast;
	u32	rx_multicast;
	u16	tx_aborted;
	u16	tx_underun;
};
1064
/* ethtool get_stats_count: number of exported statistics strings/values. */
static int rtl8169_get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(rtl8169_gstrings);
}
1069
/*
 * ethtool get_ethtool_stats: ask the chip to DMA its hardware counter
 * block into a coherent buffer, then unpack it into the u64 array in
 * the order of rtl8169_gstrings.
 *
 * The dump is triggered by writing the buffer's bus address to
 * CounterAddrHigh/Low and setting the CounterDump bit in the low word;
 * the chip clears CounterDump when the DMA is complete.
 */
static void rtl8169_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct rtl8169_counters *counters;
	dma_addr_t paddr;
	u32 cmd;

	ASSERT_RTNL();

	counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr);
	if (!counters)
		return;

	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
	cmd = (u64)paddr & DMA_32BIT_MASK;
	RTL_W32(CounterAddrLow, cmd);
	RTL_W32(CounterAddrLow, cmd | CounterDump);

	/* Poll for dump completion; bail out if the caller is signalled.
	 * NOTE(review): on interrupt the buffer may be partially filled
	 * and is reported anyway - confirm this is acceptable. */
	while (RTL_R32(CounterAddrLow) & CounterDump) {
		if (msleep_interruptible(1))
			break;
	}

	RTL_W32(CounterAddrLow, 0);
	RTL_W32(CounterAddrHigh, 0);

	/* Indices must match rtl8169_gstrings[]. */
	data[0] = le64_to_cpu(counters->tx_packets);
	data[1] = le64_to_cpu(counters->rx_packets);
	data[2] = le64_to_cpu(counters->tx_errors);
	data[3] = le32_to_cpu(counters->rx_errors);
	data[4] = le16_to_cpu(counters->rx_missed);
	data[5] = le16_to_cpu(counters->align_errors);
	data[6] = le32_to_cpu(counters->tx_one_collision);
	data[7] = le32_to_cpu(counters->tx_multi_collision);
	data[8] = le64_to_cpu(counters->rx_unicast);
	data[9] = le64_to_cpu(counters->rx_broadcast);
	data[10] = le32_to_cpu(counters->rx_multicast);
	data[11] = le16_to_cpu(counters->tx_aborted);
	data[12] = le16_to_cpu(counters->tx_underun);

	pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr);
}
1114
1115static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1116{
1117 switch(stringset) {
1118 case ETH_SS_STATS:
1119 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1120 break;
1121 }
1122}
1123
1124
/* ethtool operations exported by the driver (hooked up via
 * SET_ETHTOOL_OPS in rtl8169_init_one). */
static const struct ethtool_ops rtl8169_ethtool_ops = {
	.get_drvinfo		= rtl8169_get_drvinfo,
	.get_regs_len		= rtl8169_get_regs_len,
	.get_link		= ethtool_op_get_link,
	.get_settings		= rtl8169_get_settings,
	.set_settings		= rtl8169_set_settings,
	.get_msglevel		= rtl8169_get_msglevel,
	.set_msglevel		= rtl8169_set_msglevel,
	.get_rx_csum		= rtl8169_get_rx_csum,
	.set_rx_csum		= rtl8169_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= ethtool_op_set_tso,
	.get_regs		= rtl8169_get_regs,
	.get_wol		= rtl8169_get_wol,
	.set_wol		= rtl8169_set_wol,
	.get_strings		= rtl8169_get_strings,
	.get_stats_count	= rtl8169_get_stats_count,
	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
1149
1150static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum,
1151 int bitval)
1152{
1153 int val;
1154
1155 val = mdio_read(ioaddr, reg);
1156 val = (bitval == 1) ?
1157 val | (bitval << bitnum) : val & ~(0x0001 << bitnum);
5b0384f4 1158 mdio_write(ioaddr, reg, val & 0xffff);
1da177e4
LT
1159}
1160
/*
 * Identify the MAC revision from the masked TxConfig register.
 * The table is ordered from most- to least-specific mask; the final
 * all-zero entry always matches, so the scan cannot run off the end.
 */
static void rtl8169_get_mac_version(struct rtl8169_private *tp, void __iomem *ioaddr)
{
	const struct {
		u32 mask;
		int mac_version;
	} mac_info[] = {
		{ 0x38800000,	RTL_GIGA_MAC_VER_15 },
		{ 0x38000000,	RTL_GIGA_MAC_VER_12 },
		{ 0x34000000,	RTL_GIGA_MAC_VER_13 },
		{ 0x30800000,	RTL_GIGA_MAC_VER_14 },
		{ 0x30000000,	RTL_GIGA_MAC_VER_11 },
		{ 0x18000000,	RTL_GIGA_MAC_VER_05 },
		{ 0x10000000,	RTL_GIGA_MAC_VER_04 },
		{ 0x04000000,	RTL_GIGA_MAC_VER_03 },
		{ 0x00800000,	RTL_GIGA_MAC_VER_02 },
		{ 0x00000000,	RTL_GIGA_MAC_VER_01 }	/* Catch-all */
	}, *p = mac_info;
	u32 reg;

	/* Only the version-identifying bits of TxConfig are compared. */
	reg = RTL_R32(TxConfig) & 0x7c800000;
	while ((reg & p->mask) != p->mask)
		p++;
	tp->mac_version = p->mac_version;
}
1185
/* Debug helper: log the detected MAC version (no-op unless dprintk is enabled). */
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
	dprintk("mac_version = 0x%02x\n", tp->mac_version);
}
1190
/*
 * Identify the PHY revision from the low bits of the MII PHYSID2
 * register.  Table ends with a mask of 0, which matches anything,
 * so the scan always terminates.
 */
static void rtl8169_get_phy_version(struct rtl8169_private *tp, void __iomem *ioaddr)
{
	const struct {
		u16 mask;
		u16 set;
		int phy_version;
	} phy_info[] = {
		{ 0x000f, 0x0002, RTL_GIGA_PHY_VER_G },
		{ 0x000f, 0x0001, RTL_GIGA_PHY_VER_F },
		{ 0x000f, 0x0000, RTL_GIGA_PHY_VER_E },
		{ 0x0000, 0x0000, RTL_GIGA_PHY_VER_D } /* Catch-all */
	}, *p = phy_info;
	u16 reg;

	reg = mdio_read(ioaddr, MII_PHYSID2) & 0xffff;
	while ((reg & p->mask) != p->set)
		p++;
	tp->phy_version = p->phy_version;
}
1210
/* Debug helper: log the detected PHY version by name, or "Unknown". */
static void rtl8169_print_phy_version(struct rtl8169_private *tp)
{
	struct {
		int version;
		char *msg;
		u32 reg;
	} phy_print[] = {
		{ RTL_GIGA_PHY_VER_G, "RTL_GIGA_PHY_VER_G", 0x0002 },
		{ RTL_GIGA_PHY_VER_F, "RTL_GIGA_PHY_VER_F", 0x0001 },
		{ RTL_GIGA_PHY_VER_E, "RTL_GIGA_PHY_VER_E", 0x0000 },
		{ RTL_GIGA_PHY_VER_D, "RTL_GIGA_PHY_VER_D", 0x0000 },
		{ 0, NULL, 0x0000 }
	}, *p;

	for (p = phy_print; p->msg; p++) {
		if (tp->phy_version == p->version) {
			dprintk("phy_version == %s (%04x)\n", p->msg, p->reg);
			return;
		}
	}
	dprintk("phy_version == Unknown\n");
}
1233
/*
 * Apply vendor-supplied PHY register fixups.  The phy_magic table is an
 * opaque sequence of MDIO writes taken from Realtek's reference driver
 * (the //w comments preserve the original "write reg msb lsb value"
 * notation) - do not reorder or "simplify" these writes.
 */
static void rtl8169_hw_phy_config(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct {
		u16 regs[5]; /* Beware of bit-sign propagation */
	} phy_magic[5] = { {
		{ 0x0000,	//w 4 15 12 0
		  0x00a1,	//w 3 15 0 00a1
		  0x0008,	//w 2 15 0 0008
		  0x1020,	//w 1 15 0 1020
		  0x1000 } },{	//w 0 15 0 1000
		{ 0x7000,	//w 4 15 12 7
		  0xff41,	//w 3 15 0 ff41
		  0xde60,	//w 2 15 0 de60
		  0x0140,	//w 1 15 0 0140
		  0x0077 } },{	//w 0 15 0 0077
		{ 0xa000,	//w 4 15 12 a
		  0xdf01,	//w 3 15 0 df01
		  0xdf20,	//w 2 15 0 df20
		  0xff95,	//w 1 15 0 ff95
		  0xfa00 } },{	//w 0 15 0 fa00
		{ 0xb000,	//w 4 15 12 b
		  0xff41,	//w 3 15 0 ff41
		  0xde20,	//w 2 15 0 de20
		  0x0140,	//w 1 15 0 0140
		  0x00bb } },{	//w 0 15 0 00bb
		{ 0xf000,	//w 4 15 12 f
		  0xdf01,	//w 3 15 0 df01
		  0xdf20,	//w 2 15 0 df20
		  0xff95,	//w 1 15 0 ff95
		  0xbf00 }	//w 0 15 0 bf00
		}
	}, *p = phy_magic;
	int i;

	rtl8169_print_mac_version(tp);
	rtl8169_print_phy_version(tp);

	/* Only needed on older MACs with older PHY revisions. */
	if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
		return;
	if (tp->phy_version >= RTL_GIGA_PHY_VER_H)
		return;

	dprintk("MAC version != 0 && PHY version == 0 or 1\n");
	dprintk("Do final_reg2.cfg\n");

	/* Shazam ! */

	if (tp->mac_version == RTL_GIGA_MAC_VER_04) {
		mdio_write(ioaddr, 31, 0x0002);
		mdio_write(ioaddr,  1, 0x90d0);
		mdio_write(ioaddr, 31, 0x0000);
		return;
	}

	/* phy config for RTL8169s mac_version C chip */
	mdio_write(ioaddr, 31, 0x0001);			//w 31 2 0 1
	mdio_write(ioaddr, 21, 0x1000);			//w 21 15 0 1000
	mdio_write(ioaddr, 24, 0x65c7);			//w 24 15 0 65c7
	rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0);	//w 4 11 11 0

	for (i = 0; i < ARRAY_SIZE(phy_magic); i++, p++) {
		int val, pos = 4;

		/* reg 4: merge the new high nibble into the current value,
		 * then write regs 3..0, latching each row with bit 11. */
		val = (mdio_read(ioaddr, pos) & 0x0fff) | (p->regs[0] & 0xffff);
		mdio_write(ioaddr, pos, val);
		while (--pos >= 0)
			mdio_write(ioaddr, pos, p->regs[4 - pos] & 0xffff);
		rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 1); //w 4 11 11 1
		rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0
	}
	mdio_write(ioaddr, 31, 0x0000); //w 31 2 0 0
}
1308
/*
 * Periodic PHY watchdog (armed by rtl8169_request_timer).  While 1000
 * Full is advertised but the link is down, keep kicking a PHY reset
 * until the link comes up.  Re-arms itself as needed.
 */
static void rtl8169_phy_timer(unsigned long __opaque)
{
	struct net_device *dev = (struct net_device *)__opaque;
	struct rtl8169_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long timeout = RTL8169_PHY_TIMEOUT;

	/* The timer is only ever armed for this MAC/PHY combination. */
	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
	assert(tp->phy_version < RTL_GIGA_PHY_VER_H);

	/* Nothing to do unless 1000 Full is being advertised. */
	if (!(tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
		return;

	spin_lock_irq(&tp->lock);

	if (tp->phy_reset_pending(ioaddr)) {
		/*
		 * A busy loop could burn quite a few cycles on nowadays CPU.
		 * Let's delay the execution of the timer for a few ticks.
		 */
		timeout = HZ/10;
		goto out_mod_timer;
	}

	if (tp->link_ok(ioaddr))
		goto out_unlock;

	if (netif_msg_link(tp))
		printk(KERN_WARNING "%s: PHY reset until link up\n", dev->name);

	tp->phy_reset_enable(ioaddr);

out_mod_timer:
	mod_timer(timer, jiffies + timeout);
out_unlock:
	spin_unlock_irq(&tp->lock);
}
1347
1348static inline void rtl8169_delete_timer(struct net_device *dev)
1349{
1350 struct rtl8169_private *tp = netdev_priv(dev);
1351 struct timer_list *timer = &tp->timer;
1352
bcf0bf90 1353 if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) ||
1da177e4
LT
1354 (tp->phy_version >= RTL_GIGA_PHY_VER_H))
1355 return;
1356
1357 del_timer_sync(timer);
1358}
1359
1360static inline void rtl8169_request_timer(struct net_device *dev)
1361{
1362 struct rtl8169_private *tp = netdev_priv(dev);
1363 struct timer_list *timer = &tp->timer;
1364
bcf0bf90 1365 if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) ||
1da177e4
LT
1366 (tp->phy_version >= RTL_GIGA_PHY_VER_H))
1367 return;
1368
2efa53f3 1369 mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT);
1da177e4
LT
1370}
1371
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	/* Run the ISR by hand with the real IRQ masked off. */
	disable_irq(pdev->irq);
	rtl8169_interrupt(pdev->irq, dev);
	enable_irq(pdev->irq);
}
#endif
1388
/*
 * Undo the probe-time board setup: unmap MMIO, release the PCI
 * regions, disable the device and free the netdev.  Order mirrors
 * the reverse of rtl8169_init_one's acquisition order.
 */
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
				  void __iomem *ioaddr)
{
	iounmap(ioaddr);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
1397
bf793295
FR
1398static void rtl8169_phy_reset(struct net_device *dev,
1399 struct rtl8169_private *tp)
1400{
1401 void __iomem *ioaddr = tp->mmio_addr;
1402 int i;
1403
1404 tp->phy_reset_enable(ioaddr);
1405 for (i = 0; i < 100; i++) {
1406 if (!tp->phy_reset_pending(ioaddr))
1407 return;
1408 msleep(1);
1409 }
1410 if (netif_msg_link(tp))
1411 printk(KERN_ERR "%s: PHY reset failed.\n", dev->name);
1412}
1413
4ff96fa6
FR
/*
 * One-time PHY bring-up during probe: apply the vendor register
 * fixups, a few chip-revision-specific MAC/PCI tweaks, then reset the
 * PHY and program the speed/duplex selected by the module parameters.
 * NOTE(review): the static board_idx makes this non-reentrant; it
 * relies on probes being serialized - confirm against the PCI core.
 */
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static int board_idx = -1;
	u8 autoneg, duplex;
	u16 speed;

	/* Per-board index used to pick the matching module parameters. */
	board_idx++;

	rtl8169_hw_phy_config(dev);

	dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
	RTL_W8(0x82, 0x01);

	if (tp->mac_version < RTL_GIGA_MAC_VER_03) {
		dprintk("Set PCI Latency=0x40\n");
		pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
	}

	if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
		dprintk("Set PHY Reg 0x0bh = 0x00h\n");
		mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0
	}

	rtl8169_link_option(board_idx, &autoneg, &speed, &duplex);

	rtl8169_phy_reset(dev, tp);

	rtl8169_set_speed(dev, autoneg, speed, duplex);

	if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
		printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name);
}
1449
5f787a1a
FR
/*
 * MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG), installed
 * only for non-TBI boards.  Register writes require CAP_NET_ADMIN;
 * everything else while the interface is down returns -ENODEV.
 */
static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (!netif_running(dev))
		return -ENODEV;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = 32; /* Internal PHY */
		return 0;

	case SIOCGMIIREG:
		data->val_out = mdio_read(tp->mmio_addr, data->reg_num & 0x1f);
		return 0;

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		mdio_write(tp->mmio_addr, data->reg_num & 0x1f, data->val_in);
		return 0;
	}
	return -EOPNOTSUPP;
}
1475
/*
 * PCI probe entry point.  Allocates the netdev, enables and maps the
 * device, soft-resets the chip, identifies the MAC/PHY revisions,
 * installs either the TBI or XMII operation set, wires up the netdev
 * callbacks and registers the interface.  Errors unwind through the
 * numbered goto ladder at the bottom (reverse acquisition order).
 */
static int __devinit
rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const unsigned int region = rtl_cfg_info[ent->driver_data].region;
	struct rtl8169_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	unsigned int pm_cap;
	int i, rc;

	if (netif_msg_drv(&debug)) {
		printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
		       MODULENAME, RTL8169_VERSION);
	}

	dev = alloc_etherdev(sizeof (*tp));
	if (!dev) {
		if (netif_msg_drv(&debug))
			dev_err(&pdev->dev, "unable to alloc new ethernet\n");
		rc = -ENOMEM;
		goto out;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);

	/* enable device (incl. PCI PM wakeup and hotplug setup) */
	rc = pci_enable_device(pdev);
	if (rc < 0) {
		if (netif_msg_probe(tp))
			dev_err(&pdev->dev, "enable failure\n");
		goto err_out_free_dev_1;
	}

	rc = pci_set_mwi(pdev);
	if (rc < 0)
		goto err_out_disable_2;

	/* save power state before pci_enable_device overwrites it */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap) {
		u16 pwr_command, acpi_idle_state;

		pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command);
		acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
	} else {
		/* Missing PM capability is reported but not fatal. */
		if (netif_msg_probe(tp)) {
			dev_err(&pdev->dev,
				"PowerManagement capability not found.\n");
		}
	}

	/* make sure PCI base addr 1 is MMIO */
	if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
		if (netif_msg_probe(tp)) {
			dev_err(&pdev->dev,
				"region #%d not an MMIO resource, aborting\n",
				region);
		}
		rc = -ENODEV;
		goto err_out_mwi_3;
	}

	/* check for weird/broken PCI region reporting */
	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
		if (netif_msg_probe(tp)) {
			dev_err(&pdev->dev,
				"Invalid PCI region size(s), aborting\n");
		}
		rc = -ENODEV;
		goto err_out_mwi_3;
	}

	rc = pci_request_regions(pdev, MODULENAME);
	if (rc < 0) {
		if (netif_msg_probe(tp))
			dev_err(&pdev->dev, "could not request regions.\n");
		goto err_out_mwi_3;
	}

	tp->cp_cmd = PCIMulRW | RxChkSum;

	/* Prefer 64-bit DMA when available and requested via use_dac. */
	if ((sizeof(dma_addr_t) > 4) &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK) && use_dac) {
		tp->cp_cmd |= PCIDAC;
		dev->features |= NETIF_F_HIGHDMA;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc < 0) {
			if (netif_msg_probe(tp)) {
				dev_err(&pdev->dev,
					"DMA configuration failed.\n");
			}
			goto err_out_free_res_4;
		}
	}

	pci_set_master(pdev);

	/* ioremap MMIO region */
	ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
	if (!ioaddr) {
		if (netif_msg_probe(tp))
			dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
		rc = -EIO;
		goto err_out_free_res_4;
	}

	/* Unneeded ? Don't mess with Mrs. Murphy. */
	rtl8169_irq_mask_and_ack(ioaddr);

	/* Soft reset the chip. */
	RTL_W8(ChipCmd, CmdReset);

	/* Check that the chip has finished the reset. */
	for (i = 100; i > 0; i--) {
		if ((RTL_R8(ChipCmd) & CmdReset) == 0)
			break;
		msleep_interruptible(1);
	}

	/* Identify chip attached to board */
	rtl8169_get_mac_version(tp, ioaddr);
	rtl8169_get_phy_version(tp, ioaddr);

	rtl8169_print_mac_version(tp);
	rtl8169_print_phy_version(tp);

	for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) {
		if (tp->mac_version == rtl_chip_info[i].mac_version)
			break;
	}
	if (i < 0) {
		/* Unknown chip: assume array element #0, original RTL-8169 */
		if (netif_msg_probe(tp)) {
			dev_printk(KERN_DEBUG, &pdev->dev,
				   "unknown chip version, assuming %s\n",
				   rtl_chip_info[0].name);
		}
		i++;
	}
	tp->chipset = i;

	/* Enable PME, clear stale PME status (config regs are EEPROM-locked). */
	RTL_W8(Cfg9346, Cfg9346_Unlock);
	RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
	RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Select the TBI (fibre) or XMII (copper) operation set. */
	if (RTL_R8(PHYstatus) & TBI_Enable) {
		tp->set_speed = rtl8169_set_speed_tbi;
		tp->get_settings = rtl8169_gset_tbi;
		tp->phy_reset_enable = rtl8169_tbi_reset_enable;
		tp->phy_reset_pending = rtl8169_tbi_reset_pending;
		tp->link_ok = rtl8169_tbi_link_ok;

		tp->phy_1000_ctrl_reg = ADVERTISE_1000FULL; /* Implied by TBI */
	} else {
		tp->set_speed = rtl8169_set_speed_xmii;
		tp->get_settings = rtl8169_gset_xmii;
		tp->phy_reset_enable = rtl8169_xmii_reset_enable;
		tp->phy_reset_pending = rtl8169_xmii_reset_pending;
		tp->link_ok = rtl8169_xmii_link_ok;

		dev->do_ioctl = rtl8169_ioctl;
	}

	/* Get MAC address.  FIXME: read EEPROM */
	for (i = 0; i < MAC_ADDR_LEN; i++)
		dev->dev_addr[i] = RTL_R8(MAC0 + i);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->open = rtl8169_open;
	dev->hard_start_xmit = rtl8169_start_xmit;
	dev->get_stats = rtl8169_get_stats;
	SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
	dev->stop = rtl8169_close;
	dev->tx_timeout = rtl8169_tx_timeout;
	dev->set_multicast_list = rtl_set_rx_mode;
	dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) ioaddr;
	dev->change_mtu = rtl8169_change_mtu;

#ifdef CONFIG_R8169_NAPI
	dev->poll = rtl8169_poll;
	dev->weight = R8169_NAPI_WEIGHT;
#endif

#ifdef CONFIG_R8169_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = rtl8169_vlan_rx_register;
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = rtl8169_netpoll;
#endif

	tp->intr_mask = 0xffff;
	tp->pci_dev = pdev;
	tp->mmio_addr = ioaddr;
	tp->align = rtl_cfg_info[ent->driver_data].align;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) dev;
	tp->timer.function = rtl8169_phy_timer;

	/* Per-family hardware start routine (8169/8168/8101). */
	tp->hw_start = rtl_cfg_info[ent->driver_data].hw_start;

	spin_lock_init(&tp->lock);

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_out_unmap_5;

	pci_set_drvdata(pdev, dev);

	if (netif_msg_probe(tp)) {
		printk(KERN_INFO "%s: %s at 0x%lx, "
		       "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
		       "IRQ %d\n",
		       dev->name,
		       rtl_chip_info[tp->chipset].name,
		       dev->base_addr,
		       dev->dev_addr[0], dev->dev_addr[1],
		       dev->dev_addr[2], dev->dev_addr[3],
		       dev->dev_addr[4], dev->dev_addr[5], dev->irq);
	}

	rtl8169_init_phy(dev, tp);

out:
	return rc;

err_out_unmap_5:
	iounmap(ioaddr);
err_out_free_res_4:
	pci_release_regions(pdev);
err_out_mwi_3:
	pci_clear_mwi(pdev);
err_out_disable_2:
	pci_disable_device(pdev);
err_out_free_dev_1:
	free_netdev(dev);
	goto out;
}
1724
/*
 * PCI remove entry point: flush any pending driver work, unregister
 * the interface and release all board resources acquired at probe.
 */
static void __devexit
rtl8169_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	assert(dev != NULL);
	assert(tp != NULL);

	/* Make sure no deferred task (tp->task) is still in flight. */
	flush_scheduled_work();

	unregister_netdev(dev);
	rtl8169_release_board(pdev, dev, tp->mmio_addr);
	pci_set_drvdata(pdev, NULL);
}
1740
1da177e4
LT
1741static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
1742 struct net_device *dev)
1743{
1744 unsigned int mtu = dev->mtu;
1745
1746 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1747}
1748
/*
 * net_device open: allocate the Tx/Rx descriptor rings and buffers,
 * request the (shared) IRQ, start the hardware and arm the PHY
 * watchdog.  Failures unwind through the numbered goto ladder.
 */
static int rtl8169_open(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int retval = -ENOMEM;


	rtl8169_set_rxbufsize(tp, dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent provides more.
	 */
	tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
					       &tp->TxPhyAddr);
	if (!tp->TxDescArray)
		goto out;

	tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
					       &tp->RxPhyAddr);
	if (!tp->RxDescArray)
		goto err_free_tx_0;

	retval = rtl8169_init_ring(dev);
	if (retval < 0)
		goto err_free_rx_1;

	INIT_DELAYED_WORK(&tp->task, NULL);

	/* Ensure ring setup is visible before the IRQ can fire. */
	smp_mb();

	retval = request_irq(dev->irq, rtl8169_interrupt, IRQF_SHARED,
			     dev->name, dev);
	if (retval < 0)
		goto err_release_ring_2;

	rtl_hw_start(dev);

	rtl8169_request_timer(dev);

	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
out:
	return retval;

err_release_ring_2:
	rtl8169_rx_clear(tp);
err_free_rx_1:
	pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
			    tp->RxPhyAddr);
err_free_tx_0:
	pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
			    tp->TxPhyAddr);
	goto out;
}
1803
/*
 * Quiesce the chip: mask/ack all interrupts, issue a soft reset and
 * flush it to the bus with a posted-write read-back.  Does not wait
 * for the reset to complete.
 */
static void rtl8169_hw_reset(void __iomem *ioaddr)
{
	/* Disable interrupts */
	rtl8169_irq_mask_and_ack(ioaddr);

	/* Reset the chipset */
	RTL_W8(ChipCmd, CmdReset);

	/* PCI commit */
	RTL_R8(ChipCmd);
}
1815
/*
 * Program RxConfig (base config merged with the chipset-specific mask
 * of the current register value) and TxConfig (DMA burst size and
 * interframe gap).
 */
static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 cfg = rtl8169_rx_config;

	cfg |= (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
	RTL_W32(RxConfig, cfg);

	/* Set DMA burst size and Interframe Gap Time */
	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));
}
1828
/*
 * Common hardware (re)start path: soft-reset the chip, wait (up to
 * ~100 ms) for the reset to complete, run the per-family hw_start
 * hook installed at probe time, then unmask interrupts and wake the
 * transmit queue.
 */
static void rtl_hw_start(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 i;

	/* Soft reset the chip. */
	RTL_W8(ChipCmd, CmdReset);

	/* Check that the chip has finished the reset. */
	for (i = 100; i > 0; i--) {
		if ((RTL_R8(ChipCmd) & CmdReset) == 0)
			break;
		msleep_interruptible(1);
	}

	/* Family-specific setup: rtl_hw_start_8169/8168/8101. */
	tp->hw_start(dev);

	/* Enable all known interrupts by setting the interrupt mask. */
	RTL_W16(IntrMask, rtl8169_intr_mask);

	netif_start_queue(dev);
}
1852
1853
7f796d83
FR
/*
 * Tell the chip where the Tx/Rx descriptor rings live.  The high
 * halves must be written first (see comment below).
 */
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
					 void __iomem *ioaddr)
{
	/*
	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_32BIT_MASK);
	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_32BIT_MASK);
}
1867
1868static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
1869{
1870 u16 cmd;
1871
1872 cmd = RTL_R16(CPlusCmd);
1873 RTL_W16(CPlusCmd, cmd);
1874 return cmd;
1875}
1876
/* Program the maximum accepted receive frame size. */
static void rtl_set_rx_max_size(void __iomem *ioaddr)
{
	/* Low hurts. Let's disable the filtering. */
	RTL_W16(RxMaxSize, 16383);
}
1882
07ce4064
FR
/*
 * Hardware start routine for the original 8169 family.  Mostly a
 * straight transcription of the vendor init sequence; several steps
 * are gated on the exact MAC revision, and on older revisions (VER_01
 * to VER_04) Tx/Rx must be enabled *before* the config registers are
 * written, while newer ones do it afterwards.
 */
static void rtl_hw_start_8169(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	u16 cmd;

	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
	}

	if (tp->mac_version == RTL_GIGA_MAC_VER_13) {
		pci_write_config_word(pdev, 0x68, 0x00);
		pci_write_config_word(pdev, 0x69, 0x08);
	}

	/* Undocumented stuff. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		/* Realtek's r1000_n.c driver uses '&& 0x01' here. Well... */
		if ((RTL_R8(Config2) & 0x07) & 0x01)
			RTL_W32(0x7c, 0x0007ffff);

		RTL_W32(0x7c, 0x0007ff00);

		/* Clear bit 4 of the PCI command register. */
		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
		cmd = cmd & 0xef;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	/* Old revisions: enable Tx/Rx before touching the config regs. */
	if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_04))
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	RTL_W8(EarlyTxThres, EarlyTxThld);

	rtl_set_rx_max_size(ioaddr);

	if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_04))
		rtl_set_rx_tx_config_registers(tp);

	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;

	if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
		dprintk(KERN_INFO PFX "Set MAC Reg C+CR Offset 0xE0. "
			"Bit-3 and bit-14 MUST be 1\n");
		tp->cp_cmd |= (1 << 14);
	}

	RTL_W16(CPlusCmd, tp->cp_cmd);

	/*
	 * Undocumented corner. Supposedly:
	 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
	 */
	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	/* Newer revisions: enable Tx/Rx and configure only now. */
	if ((tp->mac_version != RTL_GIGA_MAC_VER_01) &&
	    (tp->mac_version != RTL_GIGA_MAC_VER_02) &&
	    (tp->mac_version != RTL_GIGA_MAC_VER_03) &&
	    (tp->mac_version != RTL_GIGA_MAC_VER_04)) {
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
		rtl_set_rx_tx_config_registers(tp);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
1da177e4 1969
07ce4064
FR
/*
 * Hardware start routine for the 8168 family: the common subset of the
 * 8169 sequence, without the per-revision special cases.  Tx/Rx are
 * enabled before the Rx/Tx config registers are programmed.
 */
static void rtl_hw_start_8168(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(EarlyTxThres, EarlyTxThld);

	rtl_set_rx_max_size(ioaddr);

	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;

	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	rtl_set_rx_tx_config_registers(tp);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Posted-write flush (see rtl_hw_start_8169). */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
1da177e4 2002
07ce4064
FR
/* Hardware start for the 8101 family: currently identical to the 8169 path. */
static void rtl_hw_start_8101(struct net_device *dev)
{
	rtl_hw_start_8169(dev);
}
2007
/*
 * net_device change_mtu: validate the new MTU, and if the interface is
 * running, tear the rings down and rebuild them with buffers sized for
 * the new MTU before restarting the hardware.
 */
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret = 0;

	if (new_mtu < ETH_ZLEN || new_mtu > SafeMtu)
		return -EINVAL;

	dev->mtu = new_mtu;

	/* Nothing else to do while the interface is down. */
	if (!netif_running(dev))
		goto out;

	rtl8169_down(dev);

	rtl8169_set_rxbufsize(tp, dev);

	/* NOTE(review): if re-allocating the rings fails here, the
	 * interface is left down with the new MTU set - the error is
	 * propagated to the caller. */
	ret = rtl8169_init_ring(dev);
	if (ret < 0)
		goto out;

	netif_poll_enable(dev);

	rtl_hw_start(dev);

	rtl8169_request_timer(dev);

out:
	return ret;
}
2038
/*
 * Poison an Rx descriptor so the chip will never use it: clear DescOwn
 * (and the reserved bits) and point it at an obviously bogus address.
 */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = 0x0badbadbadbadbadull;
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
2044
/*
 * Release one Rx buffer: unmap its DMA mapping, free the skb, clear the
 * caller's reference and make the descriptor unusable by the chip.
 */
static void rtl8169_free_rx_skb(struct rtl8169_private *tp,
				struct sk_buff **sk_buff, struct RxDesc *desc)
{
	struct pci_dev *pdev = tp->pci_dev;

	pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb(*sk_buff);
	*sk_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}
2056
/*
 * Hand an Rx descriptor (back) to the chip: set DescOwn and the buffer
 * size while preserving the descriptor's RingEnd marker.
 */
static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;

	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}
2063
/*
 * Attach a freshly mapped buffer to an Rx descriptor. The write barrier
 * ensures the address is visible before DescOwn is set by
 * rtl8169_mark_to_asic().
 */
static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	wmb();
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}
2071
15d31758
SH
/*
 * Allocate and DMA-map one Rx buffer and attach it to @desc.
 *
 * The skb data area is over-allocated by @align bytes so the payload
 * can be placed on an @align boundary via skb_reserve().
 *
 * Returns the new skb, or NULL on allocation failure, in which case the
 * descriptor is poisoned so the chip cannot use it.
 */
static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
					    struct net_device *dev,
					    struct RxDesc *desc, int rx_buf_sz,
					    unsigned int align)
{
	struct sk_buff *skb;
	dma_addr_t mapping;

	skb = netdev_alloc_skb(dev, rx_buf_sz + align);
	if (!skb)
		goto err_out;

	/* Align the data pointer on an @align boundary. */
	skb_reserve(skb, (align - 1) & (unsigned long)skb->data);

	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
				 PCI_DMA_FROMDEVICE);

	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
out:
	return skb;

err_out:
	rtl8169_make_unusable_by_asic(desc);
	goto out;
}
2097
2098static void rtl8169_rx_clear(struct rtl8169_private *tp)
2099{
2100 int i;
2101
2102 for (i = 0; i < NUM_RX_DESC; i++) {
2103 if (tp->Rx_skbuff[i]) {
2104 rtl8169_free_rx_skb(tp, tp->Rx_skbuff + i,
2105 tp->RxDescArray + i);
2106 }
2107 }
2108}
2109
/*
 * Refill the Rx ring with fresh buffers over the window [@start, @end).
 *
 * Indices are free-running and reduced modulo NUM_RX_DESC to find the
 * ring slot. Slots which still hold a buffer are skipped. Filling stops
 * early on the first allocation failure.
 *
 * Returns the number of descriptors walked (including skipped slots).
 */
static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
			   u32 start, u32 end)
{
	u32 cur;

	for (cur = start; end - cur != 0; cur++) {
		struct sk_buff *skb;
		unsigned int i = cur % NUM_RX_DESC;

		/* Catch callers passing a window that wrapped backwards. */
		WARN_ON((s32)(end - cur) < 0);

		if (tp->Rx_skbuff[i])
			continue;

		skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
					   tp->RxDescArray + i,
					   tp->rx_buf_sz, tp->align);
		if (!skb)
			break;

		tp->Rx_skbuff[i] = skb;
	}
	return cur - start;
}
2134
/* Flag @desc as the last descriptor of the Rx ring (RingEnd/EOR bit). */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
2139
/* Reset the Tx/Rx producer and consumer ring indexes. */
static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
{
	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}
2144
/*
 * (Re)initialize the Tx and Rx rings: reset the indexes, clear the
 * bookkeeping arrays and populate the whole Rx ring with buffers.
 *
 * Returns 0 on success, or -ENOMEM if the Rx ring could not be filled
 * completely (everything allocated so far is released again).
 */
static int rtl8169_init_ring(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_ring_indexes(tp);

	memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));

	if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
		goto err_out;

	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);

	return 0;

err_out:
	rtl8169_rx_clear(tp);
	return -ENOMEM;
}
2165
/*
 * Unmap one Tx buffer and scrub its descriptor and ring_info entry so
 * the slot can be reused.
 */
static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb,
				 struct TxDesc *desc)
{
	unsigned int len = tx_skb->len;

	pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
	desc->opts1 = 0x00;
	desc->opts2 = 0x00;
	desc->addr = 0x00;
	tx_skb->len = 0;
}
2177
/*
 * Drop every pending Tx buffer and reset the Tx indexes. Each dropped
 * entry is accounted as tx_dropped. The skb pointer is only stored on
 * one ring entry per packet, hence the NULL check before freeing.
 */
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = tp->dirty_tx; i < tp->dirty_tx + NUM_TX_DESC; i++) {
		unsigned int entry = i % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		unsigned int len = tx_skb->len;

		if (len) {
			struct sk_buff *skb = tx_skb->skb;

			rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb,
					     tp->TxDescArray + entry);
			if (skb) {
				dev_kfree_skb(skb);
				tx_skb->skb = NULL;
			}
			tp->stats.tx_dropped++;
		}
	}
	tp->cur_tx = tp->dirty_tx = 0;
}
2201
/*
 * Schedule @task to run from the driver's shared delayed work item,
 * 4 jiffies from now.
 */
static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	PREPARE_DELAYED_WORK(&tp->task, task);
	schedule_delayed_work(&tp->task, 4);
}
2209
/*
 * Wait until neither the interrupt handler nor the NAPI poll handler is
 * running, leaving the chip's interrupts masked and acked on return.
 */
static void rtl8169_wait_for_quiescence(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	synchronize_irq(dev->irq);

	/* Wait for any pending NAPI task to complete */
	netif_poll_disable(dev);

	rtl8169_irq_mask_and_ack(ioaddr);

	netif_poll_enable(dev);
}
2224
c4028958 2225static void rtl8169_reinit_task(struct work_struct *work)
1da177e4 2226{
c4028958
DH
2227 struct rtl8169_private *tp =
2228 container_of(work, struct rtl8169_private, task.work);
2229 struct net_device *dev = tp->dev;
1da177e4
LT
2230 int ret;
2231
eb2a021c
FR
2232 rtnl_lock();
2233
2234 if (!netif_running(dev))
2235 goto out_unlock;
2236
2237 rtl8169_wait_for_quiescence(dev);
2238 rtl8169_close(dev);
1da177e4
LT
2239
2240 ret = rtl8169_open(dev);
2241 if (unlikely(ret < 0)) {
2242 if (net_ratelimit()) {
b57b7e5a
SH
2243 struct rtl8169_private *tp = netdev_priv(dev);
2244
2245 if (netif_msg_drv(tp)) {
2246 printk(PFX KERN_ERR
2247 "%s: reinit failure (status = %d)."
2248 " Rescheduling.\n", dev->name, ret);
2249 }
1da177e4
LT
2250 }
2251 rtl8169_schedule_work(dev, rtl8169_reinit_task);
2252 }
eb2a021c
FR
2253
2254out_unlock:
2255 rtnl_unlock();
1da177e4
LT
2256}
2257
c4028958 2258static void rtl8169_reset_task(struct work_struct *work)
1da177e4 2259{
c4028958
DH
2260 struct rtl8169_private *tp =
2261 container_of(work, struct rtl8169_private, task.work);
2262 struct net_device *dev = tp->dev;
1da177e4 2263
eb2a021c
FR
2264 rtnl_lock();
2265
1da177e4 2266 if (!netif_running(dev))
eb2a021c 2267 goto out_unlock;
1da177e4
LT
2268
2269 rtl8169_wait_for_quiescence(dev);
2270
2271 rtl8169_rx_interrupt(dev, tp, tp->mmio_addr);
2272 rtl8169_tx_clear(tp);
2273
2274 if (tp->dirty_rx == tp->cur_rx) {
2275 rtl8169_init_ring_indexes(tp);
07ce4064 2276 rtl_hw_start(dev);
1da177e4
LT
2277 netif_wake_queue(dev);
2278 } else {
2279 if (net_ratelimit()) {
b57b7e5a
SH
2280 struct rtl8169_private *tp = netdev_priv(dev);
2281
2282 if (netif_msg_intr(tp)) {
2283 printk(PFX KERN_EMERG
2284 "%s: Rx buffers shortage\n", dev->name);
2285 }
1da177e4
LT
2286 }
2287 rtl8169_schedule_work(dev, rtl8169_reset_task);
2288 }
eb2a021c
FR
2289
2290out_unlock:
2291 rtnl_unlock();
1da177e4
LT
2292}
2293
/*
 * dev->tx_timeout handler: stop the chip immediately and defer the
 * actual recovery to rtl8169_reset_task.
 */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_hw_reset(tp->mmio_addr);

	/* Let's wait a bit while any (async) irq lands on */
	rtl8169_schedule_work(dev, rtl8169_reset_task);
}
2303
/*
 * Map and fill one Tx descriptor per page fragment of @skb.
 *
 * The first fragment lands in the descriptor following tp->cur_tx; the
 * caller keeps the head descriptor for the linear part. @opts1 (which
 * includes DescOwn, set by the caller) is applied to every fragment
 * descriptor; only the last one gets LastFrag, and the skb reference is
 * stored with it so the completion path frees the skb when the final
 * fragment is done.
 *
 * Returns the number of fragment descriptors used.
 */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 opts1)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc *txd;

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 status, len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = frag->size;
		addr = ((void *) page_address(frag->page)) + frag->page_offset;
		mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE);

		/* anti gcc 2.95.3 bugware (sic) */
		status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		/* The skb reference lives on the last fragment's entry. */
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;
}
2341
/*
 * Compute the Tx descriptor offload bits for @skb: LargeSend plus the
 * MSS field when TSO applies, otherwise the hardware checksum flags for
 * a CHECKSUM_PARTIAL TCP/UDP packet, or 0 when no offload is needed.
 */
static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
{
	if (dev->features & NETIF_F_TSO) {
		u32 mss = skb_shinfo(skb)->gso_size;

		if (mss)
			return LargeSend | ((mss & MSSMask) << MSSShift);
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return IPCS | TCPCS;
		else if (ip->protocol == IPPROTO_UDP)
			return IPCS | UDPCS;
		WARN_ON(1);	/* we need a WARN() */
	}
	return 0;
}
2361
/*
 * dev->hard_start_xmit handler.
 *
 * Builds one descriptor for the linear part of @skb plus one per page
 * fragment (via rtl8169_xmit_frags), then kicks the transmitter. The
 * head descriptor's opts1 (carrying DescOwn) is written last, after a
 * barrier, so the chip never sees a half-built chain. The queue is
 * stopped when fewer than MAX_SKB_FRAGS descriptors remain.
 *
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY with the queue stopped when
 * no descriptors are available.
 */
static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int frags, entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	dma_addr_t mapping;
	u32 status, len;
	u32 opts1;
	int ret = NETDEV_TX_OK;

	if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
		if (netif_msg_drv(tp)) {
			printk(KERN_ERR
			       "%s: BUG! Tx Ring full when queue awake!\n",
			       dev->name);
		}
		goto err_stop;
	}

	/* The head descriptor itself must be free as well. */
	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop;

	opts1 = DescOwn | rtl8169_tso_csum(skb, dev);

	frags = rtl8169_xmit_frags(tp, skb, opts1);
	if (frags) {
		len = skb_headlen(skb);
		opts1 |= FirstFrag;
	} else {
		len = skb->len;

		/* Pad short frames up to the minimum Ethernet frame size. */
		if (unlikely(len < ETH_ZLEN)) {
			if (skb_padto(skb, ETH_ZLEN))
				goto err_update_stats;
			len = ETH_ZLEN;
		}

		opts1 |= FirstFrag | LastFrag;
		tp->tx_skb[entry].skb = skb;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);
	txd->opts2 = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));

	/* addr/opts2 must be visible before DescOwn is set below. */
	wmb();

	/* anti gcc 2.95.3 bugware (sic) */
	status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
	txd->opts1 = cpu_to_le32(status);

	dev->trans_start = jiffies;

	tp->cur_tx += frags + 1;

	smp_wmb();

	RTL_W8(TxPoll, 0x40);	/* set polling bit */

	if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		smp_rmb();
		/* Re-check: the completion path may have freed slots
		 * between the test above and the stop. */
		if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
			netif_wake_queue(dev);
	}

out:
	return ret;

err_stop:
	netif_stop_queue(dev);
	ret = NETDEV_TX_BUSY;
err_update_stats:
	tp->stats.tx_dropped++;
	goto out;
}
2441
/*
 * Handle a SYSErr (PCI error) interrupt: log the PCI command/status
 * registers, clear the latched error bits, disable 64-bit DAC
 * addressing if the error hit before any packet was received, then
 * reset the chip and schedule a full reinit.
 */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	void __iomem *ioaddr = tp->mmio_addr;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	if (netif_msg_intr(tp)) {
		printk(KERN_ERR
		       "%s: PCI error (cmd = 0x%04x, status = 0x%04x).\n",
		       dev->name, pci_cmd, pci_status);
	}

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	/* Write-one-to-clear the latched PCI error status bits. */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	/* The infamous DAC f*ckup only happens at boot time */
	if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
		if (netif_msg_intr(tp))
			printk(KERN_INFO "%s: disabling PCI DAC.\n", dev->name);
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
	}

	rtl8169_hw_reset(ioaddr);

	rtl8169_schedule_work(dev, rtl8169_reinit_task);
}
2491
/*
 * Reclaim completed Tx descriptors: unmap the buffers, update the byte
 * and packet counters, free skbs on their last fragment and wake the
 * queue if it was stopped and enough descriptors became available.
 */
static void
rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
		     void __iomem *ioaddr)
{
	unsigned int dirty_tx, tx_left;

	assert(dev != NULL);
	assert(tp != NULL);
	assert(ioaddr != NULL);

	dirty_tx = tp->dirty_tx;
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 len = tx_skb->len;
		u32 status;

		rmb();
		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		/* Still owned by the chip: nothing more to reclaim. */
		if (status & DescOwn)
			break;

		tp->stats.tx_bytes += len;
		tp->stats.tx_packets++;

		rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry);

		/* The skb reference is stored on the last fragment's entry. */
		if (status & LastFrag) {
			dev_kfree_skb_irq(tx_skb->skb);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		smp_wmb();
		if (netif_queue_stopped(dev) &&
		    (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
	}
}
2539
126fa4b9
FR
2540static inline int rtl8169_fragmented_frame(u32 status)
2541{
2542 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
2543}
2544
1da177e4
LT
2545static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
2546{
2547 u32 opts1 = le32_to_cpu(desc->opts1);
2548 u32 status = opts1 & RxProtoMask;
2549
2550 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
2551 ((status == RxProtoUDP) && !(opts1 & UDPFail)) ||
2552 ((status == RxProtoIP) && !(opts1 & IPFail)))
2553 skb->ip_summed = CHECKSUM_UNNECESSARY;
2554 else
2555 skb->ip_summed = CHECKSUM_NONE;
2556}
2557
b449655f
SH
/*
 * For small packets (below rx_copybreak), copy the data into a freshly
 * allocated skb so the large Rx buffer can be handed straight back to
 * the chip.
 *
 * On success *sk_buff is replaced by the copy and true is returned; the
 * caller keeps the original buffer mapped for reuse. Returns false (and
 * leaves *sk_buff alone) for large packets or on allocation failure.
 */
static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
				       struct pci_dev *pdev, dma_addr_t addr,
				       unsigned int align)
{
	struct sk_buff *skb;
	bool done = false;

	if (pkt_size >= rx_copybreak)
		goto out;

	skb = dev_alloc_skb(pkt_size + align);
	if (!skb)
		goto out;

	/* Sync the DMA buffer for CPU access before reading from it. */
	pci_dma_sync_single_for_cpu(pdev, addr, pkt_size, PCI_DMA_FROMDEVICE);
	skb_reserve(skb, (align - 1) & (unsigned long)skb->data);
	skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
	*sk_buff = skb;
	done = true;
out:
	return done;
}
2580
2581static int
2582rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
2583 void __iomem *ioaddr)
2584{
2585 unsigned int cur_rx, rx_left;
2586 unsigned int delta, count;
2587
2588 assert(dev != NULL);
2589 assert(tp != NULL);
2590 assert(ioaddr != NULL);
2591
2592 cur_rx = tp->cur_rx;
2593 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
2594 rx_left = rtl8169_rx_quota(rx_left, (u32) dev->quota);
2595
4dcb7d33 2596 for (; rx_left > 0; rx_left--, cur_rx++) {
1da177e4 2597 unsigned int entry = cur_rx % NUM_RX_DESC;
126fa4b9 2598 struct RxDesc *desc = tp->RxDescArray + entry;
1da177e4
LT
2599 u32 status;
2600
2601 rmb();
126fa4b9 2602 status = le32_to_cpu(desc->opts1);
1da177e4
LT
2603
2604 if (status & DescOwn)
2605 break;
4dcb7d33 2606 if (unlikely(status & RxRES)) {
b57b7e5a
SH
2607 if (netif_msg_rx_err(tp)) {
2608 printk(KERN_INFO
2609 "%s: Rx ERROR. status = %08x\n",
2610 dev->name, status);
2611 }
1da177e4
LT
2612 tp->stats.rx_errors++;
2613 if (status & (RxRWT | RxRUNT))
2614 tp->stats.rx_length_errors++;
2615 if (status & RxCRC)
2616 tp->stats.rx_crc_errors++;
9dccf611
FR
2617 if (status & RxFOVF) {
2618 rtl8169_schedule_work(dev, rtl8169_reset_task);
2619 tp->stats.rx_fifo_errors++;
2620 }
126fa4b9 2621 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
1da177e4 2622 } else {
1da177e4 2623 struct sk_buff *skb = tp->Rx_skbuff[entry];
b449655f 2624 dma_addr_t addr = le64_to_cpu(desc->addr);
1da177e4 2625 int pkt_size = (status & 0x00001FFF) - 4;
b449655f 2626 struct pci_dev *pdev = tp->pci_dev;
1da177e4 2627
126fa4b9
FR
2628 /*
2629 * The driver does not support incoming fragmented
2630 * frames. They are seen as a symptom of over-mtu
2631 * sized frames.
2632 */
2633 if (unlikely(rtl8169_fragmented_frame(status))) {
2634 tp->stats.rx_dropped++;
2635 tp->stats.rx_length_errors++;
2636 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
4dcb7d33 2637 continue;
126fa4b9
FR
2638 }
2639
1da177e4 2640 rtl8169_rx_csum(skb, desc);
bcf0bf90 2641
b449655f
SH
2642 if (rtl8169_try_rx_copy(&skb, pkt_size, pdev, addr,
2643 tp->align)) {
2644 pci_dma_sync_single_for_device(pdev, addr,
2645 pkt_size, PCI_DMA_FROMDEVICE);
2646 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
2647 } else {
2648 pci_unmap_single(pdev, addr, pkt_size,
2649 PCI_DMA_FROMDEVICE);
1da177e4
LT
2650 tp->Rx_skbuff[entry] = NULL;
2651 }
2652
1da177e4
LT
2653 skb_put(skb, pkt_size);
2654 skb->protocol = eth_type_trans(skb, dev);
2655
2656 if (rtl8169_rx_vlan_skb(tp, desc, skb) < 0)
2657 rtl8169_rx_skb(skb);
2658
2659 dev->last_rx = jiffies;
2660 tp->stats.rx_bytes += pkt_size;
2661 tp->stats.rx_packets++;
2662 }
1da177e4
LT
2663 }
2664
2665 count = cur_rx - tp->cur_rx;
2666 tp->cur_rx = cur_rx;
2667
2668 delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
b57b7e5a 2669 if (!delta && count && netif_msg_intr(tp))
1da177e4
LT
2670 printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
2671 tp->dirty_rx += delta;
2672
2673 /*
2674 * FIXME: until there is periodic timer to try and refill the ring,
2675 * a temporary shortage may definitely kill the Rx process.
2676 * - disable the asic to try and avoid an overflow and kick it again
2677 * after refill ?
2678 * - how do others driver handle this condition (Uh oh...).
2679 */
b57b7e5a 2680 if ((tp->dirty_rx + NUM_RX_DESC == tp->cur_rx) && netif_msg_intr(tp))
1da177e4
LT
2681 printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name);
2682
2683 return count;
2684}
2685
/* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */
static irqreturn_t
rtl8169_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct rtl8169_private *tp = netdev_priv(dev);
	int boguscnt = max_interrupt_work;
	void __iomem *ioaddr = tp->mmio_addr;
	int status;
	int handled = 0;

	do {
		status = RTL_R16(IntrStatus);

		/* hotplug/major error/no more work/shared irq */
		if ((status == 0xFFFF) || !status)
			break;

		handled = 1;

		/* Interface is going down: just silence the chip. */
		if (unlikely(!netif_running(dev))) {
			rtl8169_asic_down(ioaddr);
			goto out;
		}

		status &= tp->intr_mask;
		/* Ack what we are about to handle; an Rx FIFO overflow is
		 * acked together with RxOverflow. */
		RTL_W16(IntrStatus,
			(status & RxFIFOOver) ? (status | RxOverflow) : status);

		if (!(status & rtl8169_intr_mask))
			break;

		if (unlikely(status & SYSErr)) {
			rtl8169_pcierr_interrupt(dev);
			break;
		}

		if (status & LinkChg)
			rtl8169_check_link_status(dev, tp, ioaddr);

#ifdef CONFIG_R8169_NAPI
		/* Mask the NAPI events and let rtl8169_poll() do the work. */
		RTL_W16(IntrMask, rtl8169_intr_mask & ~rtl8169_napi_event);
		tp->intr_mask = ~rtl8169_napi_event;

		if (likely(netif_rx_schedule_prep(dev)))
			__netif_rx_schedule(dev);
		else if (netif_msg_intr(tp)) {
			printk(KERN_INFO "%s: interrupt %04x taken in poll\n",
			       dev->name, status);
		}
		break;
#else
		/* Rx interrupt */
		if (status & (RxOK | RxOverflow | RxFIFOOver)) {
			rtl8169_rx_interrupt(dev, tp, ioaddr);
		}
		/* Tx interrupt */
		if (status & (TxOK | TxErr))
			rtl8169_tx_interrupt(dev, tp, ioaddr);
#endif

		boguscnt--;
	} while (boguscnt > 0);

	if (boguscnt <= 0) {
		if (netif_msg_intr(tp) && net_ratelimit() ) {
			printk(KERN_WARNING
			       "%s: Too much work at interrupt!\n", dev->name);
		}
		/* Clear all interrupt sources. */
		RTL_W16(IntrStatus, 0xffff);
	}
out:
	return IRQ_RETVAL(handled);
}
2761
2762#ifdef CONFIG_R8169_NAPI
/*
 * NAPI poll handler: process up to the quota of received packets,
 * reclaim completed Tx descriptors, and re-enable interrupts once all
 * pending work is done.
 *
 * Returns 1 while there may be more work, 0 when polling is complete.
 */
static int rtl8169_poll(struct net_device *dev, int *budget)
{
	unsigned int work_done, work_to_do = min(*budget, dev->quota);
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	work_done = rtl8169_rx_interrupt(dev, tp, ioaddr);
	rtl8169_tx_interrupt(dev, tp, ioaddr);

	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done < work_to_do) {
		netif_rx_complete(dev);
		tp->intr_mask = 0xffff;
		/*
		 * 20040426: the barrier is not strictly required but the
		 * behavior of the irq handler could be less predictable
		 * without it. Btw, the lack of flush for the posted pci
		 * write is safe - FR
		 */
		smp_wmb();
		RTL_W16(IntrMask, rtl8169_intr_mask);
	}

	return (work_done >= work_to_do);
}
2790#endif
2791
/*
 * Bring the adapter down: stop the timer and the queue, silence the
 * chip, disable NAPI polling and release all Tx/Rx buffers. The
 * shutdown sequence may loop if interrupts got re-enabled behind our
 * back (see the comment below).
 */
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;
	unsigned int intrmask;

	rtl8169_delete_timer(dev);

	netif_stop_queue(dev);

core_down:
	spin_lock_irq(&tp->lock);

	rtl8169_asic_down(ioaddr);

	/* Update the error counts. */
	tp->stats.rx_missed_errors += RTL_R32(RxMissed);
	RTL_W32(RxMissed, 0);

	spin_unlock_irq(&tp->lock);

	synchronize_irq(dev->irq);

	/* Only disable polling once, even when looping. */
	if (!poll_locked) {
		netif_poll_disable(dev);
		poll_locked++;
	}

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_sched();	/* FIXME: should this be synchronize_irq()? */

	/*
	 * And now for the 50k$ question: are IRQ disabled or not ?
	 *
	 * Two paths lead here:
	 * 1) dev->close
	 *    -> netif_running() is available to sync the current code and the
	 *       IRQ handler. See rtl8169_interrupt for details.
	 * 2) dev->change_mtu
	 *    -> rtl8169_poll can not be issued again and re-enable the
	 *       interruptions. Let's simply issue the IRQ down sequence again.
	 *
	 * No loop if hotpluged or major error (0xffff).
	 */
	intrmask = RTL_R16(IntrMask);
	if (intrmask && (intrmask != 0xffff))
		goto core_down;

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);
}
2845
/*
 * dev->stop handler: shut the adapter down, release the irq and free
 * the descriptor rings.
 */
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	rtl8169_down(dev);

	free_irq(dev->irq, dev);

	/* rtl8169_down() left NAPI polling disabled. */
	netif_poll_enable(dev);

	pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
			    tp->RxPhyAddr);
	pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
			    tp->TxPhyAddr);
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	return 0;
}
2866
/*
 * dev->set_multicast_list handler: program RxConfig and the MAR
 * multicast hash registers from the interface flags (promiscuous,
 * allmulti) and the device's multicast list.
 */
static void rtl_set_rx_mode(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	int i, rx_mode;
	u32 tmp = 0;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		if (netif_msg_link(tp)) {
			printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
			       dev->name);
		}
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		/* Hash each address into the 64-bit MAR filter. */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	tmp = rtl8169_rx_config | rx_mode |
	      (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);

	/* These chip versions get a fully open multicast filter. */
	if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_13) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_15)) {
		mc_filter[0] = 0xffffffff;
		mc_filter[1] = 0xffffffff;
	}

	RTL_W32(RxConfig, tmp);
	RTL_W32(MAR0 + 0, mc_filter[0]);
	RTL_W32(MAR0 + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}
2923
/**
 * rtl8169_get_stats - Get rtl8169 read/write statistics
 * @dev: The Ethernet Device to get statistics for
 *
 * Get TX/RX statistics for rtl8169. While the interface is up, the
 * hardware's RxMissed counter is folded into the software stats and
 * reset, under tp->lock.
 */
static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;

	if (netif_running(dev)) {
		spin_lock_irqsave(&tp->lock, flags);
		tp->stats.rx_missed_errors += RTL_R32(RxMissed);
		RTL_W32(RxMissed, 0);
		spin_unlock_irqrestore(&tp->lock, flags);
	}

	return &tp->stats;
}
2945
5d06a99f
FR
2946#ifdef CONFIG_PM
2947
/*
 * PCI suspend handler: quiesce the chip, fold the RxMissed counter into
 * the stats, then arm wake-on-LAN (if configured) and enter the
 * requested low-power state. The PCI state is saved even when the
 * interface is down.
 */
static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	if (!netif_running(dev))
		goto out_pci_suspend;

	netif_device_detach(dev);
	netif_stop_queue(dev);

	spin_lock_irq(&tp->lock);

	rtl8169_asic_down(ioaddr);

	tp->stats.rx_missed_errors += RTL_R32(RxMissed);
	RTL_W32(RxMissed, 0);

	spin_unlock_irq(&tp->lock);

out_pci_suspend:
	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
2976
/*
 * PCI resume handler: restore PCI state and power, then schedule the
 * reset task to bring a running interface back up (the reset performs
 * the actual hardware reinitialization).
 */
static int rtl8169_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	if (!netif_running(dev))
		goto out;

	netif_device_attach(dev);

	rtl8169_schedule_work(dev, rtl8169_reset_task);
out:
	return 0;
}
2994
2995#endif /* CONFIG_PM */
2996
1da177e4
LT
/* PCI driver glue. Suspend/resume are only provided with CONFIG_PM. */
static struct pci_driver rtl8169_pci_driver = {
	.name = MODULENAME,
	.id_table = rtl8169_pci_tbl,
	.probe = rtl8169_init_one,
	.remove = __devexit_p(rtl8169_remove_one),
#ifdef CONFIG_PM
	.suspend = rtl8169_suspend,
	.resume = rtl8169_resume,
#endif
};
3007
/* Module entry point: register the PCI driver. */
static int __init
rtl8169_init_module(void)
{
	return pci_register_driver(&rtl8169_pci_driver);
}
3013
/* Module exit point: unregister the PCI driver. */
static void __exit
rtl8169_cleanup_module(void)
{
	pci_unregister_driver(&rtl8169_pci_driver);
}
3019
3020module_init(rtl8169_init_module);
3021module_exit(rtl8169_cleanup_module);