/*
 * drivers/net/r8169.c
 * (recovered from a git-blame web view of commit "r8169: prettify mac_version")
 */
/*
=========================================================================
 r8169.c: A RealTek RTL-8169 Gigabit Ethernet driver for Linux kernel 2.4.x.
 --------------------------------------------------------------------

 History:
 Feb  4 2002	- created initially by ShuChen <shuchen@realtek.com.tw>.
 May 20 2002	- Add link status force-mode and TBI mode support.
	2004	- Massive updates. See kernel SCM system for details.
=========================================================================
  1. [DEPRECATED: use ethtool instead] The media can be forced in 5 modes.
	 Command: 'insmod r8169 media = SET_MEDIA'
	 Ex:	  'insmod r8169 media = 0x04' will force PHY to operate in
		  100Mbps Half-duplex.

	 SET_MEDIA can be:
		_10_Half	= 0x01
		_10_Full	= 0x02
		_100_Half	= 0x04
		_100_Full	= 0x08
		_1000_Full	= 0x10

  2. Support TBI mode.
=========================================================================
VERSION 1.1	<2002/10/4>

	The bit4:0 of MII register 4 is called "selector field", and have to be
	00001b to indicate support of IEEE std 802.3 during NWay process of
	exchanging Link Code Word (FLP).

VERSION 1.2	<2002/11/30>

	- Large style cleanup
	- Use ether_crc in stock kernel (linux/crc32.h)
	- Copy mc_filter setup code from 8139cp
	  (includes an optimization, and avoids set_bit use)

VERSION 1.6LK	<2004/04/14>

	- Merge of Realtek's version 1.6
	- Conversion to DMA API
	- Suspend/resume
	- Endianness
	- Misc Rx/Tx bugs

VERSION 2.2LK	<2005/01/25>

	- RX csum, TX csum/SG, TSO
	- VLAN
	- baby (< 7200) Jumbo frames support
	- Merge of Realtek's version 2.2 (new phy)
 */
52
53#include <linux/module.h>
54#include <linux/moduleparam.h>
55#include <linux/pci.h>
56#include <linux/netdevice.h>
57#include <linux/etherdevice.h>
58#include <linux/delay.h>
59#include <linux/ethtool.h>
60#include <linux/mii.h>
61#include <linux/if_vlan.h>
62#include <linux/crc32.h>
63#include <linux/in.h>
64#include <linux/ip.h>
65#include <linux/tcp.h>
66#include <linux/init.h>
67#include <linux/dma-mapping.h>
68
99f252b0 69#include <asm/system.h>
1da177e4
LT
70#include <asm/io.h>
71#include <asm/irq.h>
72
f7ccf420
SH
#ifdef CONFIG_R8169_NAPI
#define NAPI_SUFFIX	"-NAPI"
#else
#define NAPI_SUFFIX	""
#endif

#define RTL8169_VERSION "2.2LK" NAPI_SUFFIX
#define MODULENAME "r8169"
#define PFX MODULENAME ": "

#ifdef RTL8169_DEBUG
/*
 * Debug-only assertion. Wrapped in do { } while (0) so that the macro is a
 * single statement and cannot misbind to a following "else" when used in an
 * unbraced if/else (the bare "if (!(expr)) { ... }" form had that hazard).
 */
#define assert(expr) \
do { \
	if (!(expr)) { \
		printk( "Assertion failed! %s,%s,%s,line=%d\n", \
		#expr,__FILE__,__FUNCTION__,__LINE__); \
	} \
} while (0)
#define dprintk(fmt, args...)	do { printk(PFX fmt, ## args); } while (0)
#else
#define assert(expr) do {} while (0)
#define dprintk(fmt, args...)	do {} while (0)
#endif /* RTL8169_DEBUG */
94
#define R8169_MSG_DEFAULT \
	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)

/*
 * Number of free Tx descriptors (one slot is kept unused to distinguish a
 * full ring from an empty one). The macro argument is fully parenthesized
 * so any expression a caller passes expands safely.
 */
#define TX_BUFFS_AVAIL(tp) \
	((tp)->dirty_tx + NUM_TX_DESC - (tp)->cur_tx - 1)

/* Rx delivery helpers: NAPI and non-NAPI builds use different entry points. */
#ifdef CONFIG_R8169_NAPI
#define rtl8169_rx_skb			netif_receive_skb
#define rtl8169_rx_hwaccel_skb		vlan_hwaccel_receive_skb
#define rtl8169_rx_quota(count, quota)	min(count, quota)
#else
#define rtl8169_rx_skb			netif_rx
#define rtl8169_rx_hwaccel_skb		vlan_hwaccel_rx
#define rtl8169_rx_quota(count, quota)	count
#endif

/* media options */
#define MAX_UNITS 8
static int media[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static int num_media = 0;
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static const int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;

/* MAC address length */
#define MAC_ADDR_LEN	6

#define RX_FIFO_THRESH	7	/* 7 means NO threshold, Rx buffer level before first PCI xfer. */
#define RX_DMA_BURST	6	/* Maximum PCI burst, '6' is 1024 */
#define TX_DMA_BURST	6	/* Maximum PCI burst, '6' is 1024 */
#define EarlyTxThld	0x3F	/* 0x3F means NO early transmit */
#define RxPacketMaxSize	0x3FE8	/* 16K - 1 - ETH_HLEN - VLAN - CRC... */
#define SafeMtu		0x1c20	/* ... actually life sucks beyond ~7k */
#define InterFrameGap	0x03	/* 3 means InterFrameGap = the shortest one */

#define R8169_REGS_SIZE		256
#define R8169_NAPI_WEIGHT	64
#define NUM_TX_DESC	64	/* Number of Tx descriptor registers */
#define NUM_RX_DESC	256	/* Number of Rx descriptor registers */
#define RX_BUF_SIZE	1536	/* Rx Buffer size */
#define R8169_TX_RING_BYTES	(NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES	(NUM_RX_DESC * sizeof(struct RxDesc))

#define RTL8169_TX_TIMEOUT	(6*HZ)
#define RTL8169_PHY_TIMEOUT	(10*HZ)

/* write/read MMIO register */
#define RTL_W8(reg, val8)	writeb ((val8), ioaddr + (reg))
#define RTL_W16(reg, val16)	writew ((val16), ioaddr + (reg))
#define RTL_W32(reg, val32)	writel ((val32), ioaddr + (reg))
#define RTL_R8(reg)		readb (ioaddr + (reg))
#define RTL_R16(reg)		readw (ioaddr + (reg))
#define RTL_R32(reg)		((unsigned long) readl (ioaddr + (reg)))
152
/* Chip-specific MAC revisions, as decoded from the TxConfig register. */
enum mac_version {
	RTL_GIGA_MAC_VER_01 = 0x01, /* 8169 */
	RTL_GIGA_MAC_VER_02 = 0x02, /* 8169S */
	RTL_GIGA_MAC_VER_03 = 0x03, /* 8110S */
	RTL_GIGA_MAC_VER_04 = 0x04, /* 8169SB */
	RTL_GIGA_MAC_VER_05 = 0x05, /* 8110SCd */
	RTL_GIGA_MAC_VER_11 = 0x0b, /* 8168Bb */
	RTL_GIGA_MAC_VER_12 = 0x0c, /* 8168Be 8168Bf */
	RTL_GIGA_MAC_VER_13 = 0x0d, /* 8101Eb 8101Ec */
	RTL_GIGA_MAC_VER_14 = 0x0e, /* 8101 */
	RTL_GIGA_MAC_VER_15 = 0x0f  /* 8101 */
};
165
/* PHY revisions, distinguished by bits 0-3 of MII register 0x03. */
enum phy_version {
	RTL_GIGA_PHY_VER_C = 0x03, /* PHY Reg 0x03 bit0-3 == 0x0000 */
	RTL_GIGA_PHY_VER_D = 0x04, /* PHY Reg 0x03 bit0-3 == 0x0000 */
	RTL_GIGA_PHY_VER_E = 0x05, /* PHY Reg 0x03 bit0-3 == 0x0000 */
	RTL_GIGA_PHY_VER_F = 0x06, /* PHY Reg 0x03 bit0-3 == 0x0001 */
	RTL_GIGA_PHY_VER_G = 0x07, /* PHY Reg 0x03 bit0-3 == 0x0002 */
	RTL_GIGA_PHY_VER_H = 0x08, /* PHY Reg 0x03 bit0-3 == 0x0003 */
};
174
1da177e4
LT
175#define _R(NAME,MAC,MASK) \
176 { .name = NAME, .mac_version = MAC, .RxConfigMask = MASK }
177
3c6bee1d 178static const struct {
1da177e4
LT
179 const char *name;
180 u8 mac_version;
181 u32 RxConfigMask; /* Clears the bits supported by this chip */
182} rtl_chip_info[] = {
ba6eb6ee
FR
183 _R("RTL8169", RTL_GIGA_MAC_VER_01, 0xff7e1880), // 8169
184 _R("RTL8169s", RTL_GIGA_MAC_VER_02, 0xff7e1880), // 8169S
185 _R("RTL8110s", RTL_GIGA_MAC_VER_03, 0xff7e1880), // 8110S
186 _R("RTL8169sb/8110sb", RTL_GIGA_MAC_VER_04, 0xff7e1880), // 8169SB
187 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_05, 0xff7e1880), // 8110SCd
bcf0bf90
FR
188 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E
189 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
190 _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
191 _R("RTL8100e", RTL_GIGA_MAC_VER_14, 0xff7e1880), // PCI-E 8139
192 _R("RTL8100e", RTL_GIGA_MAC_VER_15, 0xff7e1880) // PCI-E 8139
1da177e4
LT
193};
194#undef _R
195
bcf0bf90
FR
196enum cfg_version {
197 RTL_CFG_0 = 0x00,
198 RTL_CFG_1,
199 RTL_CFG_2
200};
201
07ce4064
FR
202static void rtl_hw_start_8169(struct net_device *);
203static void rtl_hw_start_8168(struct net_device *);
204static void rtl_hw_start_8101(struct net_device *);
205
bcf0bf90 206static const struct {
07ce4064 207 void (*hw_start)(struct net_device *);
bcf0bf90
FR
208 unsigned int region;
209 unsigned int align;
210} rtl_cfg_info[] = {
07ce4064
FR
211 [RTL_CFG_0] = { rtl_hw_start_8169, 1, NET_IP_ALIGN },
212 [RTL_CFG_1] = { rtl_hw_start_8168, 2, 8 },
213 [RTL_CFG_2] = { rtl_hw_start_8101, 2, 8 }
bcf0bf90
FR
214};
215
1da177e4 216static struct pci_device_id rtl8169_pci_tbl[] = {
bcf0bf90 217 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
d2eed8cf 218 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
d81bf551 219 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
07ce4064 220 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
bcf0bf90
FR
221 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
222 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
73f5e28b 223 { PCI_DEVICE(0x1259, 0xc107), 0, 0, RTL_CFG_0 },
bcf0bf90
FR
224 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
225 { PCI_VENDOR_ID_LINKSYS, 0x1032,
226 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
1da177e4
LT
227 {0,},
228};
229
230MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
231
232static int rx_copybreak = 200;
233static int use_dac;
b57b7e5a
SH
234static struct {
235 u32 msg_enable;
236} debug = { -1 };
1da177e4
LT
237
/* MMIO register offsets. */
enum RTL8169_registers {
	MAC0		= 0,	/* Ethernet hardware address. */
	MAR0		= 8,	/* Multicast filter. */
	CounterAddrLow	= 0x10,
	CounterAddrHigh	= 0x14,
	TxDescStartAddrLow	= 0x20,
	TxDescStartAddrHigh	= 0x24,
	TxHDescStartAddrLow	= 0x28,
	TxHDescStartAddrHigh	= 0x2c,
	FLASH		= 0x30,
	ERSR		= 0x36,
	ChipCmd		= 0x37,
	TxPoll		= 0x38,
	IntrMask	= 0x3C,
	IntrStatus	= 0x3E,
	TxConfig	= 0x40,
	RxConfig	= 0x44,
	RxMissed	= 0x4C,
	Cfg9346		= 0x50,
	Config0		= 0x51,
	Config1		= 0x52,
	Config2		= 0x53,
	Config3		= 0x54,
	Config4		= 0x55,
	Config5		= 0x56,
	MultiIntr	= 0x5C,
	PHYAR		= 0x60,
	TBICSR		= 0x64,
	TBI_ANAR	= 0x68,
	TBI_LPAR	= 0x6A,
	PHYstatus	= 0x6C,
	RxMaxSize	= 0xDA,
	CPlusCmd	= 0xE0,
	IntrMitigate	= 0xE2,
	RxDescAddrLow	= 0xE4,
	RxDescAddrHigh	= 0xE8,
	EarlyTxThres	= 0xEC,
	FuncEvent	= 0xF0,
	FuncEventMask	= 0xF4,
	FuncPresetState	= 0xF8,
	FuncForceEvent	= 0xFC,
};
280
/* Bit definitions for the registers above. */
enum RTL8169_register_content {
	/* InterruptStatusBits */
	SYSErr		= 0x8000,
	PCSTimeout	= 0x4000,
	SWInt		= 0x0100,
	TxDescUnavail	= 0x80,
	RxFIFOOver	= 0x40,
	LinkChg		= 0x20,
	RxOverflow	= 0x10,
	TxErr		= 0x08,
	TxOK		= 0x04,
	RxErr		= 0x02,
	RxOK		= 0x01,

	/* RxStatusDesc */
	RxFOVF	= (1 << 23),
	RxRWT	= (1 << 22),
	RxRES	= (1 << 21),
	RxRUNT	= (1 << 20),
	RxCRC	= (1 << 19),

	/* ChipCmdBits */
	CmdReset	= 0x10,
	CmdRxEnb	= 0x08,
	CmdTxEnb	= 0x04,
	RxBufEmpty	= 0x01,

	/* Cfg9346Bits */
	Cfg9346_Lock	= 0x00,
	Cfg9346_Unlock	= 0xC0,

	/* rx_mode_bits */
	AcceptErr	= 0x20,
	AcceptRunt	= 0x10,
	AcceptBroadcast	= 0x08,
	AcceptMulticast	= 0x04,
	AcceptMyPhys	= 0x02,
	AcceptAllPhys	= 0x01,

	/* RxConfigBits */
	RxCfgFIFOShift	= 13,
	RxCfgDMAShift	= 8,

	/* TxConfigBits */
	TxInterFrameGapShift = 24,
	TxDMAShift	= 8,	/* DMA burst value (0-7) is shift this many bits */

	/* Config1 register p.24 */
	PMEnable	= (1 << 0),	/* Power Management Enable */

	/* Config3 register p.25 */
	MagicPacket	= (1 << 5),	/* Wake up when receives a Magic Packet */
	LinkUp		= (1 << 4),	/* Wake up when the cable connection is re-established */

	/* Config5 register p.27 */
	BWF		= (1 << 6),	/* Accept Broadcast wakeup frame */
	MWF		= (1 << 5),	/* Accept Multicast wakeup frame */
	UWF		= (1 << 4),	/* Accept Unicast wakeup frame */
	LanWake		= (1 << 1),	/* LanWake enable/disable */
	PMEStatus	= (1 << 0),	/* PME status can be reset by PCI RST# */

	/* TBICSR p.28 */
	TBIReset	= 0x80000000,
	TBILoopback	= 0x40000000,
	TBINwEnable	= 0x20000000,
	TBINwRestart	= 0x10000000,
	TBILinkOk	= 0x02000000,
	TBINwComplete	= 0x01000000,

	/* CPlusCmd p.31 */
	RxVlan		= (1 << 6),
	RxChkSum	= (1 << 5),
	PCIDAC		= (1 << 4),
	PCIMulRW	= (1 << 3),

	/* rtl8169_PHYstatus */
	TBI_Enable	= 0x80,
	TxFlowCtrl	= 0x40,
	RxFlowCtrl	= 0x20,
	_1000bpsF	= 0x10,
	_100bps		= 0x08,
	_10bps		= 0x04,
	LinkStatus	= 0x02,
	FullDup		= 0x01,

	/* _MediaType */
	_10_Half	= 0x01,
	_10_Full	= 0x02,
	_100_Half	= 0x04,
	_100_Full	= 0x08,
	_1000_Full	= 0x10,

	/* _TBICSRBit */
	TBILinkOK	= 0x02000000,

	/* DumpCounterCommand */
	CounterDump	= 0x8,
};
379
/* Descriptor opts1/opts2 status bits (shared and Tx/Rx private). */
enum _DescStatusBit {
	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
	RingEnd		= (1 << 30), /* End of descriptor ring */
	FirstFrag	= (1 << 29), /* First segment of a packet */
	LastFrag	= (1 << 28), /* Final segment of a packet */

	/* Tx private */
	LargeSend	= (1 << 27), /* TCP Large Send Offload (TSO) */
	MSSShift	= 16,        /* MSS value position */
	MSSMask		= 0xfff,     /* MSS value + LargeSend bit: 12 bits */
	IPCS		= (1 << 18), /* Calculate IP checksum */
	UDPCS		= (1 << 17), /* Calculate UDP/IP checksum */
	TCPCS		= (1 << 16), /* Calculate TCP/IP checksum */
	TxVlanTag	= (1 << 17), /* Add VLAN tag (opts2) */

	/* Rx private */
	PID1		= (1 << 18), /* Protocol ID bit 1/2 */
	PID0		= (1 << 17), /* Protocol ID bit 2/2 */

#define RxProtoUDP	(PID1)
#define RxProtoTCP	(PID0)
#define RxProtoIP	(PID1 | PID0)
#define RxProtoMask	RxProtoIP

	IPFail		= (1 << 16), /* IP checksum failed */
	UDPFail		= (1 << 15), /* UDP/IP checksum failed */
	TCPFail		= (1 << 14), /* TCP/IP checksum failed */
	RxVlanTag	= (1 << 16), /* VLAN tag available (opts2) */
};

#define RsvdMask	0x3fffc000
411
412struct TxDesc {
413 u32 opts1;
414 u32 opts2;
415 u64 addr;
416};
417
418struct RxDesc {
419 u32 opts1;
420 u32 opts2;
421 u64 addr;
422};
423
424struct ring_info {
425 struct sk_buff *skb;
426 u32 len;
427 u8 __pad[sizeof(void *) - sizeof(u32)];
428};
429
430struct rtl8169_private {
431 void __iomem *mmio_addr; /* memory map physical address */
432 struct pci_dev *pci_dev; /* Index of PCI device */
c4028958 433 struct net_device *dev;
1da177e4
LT
434 struct net_device_stats stats; /* statistics of net device */
435 spinlock_t lock; /* spin lock flag */
b57b7e5a 436 u32 msg_enable;
1da177e4
LT
437 int chipset;
438 int mac_version;
439 int phy_version;
440 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
441 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
442 u32 dirty_rx;
443 u32 dirty_tx;
444 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
445 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
446 dma_addr_t TxPhyAddr;
447 dma_addr_t RxPhyAddr;
448 struct sk_buff *Rx_skbuff[NUM_RX_DESC]; /* Rx data buffers */
449 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
bcf0bf90 450 unsigned align;
1da177e4
LT
451 unsigned rx_buf_sz;
452 struct timer_list timer;
453 u16 cp_cmd;
454 u16 intr_mask;
455 int phy_auto_nego_reg;
456 int phy_1000_ctrl_reg;
457#ifdef CONFIG_R8169_VLAN
458 struct vlan_group *vlgrp;
459#endif
460 int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
461 void (*get_settings)(struct net_device *, struct ethtool_cmd *);
462 void (*phy_reset_enable)(void __iomem *);
07ce4064 463 void (*hw_start)(struct net_device *);
1da177e4
LT
464 unsigned int (*phy_reset_pending)(void __iomem *);
465 unsigned int (*link_ok)(void __iomem *);
c4028958 466 struct delayed_work task;
61a4dcc2 467 unsigned wol_enabled : 1;
1da177e4
LT
468};
469
979b6c13 470MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
1da177e4
LT
471MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
472module_param_array(media, int, &num_media, 0);
df0a1bf6 473MODULE_PARM_DESC(media, "force phy operation. Deprecated by ethtool (8).");
1da177e4 474module_param(rx_copybreak, int, 0);
1b7efd58 475MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
1da177e4
LT
476module_param(use_dac, int, 0);
477MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
b57b7e5a
SH
478module_param_named(debug, debug.msg_enable, int, 0);
479MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
1da177e4
LT
480MODULE_LICENSE("GPL");
481MODULE_VERSION(RTL8169_VERSION);
482
483static int rtl8169_open(struct net_device *dev);
484static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev);
7d12e780 485static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance);
1da177e4 486static int rtl8169_init_ring(struct net_device *dev);
07ce4064 487static void rtl_hw_start(struct net_device *dev);
1da177e4 488static int rtl8169_close(struct net_device *dev);
07ce4064 489static void rtl_set_rx_mode(struct net_device *dev);
1da177e4 490static void rtl8169_tx_timeout(struct net_device *dev);
4dcb7d33 491static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
1da177e4
LT
492static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
493 void __iomem *);
4dcb7d33 494static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
1da177e4 495static void rtl8169_down(struct net_device *dev);
99f252b0 496static void rtl8169_rx_clear(struct rtl8169_private *tp);
1da177e4
LT
497
498#ifdef CONFIG_R8169_NAPI
499static int rtl8169_poll(struct net_device *dev, int *budget);
500#endif
501
502static const u16 rtl8169_intr_mask =
503 SYSErr | LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK;
504static const u16 rtl8169_napi_event =
505 RxOK | RxOverflow | RxFIFOOver | TxOK | TxErr;
506static const unsigned int rtl8169_rx_config =
5b0384f4 507 (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
1da177e4
LT
508
509static void mdio_write(void __iomem *ioaddr, int RegAddr, int value)
510{
511 int i;
512
513 RTL_W32(PHYAR, 0x80000000 | (RegAddr & 0xFF) << 16 | value);
1da177e4 514
2371408c 515 for (i = 20; i > 0; i--) {
1da177e4 516 /* Check if the RTL8169 has completed writing to the specified MII register */
5b0384f4 517 if (!(RTL_R32(PHYAR) & 0x80000000))
1da177e4 518 break;
2371408c 519 udelay(25);
1da177e4
LT
520 }
521}
522
523static int mdio_read(void __iomem *ioaddr, int RegAddr)
524{
525 int i, value = -1;
526
527 RTL_W32(PHYAR, 0x0 | (RegAddr & 0xFF) << 16);
1da177e4 528
2371408c 529 for (i = 20; i > 0; i--) {
1da177e4
LT
530 /* Check if the RTL8169 has completed retrieving data from the specified MII register */
531 if (RTL_R32(PHYAR) & 0x80000000) {
532 value = (int) (RTL_R32(PHYAR) & 0xFFFF);
533 break;
534 }
2371408c 535 udelay(25);
1da177e4
LT
536 }
537 return value;
538}
539
540static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
541{
542 RTL_W16(IntrMask, 0x0000);
543
544 RTL_W16(IntrStatus, 0xffff);
545}
546
547static void rtl8169_asic_down(void __iomem *ioaddr)
548{
549 RTL_W8(ChipCmd, 0x00);
550 rtl8169_irq_mask_and_ack(ioaddr);
551 RTL_R16(CPlusCmd);
552}
553
554static unsigned int rtl8169_tbi_reset_pending(void __iomem *ioaddr)
555{
556 return RTL_R32(TBICSR) & TBIReset;
557}
558
559static unsigned int rtl8169_xmii_reset_pending(void __iomem *ioaddr)
560{
64e4bfb4 561 return mdio_read(ioaddr, MII_BMCR) & BMCR_RESET;
1da177e4
LT
562}
563
564static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
565{
566 return RTL_R32(TBICSR) & TBILinkOk;
567}
568
569static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
570{
571 return RTL_R8(PHYstatus) & LinkStatus;
572}
573
574static void rtl8169_tbi_reset_enable(void __iomem *ioaddr)
575{
576 RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
577}
578
579static void rtl8169_xmii_reset_enable(void __iomem *ioaddr)
580{
581 unsigned int val;
582
9e0db8ef
FR
583 val = mdio_read(ioaddr, MII_BMCR) | BMCR_RESET;
584 mdio_write(ioaddr, MII_BMCR, val & 0xffff);
1da177e4
LT
585}
586
587static void rtl8169_check_link_status(struct net_device *dev,
588 struct rtl8169_private *tp, void __iomem *ioaddr)
589{
590 unsigned long flags;
591
592 spin_lock_irqsave(&tp->lock, flags);
593 if (tp->link_ok(ioaddr)) {
594 netif_carrier_on(dev);
b57b7e5a
SH
595 if (netif_msg_ifup(tp))
596 printk(KERN_INFO PFX "%s: link up\n", dev->name);
597 } else {
598 if (netif_msg_ifdown(tp))
599 printk(KERN_INFO PFX "%s: link down\n", dev->name);
1da177e4 600 netif_carrier_off(dev);
b57b7e5a 601 }
1da177e4
LT
602 spin_unlock_irqrestore(&tp->lock, flags);
603}
604
605static void rtl8169_link_option(int idx, u8 *autoneg, u16 *speed, u8 *duplex)
606{
607 struct {
608 u16 speed;
609 u8 duplex;
610 u8 autoneg;
611 u8 media;
612 } link_settings[] = {
613 { SPEED_10, DUPLEX_HALF, AUTONEG_DISABLE, _10_Half },
614 { SPEED_10, DUPLEX_FULL, AUTONEG_DISABLE, _10_Full },
615 { SPEED_100, DUPLEX_HALF, AUTONEG_DISABLE, _100_Half },
616 { SPEED_100, DUPLEX_FULL, AUTONEG_DISABLE, _100_Full },
617 { SPEED_1000, DUPLEX_FULL, AUTONEG_DISABLE, _1000_Full },
618 /* Make TBI happy */
619 { SPEED_1000, DUPLEX_FULL, AUTONEG_ENABLE, 0xff }
620 }, *p;
621 unsigned char option;
5b0384f4 622
1da177e4
LT
623 option = ((idx < MAX_UNITS) && (idx >= 0)) ? media[idx] : 0xff;
624
b57b7e5a 625 if ((option != 0xff) && !idx && netif_msg_drv(&debug))
1da177e4
LT
626 printk(KERN_WARNING PFX "media option is deprecated.\n");
627
628 for (p = link_settings; p->media != 0xff; p++) {
629 if (p->media == option)
630 break;
631 }
632 *autoneg = p->autoneg;
633 *speed = p->speed;
634 *duplex = p->duplex;
635}
636
61a4dcc2
FR
637static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
638{
639 struct rtl8169_private *tp = netdev_priv(dev);
640 void __iomem *ioaddr = tp->mmio_addr;
641 u8 options;
642
643 wol->wolopts = 0;
644
645#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
646 wol->supported = WAKE_ANY;
647
648 spin_lock_irq(&tp->lock);
649
650 options = RTL_R8(Config1);
651 if (!(options & PMEnable))
652 goto out_unlock;
653
654 options = RTL_R8(Config3);
655 if (options & LinkUp)
656 wol->wolopts |= WAKE_PHY;
657 if (options & MagicPacket)
658 wol->wolopts |= WAKE_MAGIC;
659
660 options = RTL_R8(Config5);
661 if (options & UWF)
662 wol->wolopts |= WAKE_UCAST;
663 if (options & BWF)
5b0384f4 664 wol->wolopts |= WAKE_BCAST;
61a4dcc2 665 if (options & MWF)
5b0384f4 666 wol->wolopts |= WAKE_MCAST;
61a4dcc2
FR
667
668out_unlock:
669 spin_unlock_irq(&tp->lock);
670}
671
672static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
673{
674 struct rtl8169_private *tp = netdev_priv(dev);
675 void __iomem *ioaddr = tp->mmio_addr;
676 int i;
677 static struct {
678 u32 opt;
679 u16 reg;
680 u8 mask;
681 } cfg[] = {
682 { WAKE_ANY, Config1, PMEnable },
683 { WAKE_PHY, Config3, LinkUp },
684 { WAKE_MAGIC, Config3, MagicPacket },
685 { WAKE_UCAST, Config5, UWF },
686 { WAKE_BCAST, Config5, BWF },
687 { WAKE_MCAST, Config5, MWF },
688 { WAKE_ANY, Config5, LanWake }
689 };
690
691 spin_lock_irq(&tp->lock);
692
693 RTL_W8(Cfg9346, Cfg9346_Unlock);
694
695 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
696 u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
697 if (wol->wolopts & cfg[i].opt)
698 options |= cfg[i].mask;
699 RTL_W8(cfg[i].reg, options);
700 }
701
702 RTL_W8(Cfg9346, Cfg9346_Lock);
703
704 tp->wol_enabled = (wol->wolopts) ? 1 : 0;
705
706 spin_unlock_irq(&tp->lock);
707
708 return 0;
709}
710
1da177e4
LT
711static void rtl8169_get_drvinfo(struct net_device *dev,
712 struct ethtool_drvinfo *info)
713{
714 struct rtl8169_private *tp = netdev_priv(dev);
715
716 strcpy(info->driver, MODULENAME);
717 strcpy(info->version, RTL8169_VERSION);
718 strcpy(info->bus_info, pci_name(tp->pci_dev));
719}
720
721static int rtl8169_get_regs_len(struct net_device *dev)
722{
723 return R8169_REGS_SIZE;
724}
725
726static int rtl8169_set_speed_tbi(struct net_device *dev,
727 u8 autoneg, u16 speed, u8 duplex)
728{
729 struct rtl8169_private *tp = netdev_priv(dev);
730 void __iomem *ioaddr = tp->mmio_addr;
731 int ret = 0;
732 u32 reg;
733
734 reg = RTL_R32(TBICSR);
735 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
736 (duplex == DUPLEX_FULL)) {
737 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
738 } else if (autoneg == AUTONEG_ENABLE)
739 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
740 else {
b57b7e5a
SH
741 if (netif_msg_link(tp)) {
742 printk(KERN_WARNING "%s: "
743 "incorrect speed setting refused in TBI mode\n",
744 dev->name);
745 }
1da177e4
LT
746 ret = -EOPNOTSUPP;
747 }
748
749 return ret;
750}
751
752static int rtl8169_set_speed_xmii(struct net_device *dev,
753 u8 autoneg, u16 speed, u8 duplex)
754{
755 struct rtl8169_private *tp = netdev_priv(dev);
756 void __iomem *ioaddr = tp->mmio_addr;
757 int auto_nego, giga_ctrl;
758
64e4bfb4
FR
759 auto_nego = mdio_read(ioaddr, MII_ADVERTISE);
760 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
761 ADVERTISE_100HALF | ADVERTISE_100FULL);
762 giga_ctrl = mdio_read(ioaddr, MII_CTRL1000);
763 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
1da177e4
LT
764
765 if (autoneg == AUTONEG_ENABLE) {
64e4bfb4
FR
766 auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
767 ADVERTISE_100HALF | ADVERTISE_100FULL);
768 giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
1da177e4
LT
769 } else {
770 if (speed == SPEED_10)
64e4bfb4 771 auto_nego |= ADVERTISE_10HALF | ADVERTISE_10FULL;
1da177e4 772 else if (speed == SPEED_100)
64e4bfb4 773 auto_nego |= ADVERTISE_100HALF | ADVERTISE_100FULL;
1da177e4 774 else if (speed == SPEED_1000)
64e4bfb4 775 giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
1da177e4
LT
776
777 if (duplex == DUPLEX_HALF)
64e4bfb4 778 auto_nego &= ~(ADVERTISE_10FULL | ADVERTISE_100FULL);
726ecdcf
AG
779
780 if (duplex == DUPLEX_FULL)
64e4bfb4 781 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_100HALF);
bcf0bf90
FR
782
783 /* This tweak comes straight from Realtek's driver. */
784 if ((speed == SPEED_100) && (duplex == DUPLEX_HALF) &&
785 (tp->mac_version == RTL_GIGA_MAC_VER_13)) {
64e4bfb4 786 auto_nego = ADVERTISE_100HALF | ADVERTISE_CSMA;
bcf0bf90
FR
787 }
788 }
789
790 /* The 8100e/8101e do Fast Ethernet only. */
791 if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
792 (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
793 (tp->mac_version == RTL_GIGA_MAC_VER_15)) {
64e4bfb4 794 if ((giga_ctrl & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) &&
bcf0bf90
FR
795 netif_msg_link(tp)) {
796 printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n",
797 dev->name);
798 }
64e4bfb4 799 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
1da177e4
LT
800 }
801
623a1593
FR
802 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
803
1da177e4
LT
804 tp->phy_auto_nego_reg = auto_nego;
805 tp->phy_1000_ctrl_reg = giga_ctrl;
806
64e4bfb4
FR
807 mdio_write(ioaddr, MII_ADVERTISE, auto_nego);
808 mdio_write(ioaddr, MII_CTRL1000, giga_ctrl);
809 mdio_write(ioaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
1da177e4
LT
810 return 0;
811}
812
813static int rtl8169_set_speed(struct net_device *dev,
814 u8 autoneg, u16 speed, u8 duplex)
815{
816 struct rtl8169_private *tp = netdev_priv(dev);
817 int ret;
818
819 ret = tp->set_speed(dev, autoneg, speed, duplex);
820
64e4bfb4 821 if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
1da177e4
LT
822 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
823
824 return ret;
825}
826
827static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
828{
829 struct rtl8169_private *tp = netdev_priv(dev);
830 unsigned long flags;
831 int ret;
832
833 spin_lock_irqsave(&tp->lock, flags);
834 ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex);
835 spin_unlock_irqrestore(&tp->lock, flags);
5b0384f4 836
1da177e4
LT
837 return ret;
838}
839
840static u32 rtl8169_get_rx_csum(struct net_device *dev)
841{
842 struct rtl8169_private *tp = netdev_priv(dev);
843
844 return tp->cp_cmd & RxChkSum;
845}
846
847static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
848{
849 struct rtl8169_private *tp = netdev_priv(dev);
850 void __iomem *ioaddr = tp->mmio_addr;
851 unsigned long flags;
852
853 spin_lock_irqsave(&tp->lock, flags);
854
855 if (data)
856 tp->cp_cmd |= RxChkSum;
857 else
858 tp->cp_cmd &= ~RxChkSum;
859
860 RTL_W16(CPlusCmd, tp->cp_cmd);
861 RTL_R16(CPlusCmd);
862
863 spin_unlock_irqrestore(&tp->lock, flags);
864
865 return 0;
866}
867
868#ifdef CONFIG_R8169_VLAN
869
870static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
871 struct sk_buff *skb)
872{
873 return (tp->vlgrp && vlan_tx_tag_present(skb)) ?
874 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
875}
876
877static void rtl8169_vlan_rx_register(struct net_device *dev,
878 struct vlan_group *grp)
879{
880 struct rtl8169_private *tp = netdev_priv(dev);
881 void __iomem *ioaddr = tp->mmio_addr;
882 unsigned long flags;
883
884 spin_lock_irqsave(&tp->lock, flags);
885 tp->vlgrp = grp;
886 if (tp->vlgrp)
887 tp->cp_cmd |= RxVlan;
888 else
889 tp->cp_cmd &= ~RxVlan;
890 RTL_W16(CPlusCmd, tp->cp_cmd);
891 RTL_R16(CPlusCmd);
892 spin_unlock_irqrestore(&tp->lock, flags);
893}
894
1da177e4
LT
895static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
896 struct sk_buff *skb)
897{
898 u32 opts2 = le32_to_cpu(desc->opts2);
899 int ret;
900
901 if (tp->vlgrp && (opts2 & RxVlanTag)) {
902 rtl8169_rx_hwaccel_skb(skb, tp->vlgrp,
903 swab16(opts2 & 0xffff));
904 ret = 0;
905 } else
906 ret = -1;
907 desc->opts2 = 0;
908 return ret;
909}
910
911#else /* !CONFIG_R8169_VLAN */
912
913static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
914 struct sk_buff *skb)
915{
916 return 0;
917}
918
/* VLAN support compiled out: always fall back to the plain Rx path. */
static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
			       struct sk_buff *skb)
{
	return -1;
}
924
925#endif
926
927static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
928{
929 struct rtl8169_private *tp = netdev_priv(dev);
930 void __iomem *ioaddr = tp->mmio_addr;
931 u32 status;
932
933 cmd->supported =
934 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
935 cmd->port = PORT_FIBRE;
936 cmd->transceiver = XCVR_INTERNAL;
937
938 status = RTL_R32(TBICSR);
939 cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
940 cmd->autoneg = !!(status & TBINwEnable);
941
942 cmd->speed = SPEED_1000;
943 cmd->duplex = DUPLEX_FULL; /* Always set */
944}
945
946static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
947{
948 struct rtl8169_private *tp = netdev_priv(dev);
949 void __iomem *ioaddr = tp->mmio_addr;
950 u8 status;
951
952 cmd->supported = SUPPORTED_10baseT_Half |
953 SUPPORTED_10baseT_Full |
954 SUPPORTED_100baseT_Half |
955 SUPPORTED_100baseT_Full |
956 SUPPORTED_1000baseT_Full |
957 SUPPORTED_Autoneg |
5b0384f4 958 SUPPORTED_TP;
1da177e4
LT
959
960 cmd->autoneg = 1;
961 cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
962
64e4bfb4 963 if (tp->phy_auto_nego_reg & ADVERTISE_10HALF)
1da177e4 964 cmd->advertising |= ADVERTISED_10baseT_Half;
64e4bfb4 965 if (tp->phy_auto_nego_reg & ADVERTISE_10FULL)
1da177e4 966 cmd->advertising |= ADVERTISED_10baseT_Full;
64e4bfb4 967 if (tp->phy_auto_nego_reg & ADVERTISE_100HALF)
1da177e4 968 cmd->advertising |= ADVERTISED_100baseT_Half;
64e4bfb4 969 if (tp->phy_auto_nego_reg & ADVERTISE_100FULL)
1da177e4 970 cmd->advertising |= ADVERTISED_100baseT_Full;
64e4bfb4 971 if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)
1da177e4
LT
972 cmd->advertising |= ADVERTISED_1000baseT_Full;
973
974 status = RTL_R8(PHYstatus);
975
976 if (status & _1000bpsF)
977 cmd->speed = SPEED_1000;
978 else if (status & _100bps)
979 cmd->speed = SPEED_100;
980 else if (status & _10bps)
981 cmd->speed = SPEED_10;
982
623a1593
FR
983 if (status & TxFlowCtrl)
984 cmd->advertising |= ADVERTISED_Asym_Pause;
985 if (status & RxFlowCtrl)
986 cmd->advertising |= ADVERTISED_Pause;
987
1da177e4
LT
988 cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ?
989 DUPLEX_FULL : DUPLEX_HALF;
990}
991
992static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
993{
994 struct rtl8169_private *tp = netdev_priv(dev);
995 unsigned long flags;
996
997 spin_lock_irqsave(&tp->lock, flags);
998
999 tp->get_settings(dev, cmd);
1000
1001 spin_unlock_irqrestore(&tp->lock, flags);
1002 return 0;
1003}
1004
1005static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1006 void *p)
1007{
5b0384f4
FR
1008 struct rtl8169_private *tp = netdev_priv(dev);
1009 unsigned long flags;
1da177e4 1010
5b0384f4
FR
1011 if (regs->len > R8169_REGS_SIZE)
1012 regs->len = R8169_REGS_SIZE;
1da177e4 1013
5b0384f4
FR
1014 spin_lock_irqsave(&tp->lock, flags);
1015 memcpy_fromio(p, tp->mmio_addr, regs->len);
1016 spin_unlock_irqrestore(&tp->lock, flags);
1da177e4
LT
1017}
1018
b57b7e5a
SH
1019static u32 rtl8169_get_msglevel(struct net_device *dev)
1020{
1021 struct rtl8169_private *tp = netdev_priv(dev);
1022
1023 return tp->msg_enable;
1024}
1025
1026static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1027{
1028 struct rtl8169_private *tp = netdev_priv(dev);
1029
1030 tp->msg_enable = value;
1031}
1032
d4a3a0fc
SH
1033static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
1034 "tx_packets",
1035 "rx_packets",
1036 "tx_errors",
1037 "rx_errors",
1038 "rx_missed",
1039 "align_errors",
1040 "tx_single_collisions",
1041 "tx_multi_collisions",
1042 "unicast",
1043 "broadcast",
1044 "multicast",
1045 "tx_aborted",
1046 "tx_underrun",
1047};
1048
1049struct rtl8169_counters {
1050 u64 tx_packets;
1051 u64 rx_packets;
1052 u64 tx_errors;
1053 u32 rx_errors;
1054 u16 rx_missed;
1055 u16 align_errors;
1056 u32 tx_one_collision;
1057 u32 tx_multi_collision;
1058 u64 rx_unicast;
1059 u64 rx_broadcast;
1060 u32 rx_multicast;
1061 u16 tx_aborted;
1062 u16 tx_underun;
1063};
1064
1065static int rtl8169_get_stats_count(struct net_device *dev)
1066{
1067 return ARRAY_SIZE(rtl8169_gstrings);
1068}
1069
1070static void rtl8169_get_ethtool_stats(struct net_device *dev,
1071 struct ethtool_stats *stats, u64 *data)
1072{
1073 struct rtl8169_private *tp = netdev_priv(dev);
1074 void __iomem *ioaddr = tp->mmio_addr;
1075 struct rtl8169_counters *counters;
1076 dma_addr_t paddr;
1077 u32 cmd;
1078
1079 ASSERT_RTNL();
1080
1081 counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr);
1082 if (!counters)
1083 return;
1084
1085 RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
1086 cmd = (u64)paddr & DMA_32BIT_MASK;
1087 RTL_W32(CounterAddrLow, cmd);
1088 RTL_W32(CounterAddrLow, cmd | CounterDump);
1089
1090 while (RTL_R32(CounterAddrLow) & CounterDump) {
1091 if (msleep_interruptible(1))
1092 break;
1093 }
1094
1095 RTL_W32(CounterAddrLow, 0);
1096 RTL_W32(CounterAddrHigh, 0);
1097
5b0384f4 1098 data[0] = le64_to_cpu(counters->tx_packets);
d4a3a0fc
SH
1099 data[1] = le64_to_cpu(counters->rx_packets);
1100 data[2] = le64_to_cpu(counters->tx_errors);
1101 data[3] = le32_to_cpu(counters->rx_errors);
1102 data[4] = le16_to_cpu(counters->rx_missed);
1103 data[5] = le16_to_cpu(counters->align_errors);
1104 data[6] = le32_to_cpu(counters->tx_one_collision);
1105 data[7] = le32_to_cpu(counters->tx_multi_collision);
1106 data[8] = le64_to_cpu(counters->rx_unicast);
1107 data[9] = le64_to_cpu(counters->rx_broadcast);
1108 data[10] = le32_to_cpu(counters->rx_multicast);
1109 data[11] = le16_to_cpu(counters->tx_aborted);
1110 data[12] = le16_to_cpu(counters->tx_underun);
1111
1112 pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr);
1113}
1114
1115static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1116{
1117 switch(stringset) {
1118 case ETH_SS_STATS:
1119 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1120 break;
1121 }
1122}
1123
1124
7282d491 1125static const struct ethtool_ops rtl8169_ethtool_ops = {
1da177e4
LT
1126 .get_drvinfo = rtl8169_get_drvinfo,
1127 .get_regs_len = rtl8169_get_regs_len,
1128 .get_link = ethtool_op_get_link,
1129 .get_settings = rtl8169_get_settings,
1130 .set_settings = rtl8169_set_settings,
b57b7e5a
SH
1131 .get_msglevel = rtl8169_get_msglevel,
1132 .set_msglevel = rtl8169_set_msglevel,
1da177e4
LT
1133 .get_rx_csum = rtl8169_get_rx_csum,
1134 .set_rx_csum = rtl8169_set_rx_csum,
1135 .get_tx_csum = ethtool_op_get_tx_csum,
1136 .set_tx_csum = ethtool_op_set_tx_csum,
1137 .get_sg = ethtool_op_get_sg,
1138 .set_sg = ethtool_op_set_sg,
1139 .get_tso = ethtool_op_get_tso,
1140 .set_tso = ethtool_op_set_tso,
1141 .get_regs = rtl8169_get_regs,
61a4dcc2
FR
1142 .get_wol = rtl8169_get_wol,
1143 .set_wol = rtl8169_set_wol,
d4a3a0fc
SH
1144 .get_strings = rtl8169_get_strings,
1145 .get_stats_count = rtl8169_get_stats_count,
1146 .get_ethtool_stats = rtl8169_get_ethtool_stats,
6d6525b7 1147 .get_perm_addr = ethtool_op_get_perm_addr,
1da177e4
LT
1148};
1149
1150static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum,
1151 int bitval)
1152{
1153 int val;
1154
1155 val = mdio_read(ioaddr, reg);
1156 val = (bitval == 1) ?
1157 val | (bitval << bitnum) : val & ~(0x0001 << bitnum);
5b0384f4 1158 mdio_write(ioaddr, reg, val & 0xffff);
1da177e4
LT
1159}
1160
1161static void rtl8169_get_mac_version(struct rtl8169_private *tp, void __iomem *ioaddr)
1162{
1163 const struct {
1164 u32 mask;
1165 int mac_version;
1166 } mac_info[] = {
bcf0bf90
FR
1167 { 0x38800000, RTL_GIGA_MAC_VER_15 },
1168 { 0x38000000, RTL_GIGA_MAC_VER_12 },
1169 { 0x34000000, RTL_GIGA_MAC_VER_13 },
1170 { 0x30800000, RTL_GIGA_MAC_VER_14 },
5b0384f4 1171 { 0x30000000, RTL_GIGA_MAC_VER_11 },
bcf0bf90
FR
1172 { 0x18000000, RTL_GIGA_MAC_VER_05 },
1173 { 0x10000000, RTL_GIGA_MAC_VER_04 },
1174 { 0x04000000, RTL_GIGA_MAC_VER_03 },
1175 { 0x00800000, RTL_GIGA_MAC_VER_02 },
1176 { 0x00000000, RTL_GIGA_MAC_VER_01 } /* Catch-all */
1da177e4
LT
1177 }, *p = mac_info;
1178 u32 reg;
1179
1180 reg = RTL_R32(TxConfig) & 0x7c800000;
1181 while ((reg & p->mask) != p->mask)
1182 p++;
1183 tp->mac_version = p->mac_version;
1184}
1185
1186static void rtl8169_print_mac_version(struct rtl8169_private *tp)
1187{
bcf0bf90 1188 dprintk("mac_version = 0x%02x\n", tp->mac_version);
1da177e4
LT
1189}
1190
1191static void rtl8169_get_phy_version(struct rtl8169_private *tp, void __iomem *ioaddr)
1192{
1193 const struct {
1194 u16 mask;
1195 u16 set;
1196 int phy_version;
1197 } phy_info[] = {
1198 { 0x000f, 0x0002, RTL_GIGA_PHY_VER_G },
1199 { 0x000f, 0x0001, RTL_GIGA_PHY_VER_F },
1200 { 0x000f, 0x0000, RTL_GIGA_PHY_VER_E },
1201 { 0x0000, 0x0000, RTL_GIGA_PHY_VER_D } /* Catch-all */
1202 }, *p = phy_info;
1203 u16 reg;
1204
64e4bfb4 1205 reg = mdio_read(ioaddr, MII_PHYSID2) & 0xffff;
1da177e4
LT
1206 while ((reg & p->mask) != p->set)
1207 p++;
1208 tp->phy_version = p->phy_version;
1209}
1210
1211static void rtl8169_print_phy_version(struct rtl8169_private *tp)
1212{
1213 struct {
1214 int version;
1215 char *msg;
1216 u32 reg;
1217 } phy_print[] = {
1218 { RTL_GIGA_PHY_VER_G, "RTL_GIGA_PHY_VER_G", 0x0002 },
1219 { RTL_GIGA_PHY_VER_F, "RTL_GIGA_PHY_VER_F", 0x0001 },
1220 { RTL_GIGA_PHY_VER_E, "RTL_GIGA_PHY_VER_E", 0x0000 },
1221 { RTL_GIGA_PHY_VER_D, "RTL_GIGA_PHY_VER_D", 0x0000 },
1222 { 0, NULL, 0x0000 }
1223 }, *p;
1224
1225 for (p = phy_print; p->msg; p++) {
1226 if (tp->phy_version == p->version) {
1227 dprintk("phy_version == %s (%04x)\n", p->msg, p->reg);
1228 return;
1229 }
1230 }
1231 dprintk("phy_version == Unknown\n");
1232}
1233
1234static void rtl8169_hw_phy_config(struct net_device *dev)
1235{
1236 struct rtl8169_private *tp = netdev_priv(dev);
1237 void __iomem *ioaddr = tp->mmio_addr;
1238 struct {
1239 u16 regs[5]; /* Beware of bit-sign propagation */
1240 } phy_magic[5] = { {
1241 { 0x0000, //w 4 15 12 0
1242 0x00a1, //w 3 15 0 00a1
1243 0x0008, //w 2 15 0 0008
1244 0x1020, //w 1 15 0 1020
1245 0x1000 } },{ //w 0 15 0 1000
1246 { 0x7000, //w 4 15 12 7
1247 0xff41, //w 3 15 0 ff41
1248 0xde60, //w 2 15 0 de60
1249 0x0140, //w 1 15 0 0140
1250 0x0077 } },{ //w 0 15 0 0077
1251 { 0xa000, //w 4 15 12 a
1252 0xdf01, //w 3 15 0 df01
1253 0xdf20, //w 2 15 0 df20
1254 0xff95, //w 1 15 0 ff95
1255 0xfa00 } },{ //w 0 15 0 fa00
1256 { 0xb000, //w 4 15 12 b
1257 0xff41, //w 3 15 0 ff41
1258 0xde20, //w 2 15 0 de20
1259 0x0140, //w 1 15 0 0140
1260 0x00bb } },{ //w 0 15 0 00bb
1261 { 0xf000, //w 4 15 12 f
1262 0xdf01, //w 3 15 0 df01
1263 0xdf20, //w 2 15 0 df20
1264 0xff95, //w 1 15 0 ff95
1265 0xbf00 } //w 0 15 0 bf00
1266 }
1267 }, *p = phy_magic;
1268 int i;
1269
1270 rtl8169_print_mac_version(tp);
1271 rtl8169_print_phy_version(tp);
1272
bcf0bf90 1273 if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
1da177e4
LT
1274 return;
1275 if (tp->phy_version >= RTL_GIGA_PHY_VER_H)
1276 return;
1277
1278 dprintk("MAC version != 0 && PHY version == 0 or 1\n");
1279 dprintk("Do final_reg2.cfg\n");
1280
1281 /* Shazam ! */
1282
bcf0bf90 1283 if (tp->mac_version == RTL_GIGA_MAC_VER_04) {
1da177e4
LT
1284 mdio_write(ioaddr, 31, 0x0002);
1285 mdio_write(ioaddr, 1, 0x90d0);
1286 mdio_write(ioaddr, 31, 0x0000);
1287 return;
1288 }
1289
1290 /* phy config for RTL8169s mac_version C chip */
1291 mdio_write(ioaddr, 31, 0x0001); //w 31 2 0 1
1292 mdio_write(ioaddr, 21, 0x1000); //w 21 15 0 1000
1293 mdio_write(ioaddr, 24, 0x65c7); //w 24 15 0 65c7
1294 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0
1295
1296 for (i = 0; i < ARRAY_SIZE(phy_magic); i++, p++) {
1297 int val, pos = 4;
1298
1299 val = (mdio_read(ioaddr, pos) & 0x0fff) | (p->regs[0] & 0xffff);
1300 mdio_write(ioaddr, pos, val);
1301 while (--pos >= 0)
1302 mdio_write(ioaddr, pos, p->regs[4 - pos] & 0xffff);
1303 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 1); //w 4 11 11 1
1304 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0
1305 }
1306 mdio_write(ioaddr, 31, 0x0000); //w 31 2 0 0
1307}
1308
1309static void rtl8169_phy_timer(unsigned long __opaque)
1310{
1311 struct net_device *dev = (struct net_device *)__opaque;
1312 struct rtl8169_private *tp = netdev_priv(dev);
1313 struct timer_list *timer = &tp->timer;
1314 void __iomem *ioaddr = tp->mmio_addr;
1315 unsigned long timeout = RTL8169_PHY_TIMEOUT;
1316
bcf0bf90 1317 assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
1da177e4
LT
1318 assert(tp->phy_version < RTL_GIGA_PHY_VER_H);
1319
64e4bfb4 1320 if (!(tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
1da177e4
LT
1321 return;
1322
1323 spin_lock_irq(&tp->lock);
1324
1325 if (tp->phy_reset_pending(ioaddr)) {
5b0384f4 1326 /*
1da177e4
LT
1327 * A busy loop could burn quite a few cycles on nowadays CPU.
1328 * Let's delay the execution of the timer for a few ticks.
1329 */
1330 timeout = HZ/10;
1331 goto out_mod_timer;
1332 }
1333
1334 if (tp->link_ok(ioaddr))
1335 goto out_unlock;
1336
b57b7e5a
SH
1337 if (netif_msg_link(tp))
1338 printk(KERN_WARNING "%s: PHY reset until link up\n", dev->name);
1da177e4
LT
1339
1340 tp->phy_reset_enable(ioaddr);
1341
1342out_mod_timer:
1343 mod_timer(timer, jiffies + timeout);
1344out_unlock:
1345 spin_unlock_irq(&tp->lock);
1346}
1347
1348static inline void rtl8169_delete_timer(struct net_device *dev)
1349{
1350 struct rtl8169_private *tp = netdev_priv(dev);
1351 struct timer_list *timer = &tp->timer;
1352
bcf0bf90 1353 if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) ||
1da177e4
LT
1354 (tp->phy_version >= RTL_GIGA_PHY_VER_H))
1355 return;
1356
1357 del_timer_sync(timer);
1358}
1359
1360static inline void rtl8169_request_timer(struct net_device *dev)
1361{
1362 struct rtl8169_private *tp = netdev_priv(dev);
1363 struct timer_list *timer = &tp->timer;
1364
bcf0bf90 1365 if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) ||
1da177e4
LT
1366 (tp->phy_version >= RTL_GIGA_PHY_VER_H))
1367 return;
1368
2efa53f3 1369 mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT);
1da177e4
LT
1370}
1371
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	disable_irq(pdev->irq);
	rtl8169_interrupt(pdev->irq, dev);
	enable_irq(pdev->irq);
}
#endif
1388
1389static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
1390 void __iomem *ioaddr)
1391{
1392 iounmap(ioaddr);
1393 pci_release_regions(pdev);
1394 pci_disable_device(pdev);
1395 free_netdev(dev);
1396}
1397
bf793295
FR
1398static void rtl8169_phy_reset(struct net_device *dev,
1399 struct rtl8169_private *tp)
1400{
1401 void __iomem *ioaddr = tp->mmio_addr;
1402 int i;
1403
1404 tp->phy_reset_enable(ioaddr);
1405 for (i = 0; i < 100; i++) {
1406 if (!tp->phy_reset_pending(ioaddr))
1407 return;
1408 msleep(1);
1409 }
1410 if (netif_msg_link(tp))
1411 printk(KERN_ERR "%s: PHY reset failed.\n", dev->name);
1412}
1413
4ff96fa6
FR
1414static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
1415{
1416 void __iomem *ioaddr = tp->mmio_addr;
1417 static int board_idx = -1;
1418 u8 autoneg, duplex;
1419 u16 speed;
1420
1421 board_idx++;
1422
1423 rtl8169_hw_phy_config(dev);
1424
1425 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
1426 RTL_W8(0x82, 0x01);
1427
bcf0bf90 1428 if (tp->mac_version < RTL_GIGA_MAC_VER_03) {
4ff96fa6
FR
1429 dprintk("Set PCI Latency=0x40\n");
1430 pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
1431 }
1432
bcf0bf90 1433 if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
4ff96fa6
FR
1434 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
1435 RTL_W8(0x82, 0x01);
1436 dprintk("Set PHY Reg 0x0bh = 0x00h\n");
1437 mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0
1438 }
1439
1440 rtl8169_link_option(board_idx, &autoneg, &speed, &duplex);
1441
bf793295
FR
1442 rtl8169_phy_reset(dev, tp);
1443
4ff96fa6
FR
1444 rtl8169_set_speed(dev, autoneg, speed, duplex);
1445
1446 if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
1447 printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name);
1448}
1449
5f787a1a
FR
1450static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1451{
1452 struct rtl8169_private *tp = netdev_priv(dev);
1453 struct mii_ioctl_data *data = if_mii(ifr);
1454
1455 if (!netif_running(dev))
1456 return -ENODEV;
1457
1458 switch (cmd) {
1459 case SIOCGMIIPHY:
1460 data->phy_id = 32; /* Internal PHY */
1461 return 0;
1462
1463 case SIOCGMIIREG:
1464 data->val_out = mdio_read(tp->mmio_addr, data->reg_num & 0x1f);
1465 return 0;
1466
1467 case SIOCSMIIREG:
1468 if (!capable(CAP_NET_ADMIN))
1469 return -EPERM;
1470 mdio_write(tp->mmio_addr, data->reg_num & 0x1f, data->val_in);
1471 return 0;
1472 }
1473 return -EOPNOTSUPP;
1474}
1475
1da177e4 1476static int __devinit
4ff96fa6 1477rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1da177e4 1478{
bcf0bf90 1479 const unsigned int region = rtl_cfg_info[ent->driver_data].region;
1da177e4 1480 struct rtl8169_private *tp;
4ff96fa6
FR
1481 struct net_device *dev;
1482 void __iomem *ioaddr;
315917d2
FR
1483 unsigned int pm_cap;
1484 int i, rc;
1da177e4 1485
4ff96fa6
FR
1486 if (netif_msg_drv(&debug)) {
1487 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
1488 MODULENAME, RTL8169_VERSION);
1489 }
1da177e4 1490
1da177e4 1491 dev = alloc_etherdev(sizeof (*tp));
4ff96fa6 1492 if (!dev) {
b57b7e5a 1493 if (netif_msg_drv(&debug))
9b91cf9d 1494 dev_err(&pdev->dev, "unable to alloc new ethernet\n");
4ff96fa6
FR
1495 rc = -ENOMEM;
1496 goto out;
1da177e4
LT
1497 }
1498
1499 SET_MODULE_OWNER(dev);
1500 SET_NETDEV_DEV(dev, &pdev->dev);
1501 tp = netdev_priv(dev);
c4028958 1502 tp->dev = dev;
b57b7e5a 1503 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
1da177e4
LT
1504
1505 /* enable device (incl. PCI PM wakeup and hotplug setup) */
1506 rc = pci_enable_device(pdev);
b57b7e5a 1507 if (rc < 0) {
2e8a538d 1508 if (netif_msg_probe(tp))
9b91cf9d 1509 dev_err(&pdev->dev, "enable failure\n");
4ff96fa6 1510 goto err_out_free_dev_1;
1da177e4
LT
1511 }
1512
1513 rc = pci_set_mwi(pdev);
1514 if (rc < 0)
4ff96fa6 1515 goto err_out_disable_2;
1da177e4
LT
1516
1517 /* save power state before pci_enable_device overwrites it */
1518 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
1519 if (pm_cap) {
4ff96fa6 1520 u16 pwr_command, acpi_idle_state;
1da177e4
LT
1521
1522 pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command);
1523 acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
1524 } else {
4ff96fa6 1525 if (netif_msg_probe(tp)) {
9b91cf9d 1526 dev_err(&pdev->dev,
4ff96fa6
FR
1527 "PowerManagement capability not found.\n");
1528 }
1da177e4
LT
1529 }
1530
1531 /* make sure PCI base addr 1 is MMIO */
bcf0bf90 1532 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
4ff96fa6 1533 if (netif_msg_probe(tp)) {
9b91cf9d 1534 dev_err(&pdev->dev,
bcf0bf90
FR
1535 "region #%d not an MMIO resource, aborting\n",
1536 region);
4ff96fa6 1537 }
1da177e4 1538 rc = -ENODEV;
4ff96fa6 1539 goto err_out_mwi_3;
1da177e4 1540 }
4ff96fa6 1541
1da177e4 1542 /* check for weird/broken PCI region reporting */
bcf0bf90 1543 if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
4ff96fa6 1544 if (netif_msg_probe(tp)) {
9b91cf9d 1545 dev_err(&pdev->dev,
4ff96fa6
FR
1546 "Invalid PCI region size(s), aborting\n");
1547 }
1da177e4 1548 rc = -ENODEV;
4ff96fa6 1549 goto err_out_mwi_3;
1da177e4
LT
1550 }
1551
1552 rc = pci_request_regions(pdev, MODULENAME);
b57b7e5a 1553 if (rc < 0) {
2e8a538d 1554 if (netif_msg_probe(tp))
9b91cf9d 1555 dev_err(&pdev->dev, "could not request regions.\n");
4ff96fa6 1556 goto err_out_mwi_3;
1da177e4
LT
1557 }
1558
1559 tp->cp_cmd = PCIMulRW | RxChkSum;
1560
1561 if ((sizeof(dma_addr_t) > 4) &&
1562 !pci_set_dma_mask(pdev, DMA_64BIT_MASK) && use_dac) {
1563 tp->cp_cmd |= PCIDAC;
1564 dev->features |= NETIF_F_HIGHDMA;
1565 } else {
1566 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1567 if (rc < 0) {
4ff96fa6 1568 if (netif_msg_probe(tp)) {
9b91cf9d 1569 dev_err(&pdev->dev,
4ff96fa6
FR
1570 "DMA configuration failed.\n");
1571 }
1572 goto err_out_free_res_4;
1da177e4
LT
1573 }
1574 }
1575
1576 pci_set_master(pdev);
1577
1578 /* ioremap MMIO region */
bcf0bf90 1579 ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
4ff96fa6 1580 if (!ioaddr) {
b57b7e5a 1581 if (netif_msg_probe(tp))
9b91cf9d 1582 dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
1da177e4 1583 rc = -EIO;
4ff96fa6 1584 goto err_out_free_res_4;
1da177e4
LT
1585 }
1586
1587 /* Unneeded ? Don't mess with Mrs. Murphy. */
1588 rtl8169_irq_mask_and_ack(ioaddr);
1589
1590 /* Soft reset the chip. */
1591 RTL_W8(ChipCmd, CmdReset);
1592
1593 /* Check that the chip has finished the reset. */
b518fa8e 1594 for (i = 100; i > 0; i--) {
1da177e4
LT
1595 if ((RTL_R8(ChipCmd) & CmdReset) == 0)
1596 break;
b518fa8e 1597 msleep_interruptible(1);
1da177e4
LT
1598 }
1599
1600 /* Identify chip attached to board */
1601 rtl8169_get_mac_version(tp, ioaddr);
1602 rtl8169_get_phy_version(tp, ioaddr);
1603
1604 rtl8169_print_mac_version(tp);
1605 rtl8169_print_phy_version(tp);
1606
1607 for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) {
1608 if (tp->mac_version == rtl_chip_info[i].mac_version)
1609 break;
1610 }
1611 if (i < 0) {
1612 /* Unknown chip: assume array element #0, original RTL-8169 */
b57b7e5a 1613 if (netif_msg_probe(tp)) {
2e8a538d 1614 dev_printk(KERN_DEBUG, &pdev->dev,
4ff96fa6
FR
1615 "unknown chip version, assuming %s\n",
1616 rtl_chip_info[0].name);
b57b7e5a 1617 }
1da177e4
LT
1618 i++;
1619 }
1620 tp->chipset = i;
1621
5d06a99f
FR
1622 RTL_W8(Cfg9346, Cfg9346_Unlock);
1623 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
1624 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
1625 RTL_W8(Cfg9346, Cfg9346_Lock);
1626
1da177e4
LT
1627 if (RTL_R8(PHYstatus) & TBI_Enable) {
1628 tp->set_speed = rtl8169_set_speed_tbi;
1629 tp->get_settings = rtl8169_gset_tbi;
1630 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
1631 tp->phy_reset_pending = rtl8169_tbi_reset_pending;
1632 tp->link_ok = rtl8169_tbi_link_ok;
1633
64e4bfb4 1634 tp->phy_1000_ctrl_reg = ADVERTISE_1000FULL; /* Implied by TBI */
1da177e4
LT
1635 } else {
1636 tp->set_speed = rtl8169_set_speed_xmii;
1637 tp->get_settings = rtl8169_gset_xmii;
1638 tp->phy_reset_enable = rtl8169_xmii_reset_enable;
1639 tp->phy_reset_pending = rtl8169_xmii_reset_pending;
1640 tp->link_ok = rtl8169_xmii_link_ok;
5f787a1a
FR
1641
1642 dev->do_ioctl = rtl8169_ioctl;
1da177e4
LT
1643 }
1644
1645 /* Get MAC address. FIXME: read EEPROM */
1646 for (i = 0; i < MAC_ADDR_LEN; i++)
1647 dev->dev_addr[i] = RTL_R8(MAC0 + i);
6d6525b7 1648 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1da177e4
LT
1649
1650 dev->open = rtl8169_open;
1651 dev->hard_start_xmit = rtl8169_start_xmit;
1652 dev->get_stats = rtl8169_get_stats;
1653 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
1654 dev->stop = rtl8169_close;
1655 dev->tx_timeout = rtl8169_tx_timeout;
07ce4064 1656 dev->set_multicast_list = rtl_set_rx_mode;
1da177e4
LT
1657 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
1658 dev->irq = pdev->irq;
1659 dev->base_addr = (unsigned long) ioaddr;
1660 dev->change_mtu = rtl8169_change_mtu;
1661
1662#ifdef CONFIG_R8169_NAPI
1663 dev->poll = rtl8169_poll;
1664 dev->weight = R8169_NAPI_WEIGHT;
1da177e4
LT
1665#endif
1666
1667#ifdef CONFIG_R8169_VLAN
1668 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1669 dev->vlan_rx_register = rtl8169_vlan_rx_register;
1da177e4
LT
1670#endif
1671
1672#ifdef CONFIG_NET_POLL_CONTROLLER
1673 dev->poll_controller = rtl8169_netpoll;
1674#endif
1675
1676 tp->intr_mask = 0xffff;
1677 tp->pci_dev = pdev;
1678 tp->mmio_addr = ioaddr;
bcf0bf90 1679 tp->align = rtl_cfg_info[ent->driver_data].align;
1da177e4 1680
2efa53f3
FR
1681 init_timer(&tp->timer);
1682 tp->timer.data = (unsigned long) dev;
1683 tp->timer.function = rtl8169_phy_timer;
1684
07ce4064
FR
1685 tp->hw_start = rtl_cfg_info[ent->driver_data].hw_start;
1686
1da177e4
LT
1687 spin_lock_init(&tp->lock);
1688
1689 rc = register_netdev(dev);
4ff96fa6
FR
1690 if (rc < 0)
1691 goto err_out_unmap_5;
1da177e4
LT
1692
1693 pci_set_drvdata(pdev, dev);
1694
b57b7e5a
SH
1695 if (netif_msg_probe(tp)) {
1696 printk(KERN_INFO "%s: %s at 0x%lx, "
1697 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
1698 "IRQ %d\n",
1699 dev->name,
bcf0bf90 1700 rtl_chip_info[tp->chipset].name,
b57b7e5a
SH
1701 dev->base_addr,
1702 dev->dev_addr[0], dev->dev_addr[1],
1703 dev->dev_addr[2], dev->dev_addr[3],
1704 dev->dev_addr[4], dev->dev_addr[5], dev->irq);
1705 }
1da177e4 1706
4ff96fa6 1707 rtl8169_init_phy(dev, tp);
1da177e4 1708
4ff96fa6
FR
1709out:
1710 return rc;
1da177e4 1711
4ff96fa6
FR
1712err_out_unmap_5:
1713 iounmap(ioaddr);
1714err_out_free_res_4:
1715 pci_release_regions(pdev);
1716err_out_mwi_3:
1717 pci_clear_mwi(pdev);
1718err_out_disable_2:
1719 pci_disable_device(pdev);
1720err_out_free_dev_1:
1721 free_netdev(dev);
1722 goto out;
1da177e4
LT
1723}
1724
1725static void __devexit
1726rtl8169_remove_one(struct pci_dev *pdev)
1727{
1728 struct net_device *dev = pci_get_drvdata(pdev);
1729 struct rtl8169_private *tp = netdev_priv(dev);
1730
1731 assert(dev != NULL);
1732 assert(tp != NULL);
1733
eb2a021c
FR
1734 flush_scheduled_work();
1735
1da177e4
LT
1736 unregister_netdev(dev);
1737 rtl8169_release_board(pdev, dev, tp->mmio_addr);
1738 pci_set_drvdata(pdev, NULL);
1739}
1740
1da177e4
LT
1741static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
1742 struct net_device *dev)
1743{
1744 unsigned int mtu = dev->mtu;
1745
1746 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1747}
1748
1749static int rtl8169_open(struct net_device *dev)
1750{
1751 struct rtl8169_private *tp = netdev_priv(dev);
1752 struct pci_dev *pdev = tp->pci_dev;
99f252b0 1753 int retval = -ENOMEM;
1da177e4 1754
1da177e4 1755
99f252b0 1756 rtl8169_set_rxbufsize(tp, dev);
1da177e4
LT
1757
1758 /*
1759 * Rx and Tx desscriptors needs 256 bytes alignment.
1760 * pci_alloc_consistent provides more.
1761 */
1762 tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
1763 &tp->TxPhyAddr);
1764 if (!tp->TxDescArray)
99f252b0 1765 goto out;
1da177e4
LT
1766
1767 tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
1768 &tp->RxPhyAddr);
1769 if (!tp->RxDescArray)
99f252b0 1770 goto err_free_tx_0;
1da177e4
LT
1771
1772 retval = rtl8169_init_ring(dev);
1773 if (retval < 0)
99f252b0 1774 goto err_free_rx_1;
1da177e4 1775
c4028958 1776 INIT_DELAYED_WORK(&tp->task, NULL);
1da177e4 1777
99f252b0
FR
1778 smp_mb();
1779
1780 retval = request_irq(dev->irq, rtl8169_interrupt, IRQF_SHARED,
1781 dev->name, dev);
1782 if (retval < 0)
1783 goto err_release_ring_2;
1784
07ce4064 1785 rtl_hw_start(dev);
1da177e4
LT
1786
1787 rtl8169_request_timer(dev);
1788
1789 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
1790out:
1791 return retval;
1792
99f252b0
FR
1793err_release_ring_2:
1794 rtl8169_rx_clear(tp);
1795err_free_rx_1:
1da177e4
LT
1796 pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
1797 tp->RxPhyAddr);
99f252b0 1798err_free_tx_0:
1da177e4
LT
1799 pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
1800 tp->TxPhyAddr);
1da177e4
LT
1801 goto out;
1802}
1803
1804static void rtl8169_hw_reset(void __iomem *ioaddr)
1805{
1806 /* Disable interrupts */
1807 rtl8169_irq_mask_and_ack(ioaddr);
1808
1809 /* Reset the chipset */
1810 RTL_W8(ChipCmd, CmdReset);
1811
1812 /* PCI commit */
1813 RTL_R8(ChipCmd);
1814}
1815
7f796d83 1816static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
9cb427b6
FR
1817{
1818 void __iomem *ioaddr = tp->mmio_addr;
1819 u32 cfg = rtl8169_rx_config;
1820
1821 cfg |= (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
1822 RTL_W32(RxConfig, cfg);
1823
1824 /* Set DMA burst size and Interframe Gap Time */
1825 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
1826 (InterFrameGap << TxInterFrameGapShift));
1827}
1828
07ce4064 1829static void rtl_hw_start(struct net_device *dev)
1da177e4
LT
1830{
1831 struct rtl8169_private *tp = netdev_priv(dev);
1832 void __iomem *ioaddr = tp->mmio_addr;
1833 u32 i;
1834
1835 /* Soft reset the chip. */
1836 RTL_W8(ChipCmd, CmdReset);
1837
1838 /* Check that the chip has finished the reset. */
b518fa8e 1839 for (i = 100; i > 0; i--) {
1da177e4
LT
1840 if ((RTL_R8(ChipCmd) & CmdReset) == 0)
1841 break;
b518fa8e 1842 msleep_interruptible(1);
1da177e4
LT
1843 }
1844
07ce4064
FR
1845 tp->hw_start(dev);
1846
1847 /* Enable all known interrupts by setting the interrupt mask. */
1848 RTL_W16(IntrMask, rtl8169_intr_mask);
1849
1850 netif_start_queue(dev);
1851}
1852
1853
7f796d83
FR
1854static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
1855 void __iomem *ioaddr)
1856{
1857 /*
1858 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
1859 * register to be written before TxDescAddrLow to work.
1860 * Switching from MMIO to I/O access fixes the issue as well.
1861 */
1862 RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
1863 RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_32BIT_MASK);
1864 RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
1865 RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_32BIT_MASK);
1866}
1867
1868static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
1869{
1870 u16 cmd;
1871
1872 cmd = RTL_R16(CPlusCmd);
1873 RTL_W16(CPlusCmd, cmd);
1874 return cmd;
1875}
1876
1877static void rtl_set_rx_max_size(void __iomem *ioaddr)
1878{
1879 /* Low hurts. Let's disable the filtering. */
1880 RTL_W16(RxMaxSize, 16383);
1881}
1882
07ce4064
FR
1883static void rtl_hw_start_8169(struct net_device *dev)
1884{
1885 struct rtl8169_private *tp = netdev_priv(dev);
1886 void __iomem *ioaddr = tp->mmio_addr;
1887 struct pci_dev *pdev = tp->pci_dev;
1888 u16 cmd;
1889
9cb427b6
FR
1890 if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
1891 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
1892 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
1893 }
1894
bcf0bf90
FR
1895 /* Undocumented stuff. */
1896 if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
bcf0bf90
FR
1897 /* Realtek's r1000_n.c driver uses '&& 0x01' here. Well... */
1898 if ((RTL_R8(Config2) & 0x07) & 0x01)
1899 RTL_W32(0x7c, 0x0007ffff);
1900
1901 RTL_W32(0x7c, 0x0007ff00);
1902
1903 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
1904 cmd = cmd & 0xef;
1905 pci_write_config_word(pdev, PCI_COMMAND, cmd);
1da177e4
LT
1906 }
1907
1908 RTL_W8(Cfg9346, Cfg9346_Unlock);
9cb427b6
FR
1909 if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
1910 (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
1911 (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
1912 (tp->mac_version == RTL_GIGA_MAC_VER_04))
1913 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
1914
1da177e4
LT
1915 RTL_W8(EarlyTxThres, EarlyTxThld);
1916
7f796d83 1917 rtl_set_rx_max_size(ioaddr);
1da177e4 1918
9cb427b6
FR
1919 if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
1920 (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
1921 (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
1922 (tp->mac_version == RTL_GIGA_MAC_VER_04))
7f796d83 1923 rtl_set_rx_tx_config_registers(tp);
1da177e4 1924
7f796d83 1925 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
1da177e4 1926
bcf0bf90
FR
1927 if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
1928 (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
1da177e4
LT
1929 dprintk(KERN_INFO PFX "Set MAC Reg C+CR Offset 0xE0. "
1930 "Bit-3 and bit-14 MUST be 1\n");
bcf0bf90 1931 tp->cp_cmd |= (1 << 14);
1da177e4
LT
1932 }
1933
bcf0bf90
FR
1934 RTL_W16(CPlusCmd, tp->cp_cmd);
1935
1da177e4
LT
1936 /*
1937 * Undocumented corner. Supposedly:
1938 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
1939 */
1940 RTL_W16(IntrMitigate, 0x0000);
1941
7f796d83 1942 rtl_set_rx_tx_desc_registers(tp, ioaddr);
9cb427b6
FR
1943
1944 if ((tp->mac_version != RTL_GIGA_MAC_VER_01) &&
1945 (tp->mac_version != RTL_GIGA_MAC_VER_02) &&
1946 (tp->mac_version != RTL_GIGA_MAC_VER_03) &&
1947 (tp->mac_version != RTL_GIGA_MAC_VER_04)) {
1948 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
7f796d83 1949 rtl_set_rx_tx_config_registers(tp);
9cb427b6
FR
1950 }
1951
1da177e4 1952 RTL_W8(Cfg9346, Cfg9346_Lock);
b518fa8e
FR
1953
1954 /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
1955 RTL_R8(IntrMask);
1da177e4
LT
1956
1957 RTL_W32(RxMissed, 0);
1958
07ce4064 1959 rtl_set_rx_mode(dev);
1da177e4
LT
1960
1961 /* no early-rx interrupts */
1962 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
07ce4064 1963}
1da177e4 1964
07ce4064
FR
1965static void rtl_hw_start_8168(struct net_device *dev)
1966{
2dd99530
FR
1967 struct rtl8169_private *tp = netdev_priv(dev);
1968 void __iomem *ioaddr = tp->mmio_addr;
1969
1970 RTL_W8(Cfg9346, Cfg9346_Unlock);
1971
1972 RTL_W8(EarlyTxThres, EarlyTxThld);
1973
1974 rtl_set_rx_max_size(ioaddr);
1975
1976 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
1977
1978 RTL_W16(CPlusCmd, tp->cp_cmd);
1979
1980 RTL_W16(IntrMitigate, 0x0000);
1981
1982 rtl_set_rx_tx_desc_registers(tp, ioaddr);
1983
1984 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
1985 rtl_set_rx_tx_config_registers(tp);
1986
1987 RTL_W8(Cfg9346, Cfg9346_Lock);
1988
1989 RTL_R8(IntrMask);
1990
1991 RTL_W32(RxMissed, 0);
1992
1993 rtl_set_rx_mode(dev);
1994
1995 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
07ce4064 1996}
1da177e4 1997
07ce4064
FR
/*
 * Bring up an RTL8101-family (fast ethernet) chip. Same skeleton as the
 * 8169/8168 start routines, plus a PCI config quirk for VER_13 parts.
 */
static void rtl_hw_start_8101(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	if (tp->mac_version == RTL_GIGA_MAC_VER_13) {
		/*
		 * Vendor-specified config-space pokes for VER_13.
		 * NOTE(review): the second write targets odd offset 0x69 with
		 * a word access — looks intentional per vendor code, but
		 * worth confirming against Realtek documentation.
		 */
		pci_write_config_word(pdev, 0x68, 0x00);
		pci_write_config_word(pdev, 0x69, 0x08);
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(EarlyTxThres, EarlyTxThld);

	rtl_set_rx_max_size(ioaddr);

	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;

	RTL_W16(CPlusCmd, tp->cp_cmd);

	/* No interrupt mitigation. */
	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	rtl_set_rx_tx_config_registers(tp);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* PCI commit of the posted writes above. */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
}
2036
/*
 * ndo change_mtu hook: validate the new MTU, and if the interface is up,
 * tear the rings down and rebuild them with the new buffer size.
 * Returns 0 on success or a negative errno.
 */
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret = 0;

	if (new_mtu < ETH_ZLEN || new_mtu > SafeMtu)
		return -EINVAL;

	dev->mtu = new_mtu;

	/* Interface down: the new size takes effect at the next open. */
	if (!netif_running(dev))
		goto out;

	rtl8169_down(dev);

	rtl8169_set_rxbufsize(tp, dev);

	/* NOTE(review): if ring init fails here, the device is left down
	 * with NAPI disabled until the caller intervenes. */
	ret = rtl8169_init_ring(dev);
	if (ret < 0)
		goto out;

	netif_poll_enable(dev);

	rtl_hw_start(dev);

	rtl8169_request_timer(dev);

out:
	return ret;
}
2067
/*
 * Neutralize an Rx descriptor: poison the DMA address with a recognizable
 * pattern, then clear DescOwn (and reserved bits) so the NIC will never
 * DMA into it.
 */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = 0x0badbadbadbadbadull;
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
2073
2074static void rtl8169_free_rx_skb(struct rtl8169_private *tp,
2075 struct sk_buff **sk_buff, struct RxDesc *desc)
2076{
2077 struct pci_dev *pdev = tp->pci_dev;
2078
2079 pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
2080 PCI_DMA_FROMDEVICE);
2081 dev_kfree_skb(*sk_buff);
2082 *sk_buff = NULL;
2083 rtl8169_make_unusable_by_asic(desc);
2084}
2085
/*
 * Hand a descriptor (and its buffer size) back to the NIC by setting
 * DescOwn, while preserving the end-of-ring marker on the last entry.
 */
static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;

	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}
2092
/*
 * Publish a freshly mapped buffer to the NIC. The wmb() guarantees the
 * DMA address is visible before DescOwn transfers ownership to hardware.
 */
static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
					u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	wmb();
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}
2100
15d31758
SH
2101static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
2102 struct net_device *dev,
2103 struct RxDesc *desc, int rx_buf_sz,
2104 unsigned int align)
1da177e4
LT
2105{
2106 struct sk_buff *skb;
2107 dma_addr_t mapping;
1da177e4 2108
15d31758 2109 skb = netdev_alloc_skb(dev, rx_buf_sz + align);
1da177e4
LT
2110 if (!skb)
2111 goto err_out;
2112
dcb92f88 2113 skb_reserve(skb, (align - 1) & (unsigned long)skb->data);
1da177e4 2114
689be439 2115 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
1da177e4
LT
2116 PCI_DMA_FROMDEVICE);
2117
2118 rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
1da177e4 2119out:
15d31758 2120 return skb;
1da177e4
LT
2121
2122err_out:
1da177e4
LT
2123 rtl8169_make_unusable_by_asic(desc);
2124 goto out;
2125}
2126
2127static void rtl8169_rx_clear(struct rtl8169_private *tp)
2128{
2129 int i;
2130
2131 for (i = 0; i < NUM_RX_DESC; i++) {
2132 if (tp->Rx_skbuff[i]) {
2133 rtl8169_free_rx_skb(tp, tp->Rx_skbuff + i,
2134 tp->RxDescArray + i);
2135 }
2136 }
2137}
2138
2139static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
2140 u32 start, u32 end)
2141{
2142 u32 cur;
5b0384f4 2143
4ae47c2d 2144 for (cur = start; end - cur != 0; cur++) {
15d31758
SH
2145 struct sk_buff *skb;
2146 unsigned int i = cur % NUM_RX_DESC;
1da177e4 2147
4ae47c2d
FR
2148 WARN_ON((s32)(end - cur) < 0);
2149
1da177e4
LT
2150 if (tp->Rx_skbuff[i])
2151 continue;
bcf0bf90 2152
15d31758
SH
2153 skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
2154 tp->RxDescArray + i,
2155 tp->rx_buf_sz, tp->align);
2156 if (!skb)
1da177e4 2157 break;
15d31758
SH
2158
2159 tp->Rx_skbuff[i] = skb;
1da177e4
LT
2160 }
2161 return cur - start;
2162}
2163
/* Set the end-of-ring bit so the NIC wraps back to the first descriptor. */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
2168
/* Reset all Tx/Rx ring cursors to the start of their rings. */
static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
{
	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}
2173
2174static int rtl8169_init_ring(struct net_device *dev)
2175{
2176 struct rtl8169_private *tp = netdev_priv(dev);
2177
2178 rtl8169_init_ring_indexes(tp);
2179
2180 memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
2181 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
2182
2183 if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
2184 goto err_out;
2185
2186 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
2187
2188 return 0;
2189
2190err_out:
2191 rtl8169_rx_clear(tp);
2192 return -ENOMEM;
2193}
2194
2195static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb,
2196 struct TxDesc *desc)
2197{
2198 unsigned int len = tx_skb->len;
2199
2200 pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
2201 desc->opts1 = 0x00;
2202 desc->opts2 = 0x00;
2203 desc->addr = 0x00;
2204 tx_skb->len = 0;
2205}
2206
2207static void rtl8169_tx_clear(struct rtl8169_private *tp)
2208{
2209 unsigned int i;
2210
2211 for (i = tp->dirty_tx; i < tp->dirty_tx + NUM_TX_DESC; i++) {
2212 unsigned int entry = i % NUM_TX_DESC;
2213 struct ring_info *tx_skb = tp->tx_skb + entry;
2214 unsigned int len = tx_skb->len;
2215
2216 if (len) {
2217 struct sk_buff *skb = tx_skb->skb;
2218
2219 rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb,
2220 tp->TxDescArray + entry);
2221 if (skb) {
2222 dev_kfree_skb(skb);
2223 tx_skb->skb = NULL;
2224 }
2225 tp->stats.tx_dropped++;
2226 }
2227 }
2228 tp->cur_tx = tp->dirty_tx = 0;
2229}
2230
/*
 * Re-target the driver's single shared delayed_work at @task and schedule
 * it to run roughly 4 jiffies from now. Callers must tolerate the work
 * item being repointed (only one task can be pending at a time).
 */
static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	PREPARE_DELAYED_WORK(&tp->task, task);
	schedule_delayed_work(&tp->task, 4);
}
2238
/*
 * Wait until neither the hard irq handler nor the NAPI poll routine is
 * running, then mask and acknowledge all chip interrupts. NAPI is
 * re-enabled before returning; the caller relies on interrupts staying
 * masked at the chip.
 */
static void rtl8169_wait_for_quiescence(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	synchronize_irq(dev->irq);

	/* Wait for any pending NAPI task to complete */
	netif_poll_disable(dev);

	rtl8169_irq_mask_and_ack(ioaddr);

	netif_poll_enable(dev);
}
2253
c4028958 2254static void rtl8169_reinit_task(struct work_struct *work)
1da177e4 2255{
c4028958
DH
2256 struct rtl8169_private *tp =
2257 container_of(work, struct rtl8169_private, task.work);
2258 struct net_device *dev = tp->dev;
1da177e4
LT
2259 int ret;
2260
eb2a021c
FR
2261 rtnl_lock();
2262
2263 if (!netif_running(dev))
2264 goto out_unlock;
2265
2266 rtl8169_wait_for_quiescence(dev);
2267 rtl8169_close(dev);
1da177e4
LT
2268
2269 ret = rtl8169_open(dev);
2270 if (unlikely(ret < 0)) {
2271 if (net_ratelimit()) {
b57b7e5a
SH
2272 struct rtl8169_private *tp = netdev_priv(dev);
2273
2274 if (netif_msg_drv(tp)) {
2275 printk(PFX KERN_ERR
2276 "%s: reinit failure (status = %d)."
2277 " Rescheduling.\n", dev->name, ret);
2278 }
1da177e4
LT
2279 }
2280 rtl8169_schedule_work(dev, rtl8169_reinit_task);
2281 }
eb2a021c
FR
2282
2283out_unlock:
2284 rtnl_unlock();
1da177e4
LT
2285}
2286
c4028958 2287static void rtl8169_reset_task(struct work_struct *work)
1da177e4 2288{
c4028958
DH
2289 struct rtl8169_private *tp =
2290 container_of(work, struct rtl8169_private, task.work);
2291 struct net_device *dev = tp->dev;
1da177e4 2292
eb2a021c
FR
2293 rtnl_lock();
2294
1da177e4 2295 if (!netif_running(dev))
eb2a021c 2296 goto out_unlock;
1da177e4
LT
2297
2298 rtl8169_wait_for_quiescence(dev);
2299
2300 rtl8169_rx_interrupt(dev, tp, tp->mmio_addr);
2301 rtl8169_tx_clear(tp);
2302
2303 if (tp->dirty_rx == tp->cur_rx) {
2304 rtl8169_init_ring_indexes(tp);
07ce4064 2305 rtl_hw_start(dev);
1da177e4
LT
2306 netif_wake_queue(dev);
2307 } else {
2308 if (net_ratelimit()) {
b57b7e5a
SH
2309 struct rtl8169_private *tp = netdev_priv(dev);
2310
2311 if (netif_msg_intr(tp)) {
2312 printk(PFX KERN_EMERG
2313 "%s: Rx buffers shortage\n", dev->name);
2314 }
1da177e4
LT
2315 }
2316 rtl8169_schedule_work(dev, rtl8169_reset_task);
2317 }
eb2a021c
FR
2318
2319out_unlock:
2320 rtnl_unlock();
1da177e4
LT
2321}
2322
/*
 * ndo tx_timeout hook: stop the chip immediately, then defer the full
 * ring/hardware recovery to process context.
 */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_hw_reset(tp->mmio_addr);

	/* Let's wait a bit while any (async) irq lands on */
	rtl8169_schedule_work(dev, rtl8169_reset_task);
}
2332
/*
 * Map every paged fragment of @skb onto consecutive Tx descriptors
 * following tp->cur_tx. @opts1 (including DescOwn) is applied to each
 * fragment descriptor; the last one additionally gets LastFrag and owns
 * the skb pointer for later freeing. Returns the number of fragment
 * descriptors used (0 for a linear skb).
 */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 opts1)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc *txd;

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 status, len;
		void *addr;

		/* Fragments start one slot after the head descriptor. */
		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = frag->size;
		addr = ((void *) page_address(frag->page)) + frag->page_offset;
		mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE);

		/* anti gcc 2.95.3 bugware (sic) */
		status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		/* The last fragment releases the skb at Tx completion. */
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;
}
2370
2371static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
2372{
2373 if (dev->features & NETIF_F_TSO) {
7967168c 2374 u32 mss = skb_shinfo(skb)->gso_size;
1da177e4
LT
2375
2376 if (mss)
2377 return LargeSend | ((mss & MSSMask) << MSSShift);
2378 }
84fa7933 2379 if (skb->ip_summed == CHECKSUM_PARTIAL) {
eddc9ec5 2380 const struct iphdr *ip = ip_hdr(skb);
1da177e4
LT
2381
2382 if (ip->protocol == IPPROTO_TCP)
2383 return IPCS | TCPCS;
2384 else if (ip->protocol == IPPROTO_UDP)
2385 return IPCS | UDPCS;
2386 WARN_ON(1); /* we need a WARN() */
2387 }
2388 return 0;
2389}
2390
/*
 * ndo hard_start_xmit hook. Maps the skb (head + fragments) onto the Tx
 * ring, transfers descriptor ownership to the NIC and kicks the Tx poll
 * bit. The barrier/ownership ordering is load-bearing: fragment
 * descriptors are published first, then the head descriptor's DescOwn is
 * set after a wmb(), so the chip never sees a half-built chain.
 */
static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int frags, entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	dma_addr_t mapping;
	u32 status, len;
	u32 opts1;
	int ret = NETDEV_TX_OK;

	/* The queue should have been stopped before the ring filled up. */
	if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
		if (netif_msg_drv(tp)) {
			printk(KERN_ERR
			       "%s: BUG! Tx Ring full when queue awake!\n",
			       dev->name);
		}
		goto err_stop;
	}

	/* Head slot still owned by the NIC: no room. */
	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop;

	opts1 = DescOwn | rtl8169_tso_csum(skb, dev);

	/* Fragments are published before the head descriptor below. */
	frags = rtl8169_xmit_frags(tp, skb, opts1);
	if (frags) {
		len = skb_headlen(skb);
		opts1 |= FirstFrag;
	} else {
		len = skb->len;

		/* Hardware needs at least ETH_ZLEN bytes; pad short frames. */
		if (unlikely(len < ETH_ZLEN)) {
			if (skb_padto(skb, ETH_ZLEN))
				goto err_update_stats;
			len = ETH_ZLEN;
		}

		opts1 |= FirstFrag | LastFrag;
		tp->tx_skb[entry].skb = skb;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);
	txd->opts2 = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));

	/* Address/options must be visible before DescOwn is set. */
	wmb();

	/* anti gcc 2.95.3 bugware (sic) */
	status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
	txd->opts1 = cpu_to_le32(status);

	dev->trans_start = jiffies;

	tp->cur_tx += frags + 1;

	/* Pair with the smp_rmb() in rtl8169_tx_interrupt(). */
	smp_wmb();

	RTL_W8(TxPoll, 0x40);	/* set polling bit */

	if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		/* Re-check after the stop: a racing Tx completion may have
		 * freed slots between the test and netif_stop_queue(). */
		smp_rmb();
		if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
			netif_wake_queue(dev);
	}

out:
	return ret;

err_stop:
	netif_stop_queue(dev);
	ret = NETDEV_TX_BUSY;
err_update_stats:
	tp->stats.tx_dropped++;
	goto out;
}
2470
/*
 * Handle a SYSErr interrupt (PCI bus error). Logs the PCI command/status
 * words, clears the sticky error bits, optionally downgrades from 64-bit
 * DAC addressing on early boot failures, resets the chip and schedules a
 * full reinit from process context.
 */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	void __iomem *ioaddr = tp->mmio_addr;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	if (netif_msg_intr(tp)) {
		printk(KERN_ERR
		       "%s: PCI error (cmd = 0x%04x, status = 0x%04x).\n",
		       dev->name, pci_cmd, pci_status);
	}

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	/* Write-one-to-clear the latched PCI error status bits. */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	/* The infamous DAC f*ckup only happens at boot time */
	if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
		if (netif_msg_intr(tp))
			printk(KERN_INFO "%s: disabling PCI DAC.\n", dev->name);
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
	}

	rtl8169_hw_reset(ioaddr);

	rtl8169_schedule_work(dev, rtl8169_reinit_task);
}
2520
/*
 * Reclaim completed Tx descriptors: account bytes/packets, unmap each
 * slot, free the skb on its LastFrag descriptor and advance dirty_tx.
 * Wakes the queue once enough room is available. The smp_rmb()/smp_wmb()
 * pair with the barriers in rtl8169_start_xmit().
 */
static void
rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
		     void __iomem *ioaddr)
{
	unsigned int dirty_tx, tx_left;

	assert(dev != NULL);
	assert(tp != NULL);
	assert(ioaddr != NULL);

	dirty_tx = tp->dirty_tx;
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 len = tx_skb->len;
		u32 status;

		rmb();
		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		if (status & DescOwn)
			break;

		/* NOTE(review): counters are bumped per descriptor, so a
		 * multi-fragment packet inflates tx_packets — confirm whether
		 * per-LastFrag accounting was intended. */
		tp->stats.tx_bytes += len;
		tp->stats.tx_packets++;

		rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry);

		if (status & LastFrag) {
			dev_kfree_skb_irq(tx_skb->skb);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		smp_wmb();
		if (netif_queue_stopped(dev) &&
		    (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
	}
}
2568
126fa4b9
FR
2569static inline int rtl8169_fragmented_frame(u32 status)
2570{
2571 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
2572}
2573
1da177e4
LT
2574static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
2575{
2576 u32 opts1 = le32_to_cpu(desc->opts1);
2577 u32 status = opts1 & RxProtoMask;
2578
2579 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
2580 ((status == RxProtoUDP) && !(opts1 & UDPFail)) ||
2581 ((status == RxProtoIP) && !(opts1 & IPFail)))
2582 skb->ip_summed = CHECKSUM_UNNECESSARY;
2583 else
2584 skb->ip_summed = CHECKSUM_NONE;
2585}
2586
b449655f
SH
2587static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
2588 struct pci_dev *pdev, dma_addr_t addr,
2589 unsigned int align)
1da177e4 2590{
b449655f
SH
2591 struct sk_buff *skb;
2592 bool done = false;
1da177e4 2593
b449655f
SH
2594 if (pkt_size >= rx_copybreak)
2595 goto out;
1da177e4 2596
b449655f
SH
2597 skb = dev_alloc_skb(pkt_size + align);
2598 if (!skb)
2599 goto out;
2600
2601 pci_dma_sync_single_for_cpu(pdev, addr, pkt_size, PCI_DMA_FROMDEVICE);
2602 skb_reserve(skb, (align - 1) & (unsigned long)skb->data);
2603 skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
2604 *sk_buff = skb;
2605 done = true;
2606out:
2607 return done;
1da177e4
LT
2608}
2609
2610static int
2611rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
2612 void __iomem *ioaddr)
2613{
2614 unsigned int cur_rx, rx_left;
2615 unsigned int delta, count;
2616
2617 assert(dev != NULL);
2618 assert(tp != NULL);
2619 assert(ioaddr != NULL);
2620
2621 cur_rx = tp->cur_rx;
2622 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
2623 rx_left = rtl8169_rx_quota(rx_left, (u32) dev->quota);
2624
4dcb7d33 2625 for (; rx_left > 0; rx_left--, cur_rx++) {
1da177e4 2626 unsigned int entry = cur_rx % NUM_RX_DESC;
126fa4b9 2627 struct RxDesc *desc = tp->RxDescArray + entry;
1da177e4
LT
2628 u32 status;
2629
2630 rmb();
126fa4b9 2631 status = le32_to_cpu(desc->opts1);
1da177e4
LT
2632
2633 if (status & DescOwn)
2634 break;
4dcb7d33 2635 if (unlikely(status & RxRES)) {
b57b7e5a
SH
2636 if (netif_msg_rx_err(tp)) {
2637 printk(KERN_INFO
2638 "%s: Rx ERROR. status = %08x\n",
2639 dev->name, status);
2640 }
1da177e4
LT
2641 tp->stats.rx_errors++;
2642 if (status & (RxRWT | RxRUNT))
2643 tp->stats.rx_length_errors++;
2644 if (status & RxCRC)
2645 tp->stats.rx_crc_errors++;
9dccf611
FR
2646 if (status & RxFOVF) {
2647 rtl8169_schedule_work(dev, rtl8169_reset_task);
2648 tp->stats.rx_fifo_errors++;
2649 }
126fa4b9 2650 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
1da177e4 2651 } else {
1da177e4 2652 struct sk_buff *skb = tp->Rx_skbuff[entry];
b449655f 2653 dma_addr_t addr = le64_to_cpu(desc->addr);
1da177e4 2654 int pkt_size = (status & 0x00001FFF) - 4;
b449655f 2655 struct pci_dev *pdev = tp->pci_dev;
1da177e4 2656
126fa4b9
FR
2657 /*
2658 * The driver does not support incoming fragmented
2659 * frames. They are seen as a symptom of over-mtu
2660 * sized frames.
2661 */
2662 if (unlikely(rtl8169_fragmented_frame(status))) {
2663 tp->stats.rx_dropped++;
2664 tp->stats.rx_length_errors++;
2665 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
4dcb7d33 2666 continue;
126fa4b9
FR
2667 }
2668
1da177e4 2669 rtl8169_rx_csum(skb, desc);
bcf0bf90 2670
b449655f
SH
2671 if (rtl8169_try_rx_copy(&skb, pkt_size, pdev, addr,
2672 tp->align)) {
2673 pci_dma_sync_single_for_device(pdev, addr,
2674 pkt_size, PCI_DMA_FROMDEVICE);
2675 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
2676 } else {
2677 pci_unmap_single(pdev, addr, pkt_size,
2678 PCI_DMA_FROMDEVICE);
1da177e4
LT
2679 tp->Rx_skbuff[entry] = NULL;
2680 }
2681
1da177e4
LT
2682 skb_put(skb, pkt_size);
2683 skb->protocol = eth_type_trans(skb, dev);
2684
2685 if (rtl8169_rx_vlan_skb(tp, desc, skb) < 0)
2686 rtl8169_rx_skb(skb);
2687
2688 dev->last_rx = jiffies;
2689 tp->stats.rx_bytes += pkt_size;
2690 tp->stats.rx_packets++;
2691 }
1da177e4
LT
2692 }
2693
2694 count = cur_rx - tp->cur_rx;
2695 tp->cur_rx = cur_rx;
2696
2697 delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
b57b7e5a 2698 if (!delta && count && netif_msg_intr(tp))
1da177e4
LT
2699 printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
2700 tp->dirty_rx += delta;
2701
2702 /*
2703 * FIXME: until there is periodic timer to try and refill the ring,
2704 * a temporary shortage may definitely kill the Rx process.
2705 * - disable the asic to try and avoid an overflow and kick it again
2706 * after refill ?
2707 * - how do others driver handle this condition (Uh oh...).
2708 */
b57b7e5a 2709 if ((tp->dirty_rx + NUM_RX_DESC == tp->cur_rx) && netif_msg_intr(tp))
1da177e4
LT
2710 printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name);
2711
2712 return count;
2713}
2714
/* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */
static irqreturn_t
rtl8169_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct rtl8169_private *tp = netdev_priv(dev);
	int boguscnt = max_interrupt_work;	/* loop bound against irq storms */
	void __iomem *ioaddr = tp->mmio_addr;
	int status;
	int handled = 0;

	do {
		status = RTL_R16(IntrStatus);

		/* hotplug/major error/no more work/shared irq */
		if ((status == 0xFFFF) || !status)
			break;

		handled = 1;

		/* Interface going down: silence the chip and bail out. */
		if (unlikely(!netif_running(dev))) {
			rtl8169_asic_down(ioaddr);
			goto out;
		}

		status &= tp->intr_mask;
		/* A FIFO overflow is acked together with RxOverflow. */
		RTL_W16(IntrStatus,
			(status & RxFIFOOver) ? (status | RxOverflow) : status);

		if (!(status & rtl8169_intr_mask))
			break;

		if (unlikely(status & SYSErr)) {
			rtl8169_pcierr_interrupt(dev);
			break;
		}

		if (status & LinkChg)
			rtl8169_check_link_status(dev, tp, ioaddr);

#ifdef CONFIG_R8169_NAPI
		/* Mask the NAPI-handled events and let the poll routine
		 * re-enable them when it completes. */
		RTL_W16(IntrMask, rtl8169_intr_mask & ~rtl8169_napi_event);
		tp->intr_mask = ~rtl8169_napi_event;

		if (likely(netif_rx_schedule_prep(dev)))
			__netif_rx_schedule(dev);
		else if (netif_msg_intr(tp)) {
			printk(KERN_INFO "%s: interrupt %04x taken in poll\n",
			       dev->name, status);
		}
		break;
#else
		/* Rx interrupt */
		if (status & (RxOK | RxOverflow | RxFIFOOver)) {
			rtl8169_rx_interrupt(dev, tp, ioaddr);
		}
		/* Tx interrupt */
		if (status & (TxOK | TxErr))
			rtl8169_tx_interrupt(dev, tp, ioaddr);
#endif

		boguscnt--;
	} while (boguscnt > 0);

	if (boguscnt <= 0) {
		if (netif_msg_intr(tp) && net_ratelimit()) {
			printk(KERN_WARNING
			       "%s: Too much work at interrupt!\n", dev->name);
		}
		/* Clear all interrupt sources. */
		RTL_W16(IntrStatus, 0xffff);
	}
out:
	return IRQ_RETVAL(handled);
}
2790
#ifdef CONFIG_R8169_NAPI
/*
 * NAPI poll callback: process Rx up to the budget, reap completed Tx,
 * and when all pending work fits under budget, leave polling mode and
 * unmask the chip interrupts again. Returns nonzero while more work
 * remains (keep polling).
 */
static int rtl8169_poll(struct net_device *dev, int *budget)
{
	unsigned int work_done, work_to_do = min(*budget, dev->quota);
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	work_done = rtl8169_rx_interrupt(dev, tp, ioaddr);
	rtl8169_tx_interrupt(dev, tp, ioaddr);

	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done < work_to_do) {
		netif_rx_complete(dev);
		tp->intr_mask = 0xffff;
		/*
		 * 20040426: the barrier is not strictly required but the
		 * behavior of the irq handler could be less predictable
		 * without it. Btw, the lack of flush for the posted pci
		 * write is safe - FR
		 */
		smp_wmb();
		RTL_W16(IntrMask, rtl8169_intr_mask);
	}

	return (work_done >= work_to_do);
}
#endif
2820
/*
 * Quiesce the device: stop the timer and queue, shut the ASIC down,
 * flush the missed-frame counter into the stats, wait out any in-flight
 * irq/NAPI/xmit activity, and finally release both rings. May loop back
 * to core_down if the irq mask indicates interrupts were re-enabled
 * behind our back (e.g. by a racing poll).
 */
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;
	unsigned int intrmask;

	rtl8169_delete_timer(dev);

	netif_stop_queue(dev);

core_down:
	spin_lock_irq(&tp->lock);

	rtl8169_asic_down(ioaddr);

	/* Update the error counts. */
	tp->stats.rx_missed_errors += RTL_R32(RxMissed);
	RTL_W32(RxMissed, 0);

	spin_unlock_irq(&tp->lock);

	synchronize_irq(dev->irq);

	/* Disable NAPI only once even if we loop back to core_down. */
	if (!poll_locked) {
		netif_poll_disable(dev);
		poll_locked++;
	}

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_sched();  /* FIXME: should this be synchronize_irq()? */

	/*
	 * And now for the 50k$ question: are IRQ disabled or not ?
	 *
	 * Two paths lead here:
	 * 1) dev->close
	 *    -> netif_running() is available to sync the current code and the
	 *       IRQ handler. See rtl8169_interrupt for details.
	 * 2) dev->change_mtu
	 *    -> rtl8169_poll can not be issued again and re-enable the
	 *       interruptions. Let's simply issue the IRQ down sequence again.
	 *
	 * No loop if hotpluged or major error (0xffff).
	 */
	intrmask = RTL_R16(IntrMask);
	if (intrmask && (intrmask != 0xffff))
		goto core_down;

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);
}
2874
/*
 * ndo stop hook: bring the device down, release the irq, re-arm NAPI for
 * the next open, and free both descriptor rings' coherent DMA memory.
 * Always returns 0.
 */
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	rtl8169_down(dev);

	free_irq(dev->irq, dev);

	/* rtl8169_down() left NAPI disabled; re-enable for the next open. */
	netif_poll_enable(dev);

	pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
			    tp->RxPhyAddr);
	pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
			    tp->TxPhyAddr);
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	return 0;
}
2895
/*
 * Program the Rx filter from dev->flags and the multicast list:
 * promiscuous accepts everything, oversized/ALLMULTI lists accept all
 * multicast, otherwise a 64-bit CRC hash filter is built. Certain MAC
 * versions (VER_11..15) force the hash filter wide open — presumably a
 * hardware limitation of those parts; confirm against Realtek errata.
 */
static void rtl_set_rx_mode(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	int i, rx_mode;
	u32 tmp = 0;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		if (netif_msg_link(tp)) {
			printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
			       dev->name);
		}
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			/* Top 6 bits of the CRC index one of 64 hash bits. */
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	tmp = rtl8169_rx_config | rx_mode |
	      (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);

	if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_13) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_15)) {
		mc_filter[0] = 0xffffffff;
		mc_filter[1] = 0xffffffff;
	}

	RTL_W32(RxConfig, tmp);
	RTL_W32(MAR0 + 0, mc_filter[0]);
	RTL_W32(MAR0 + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}
2952
2953/**
2954 * rtl8169_get_stats - Get rtl8169 read/write statistics
2955 * @dev: The Ethernet Device to get statistics for
2956 *
2957 * Get TX/RX statistics for rtl8169
2958 */
2959static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
2960{
2961 struct rtl8169_private *tp = netdev_priv(dev);
2962 void __iomem *ioaddr = tp->mmio_addr;
2963 unsigned long flags;
2964
2965 if (netif_running(dev)) {
2966 spin_lock_irqsave(&tp->lock, flags);
2967 tp->stats.rx_missed_errors += RTL_R32(RxMissed);
2968 RTL_W32(RxMissed, 0);
2969 spin_unlock_irqrestore(&tp->lock, flags);
2970 }
5b0384f4 2971
1da177e4
LT
2972 return &tp->stats;
2973}
2974
5d06a99f
FR
#ifdef CONFIG_PM

/*
 * PCI suspend hook: if the interface is up, detach it and quiesce the
 * chip (flushing the missed-frame counter) before saving PCI state,
 * arming wake-on-LAN per tp->wol_enabled and dropping to the target
 * power state.
 */
static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	if (!netif_running(dev))
		goto out_pci_suspend;

	netif_device_detach(dev);
	netif_stop_queue(dev);

	spin_lock_irq(&tp->lock);

	rtl8169_asic_down(ioaddr);

	tp->stats.rx_missed_errors += RTL_R32(RxMissed);
	RTL_W32(RxMissed, 0);

	spin_unlock_irq(&tp->lock);

out_pci_suspend:
	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
3005
/*
 * PCI resume hook: restore power/config state, disable wake-up, and if
 * the interface was up, reattach it and defer the hardware restart to
 * the reset work item.
 */
static int rtl8169_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	if (!netif_running(dev))
		goto out;

	netif_device_attach(dev);

	/* Full ring/hardware re-init happens in process context. */
	rtl8169_schedule_work(dev, rtl8169_reset_task);
out:
	return 0;
}

#endif /* CONFIG_PM */
3025
1da177e4
LT
3026static struct pci_driver rtl8169_pci_driver = {
3027 .name = MODULENAME,
3028 .id_table = rtl8169_pci_tbl,
3029 .probe = rtl8169_init_one,
3030 .remove = __devexit_p(rtl8169_remove_one),
3031#ifdef CONFIG_PM
3032 .suspend = rtl8169_suspend,
3033 .resume = rtl8169_resume,
3034#endif
3035};
3036
3037static int __init
3038rtl8169_init_module(void)
3039{
29917620 3040 return pci_register_driver(&rtl8169_pci_driver);
1da177e4
LT
3041}
3042
3043static void __exit
3044rtl8169_cleanup_module(void)
3045{
3046 pci_unregister_driver(&rtl8169_pci_driver);
3047}
3048
/* Hook the init/cleanup routines into module load/unload. */
module_init(rtl8169_init_module);
module_exit(rtl8169_cleanup_module);