]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/r8169.c
[HDLC] Fix dev->header_cache_update having a random value.
[net-next-2.6.git] / drivers / net / r8169.c
CommitLineData
1da177e4
LT
1/*
2=========================================================================
3 r8169.c: A RealTek RTL-8169 Gigabit Ethernet driver for Linux kernel 2.4.x.
4 --------------------------------------------------------------------
5
6 History:
7 Feb 4 2002 - created initially by ShuChen <shuchen@realtek.com.tw>.
8 May 20 2002 - Add link status force-mode and TBI mode support.
5b0384f4 9 2004 - Massive updates. See kernel SCM system for details.
1da177e4
LT
10=========================================================================
11 1. [DEPRECATED: use ethtool instead] The media can be forced in 5 modes.
12 Command: 'insmod r8169 media = SET_MEDIA'
13 Ex: 'insmod r8169 media = 0x04' will force PHY to operate in 100Mpbs Half-duplex.
5b0384f4 14
1da177e4
LT
15 SET_MEDIA can be:
16 _10_Half = 0x01
17 _10_Full = 0x02
18 _100_Half = 0x04
19 _100_Full = 0x08
20 _1000_Full = 0x10
5b0384f4 21
1da177e4
LT
22 2. Support TBI mode.
23=========================================================================
24VERSION 1.1 <2002/10/4>
25
26 The bit4:0 of MII register 4 is called "selector field", and have to be
27 00001b to indicate support of IEEE std 802.3 during NWay process of
5b0384f4 28 exchanging Link Code Word (FLP).
1da177e4
LT
29
30VERSION 1.2 <2002/11/30>
31
32 - Large style cleanup
33 - Use ether_crc in stock kernel (linux/crc32.h)
34 - Copy mc_filter setup code from 8139cp
35 (includes an optimization, and avoids set_bit use)
36
37VERSION 1.6LK <2004/04/14>
38
39 - Merge of Realtek's version 1.6
40 - Conversion to DMA API
41 - Suspend/resume
42 - Endianness
43 - Misc Rx/Tx bugs
44
45VERSION 2.2LK <2005/01/25>
46
47 - RX csum, TX csum/SG, TSO
48 - VLAN
49 - baby (< 7200) Jumbo frames support
50 - Merge of Realtek's version 2.2 (new phy)
51 */
52
53#include <linux/module.h>
54#include <linux/moduleparam.h>
55#include <linux/pci.h>
56#include <linux/netdevice.h>
57#include <linux/etherdevice.h>
58#include <linux/delay.h>
59#include <linux/ethtool.h>
60#include <linux/mii.h>
61#include <linux/if_vlan.h>
62#include <linux/crc32.h>
63#include <linux/in.h>
64#include <linux/ip.h>
65#include <linux/tcp.h>
66#include <linux/init.h>
67#include <linux/dma-mapping.h>
68
69#include <asm/io.h>
70#include <asm/irq.h>
71
f7ccf420
SH
72#ifdef CONFIG_R8169_NAPI
73#define NAPI_SUFFIX "-NAPI"
74#else
75#define NAPI_SUFFIX ""
76#endif
77
78#define RTL8169_VERSION "2.2LK" NAPI_SUFFIX
1da177e4
LT
79#define MODULENAME "r8169"
80#define PFX MODULENAME ": "
81
82#ifdef RTL8169_DEBUG
83#define assert(expr) \
5b0384f4
FR
84 if (!(expr)) { \
85 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
86 #expr,__FILE__,__FUNCTION__,__LINE__); \
87 }
1da177e4
LT
88#define dprintk(fmt, args...) do { printk(PFX fmt, ## args); } while (0)
89#else
90#define assert(expr) do {} while (0)
91#define dprintk(fmt, args...) do {} while (0)
92#endif /* RTL8169_DEBUG */
93
b57b7e5a 94#define R8169_MSG_DEFAULT \
f0e837d9 95 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
b57b7e5a 96
1da177e4
LT
97#define TX_BUFFS_AVAIL(tp) \
98 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
99
100#ifdef CONFIG_R8169_NAPI
101#define rtl8169_rx_skb netif_receive_skb
0b50f81d 102#define rtl8169_rx_hwaccel_skb vlan_hwaccel_receive_skb
1da177e4
LT
103#define rtl8169_rx_quota(count, quota) min(count, quota)
104#else
105#define rtl8169_rx_skb netif_rx
0b50f81d 106#define rtl8169_rx_hwaccel_skb vlan_hwaccel_rx
1da177e4
LT
107#define rtl8169_rx_quota(count, quota) count
108#endif
109
110/* media options */
111#define MAX_UNITS 8
112static int media[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
113static int num_media = 0;
114
115/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
f71e1309 116static const int max_interrupt_work = 20;
1da177e4
LT
117
118/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
119 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
f71e1309 120static const int multicast_filter_limit = 32;
1da177e4
LT
121
122/* MAC address length */
123#define MAC_ADDR_LEN 6
124
125#define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */
126#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
127#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
128#define EarlyTxThld 0x3F /* 0x3F means NO early transmit */
129#define RxPacketMaxSize 0x3FE8 /* 16K - 1 - ETH_HLEN - VLAN - CRC... */
130#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
131#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
132
133#define R8169_REGS_SIZE 256
134#define R8169_NAPI_WEIGHT 64
135#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
136#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
137#define RX_BUF_SIZE 1536 /* Rx Buffer size */
138#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
139#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
140
141#define RTL8169_TX_TIMEOUT (6*HZ)
142#define RTL8169_PHY_TIMEOUT (10*HZ)
143
144/* write/read MMIO register */
145#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
146#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
147#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
148#define RTL_R8(reg) readb (ioaddr + (reg))
149#define RTL_R16(reg) readw (ioaddr + (reg))
150#define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg)))
151
152enum mac_version {
bcf0bf90
FR
153 RTL_GIGA_MAC_VER_01 = 0x00,
154 RTL_GIGA_MAC_VER_02 = 0x01,
155 RTL_GIGA_MAC_VER_03 = 0x02,
156 RTL_GIGA_MAC_VER_04 = 0x03,
157 RTL_GIGA_MAC_VER_05 = 0x04,
158 RTL_GIGA_MAC_VER_11 = 0x0b,
159 RTL_GIGA_MAC_VER_12 = 0x0c,
160 RTL_GIGA_MAC_VER_13 = 0x0d,
161 RTL_GIGA_MAC_VER_14 = 0x0e,
162 RTL_GIGA_MAC_VER_15 = 0x0f
1da177e4
LT
163};
164
165enum phy_version {
166 RTL_GIGA_PHY_VER_C = 0x03, /* PHY Reg 0x03 bit0-3 == 0x0000 */
167 RTL_GIGA_PHY_VER_D = 0x04, /* PHY Reg 0x03 bit0-3 == 0x0000 */
168 RTL_GIGA_PHY_VER_E = 0x05, /* PHY Reg 0x03 bit0-3 == 0x0000 */
169 RTL_GIGA_PHY_VER_F = 0x06, /* PHY Reg 0x03 bit0-3 == 0x0001 */
170 RTL_GIGA_PHY_VER_G = 0x07, /* PHY Reg 0x03 bit0-3 == 0x0002 */
171 RTL_GIGA_PHY_VER_H = 0x08, /* PHY Reg 0x03 bit0-3 == 0x0003 */
172};
173
1da177e4
LT
174#define _R(NAME,MAC,MASK) \
175 { .name = NAME, .mac_version = MAC, .RxConfigMask = MASK }
176
3c6bee1d 177static const struct {
1da177e4
LT
178 const char *name;
179 u8 mac_version;
180 u32 RxConfigMask; /* Clears the bits supported by this chip */
181} rtl_chip_info[] = {
bcf0bf90
FR
182 _R("RTL8169", RTL_GIGA_MAC_VER_01, 0xff7e1880),
183 _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_02, 0xff7e1880),
184 _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_03, 0xff7e1880),
185 _R("RTL8169sb/8110sb", RTL_GIGA_MAC_VER_04, 0xff7e1880),
186 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_05, 0xff7e1880),
187 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E
188 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
189 _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
190 _R("RTL8100e", RTL_GIGA_MAC_VER_14, 0xff7e1880), // PCI-E 8139
191 _R("RTL8100e", RTL_GIGA_MAC_VER_15, 0xff7e1880) // PCI-E 8139
1da177e4
LT
192};
193#undef _R
194
bcf0bf90
FR
195enum cfg_version {
196 RTL_CFG_0 = 0x00,
197 RTL_CFG_1,
198 RTL_CFG_2
199};
200
201static const struct {
202 unsigned int region;
203 unsigned int align;
204} rtl_cfg_info[] = {
205 [RTL_CFG_0] = { 1, NET_IP_ALIGN },
206 [RTL_CFG_1] = { 2, NET_IP_ALIGN },
207 [RTL_CFG_2] = { 2, 8 }
208};
209
1da177e4 210static struct pci_device_id rtl8169_pci_tbl[] = {
bcf0bf90 211 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
d2eed8cf 212 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
d81bf551 213 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
bcf0bf90
FR
214 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_2 },
215 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
216 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
73f5e28b 217 { PCI_DEVICE(0x1259, 0xc107), 0, 0, RTL_CFG_0 },
bcf0bf90
FR
218 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
219 { PCI_VENDOR_ID_LINKSYS, 0x1032,
220 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
1da177e4
LT
221 {0,},
222};
223
224MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
225
226static int rx_copybreak = 200;
227static int use_dac;
b57b7e5a
SH
228static struct {
229 u32 msg_enable;
230} debug = { -1 };
1da177e4
LT
231
232enum RTL8169_registers {
233 MAC0 = 0, /* Ethernet hardware address. */
234 MAR0 = 8, /* Multicast filter. */
d4a3a0fc
SH
235 CounterAddrLow = 0x10,
236 CounterAddrHigh = 0x14,
1da177e4
LT
237 TxDescStartAddrLow = 0x20,
238 TxDescStartAddrHigh = 0x24,
239 TxHDescStartAddrLow = 0x28,
240 TxHDescStartAddrHigh = 0x2c,
241 FLASH = 0x30,
242 ERSR = 0x36,
243 ChipCmd = 0x37,
244 TxPoll = 0x38,
245 IntrMask = 0x3C,
246 IntrStatus = 0x3E,
247 TxConfig = 0x40,
248 RxConfig = 0x44,
249 RxMissed = 0x4C,
250 Cfg9346 = 0x50,
251 Config0 = 0x51,
252 Config1 = 0x52,
253 Config2 = 0x53,
254 Config3 = 0x54,
255 Config4 = 0x55,
256 Config5 = 0x56,
257 MultiIntr = 0x5C,
258 PHYAR = 0x60,
259 TBICSR = 0x64,
260 TBI_ANAR = 0x68,
261 TBI_LPAR = 0x6A,
262 PHYstatus = 0x6C,
263 RxMaxSize = 0xDA,
264 CPlusCmd = 0xE0,
265 IntrMitigate = 0xE2,
266 RxDescAddrLow = 0xE4,
267 RxDescAddrHigh = 0xE8,
268 EarlyTxThres = 0xEC,
269 FuncEvent = 0xF0,
270 FuncEventMask = 0xF4,
271 FuncPresetState = 0xF8,
272 FuncForceEvent = 0xFC,
273};
274
275enum RTL8169_register_content {
276 /* InterruptStatusBits */
277 SYSErr = 0x8000,
278 PCSTimeout = 0x4000,
279 SWInt = 0x0100,
280 TxDescUnavail = 0x80,
281 RxFIFOOver = 0x40,
282 LinkChg = 0x20,
283 RxOverflow = 0x10,
284 TxErr = 0x08,
285 TxOK = 0x04,
286 RxErr = 0x02,
287 RxOK = 0x01,
288
289 /* RxStatusDesc */
9dccf611
FR
290 RxFOVF = (1 << 23),
291 RxRWT = (1 << 22),
292 RxRES = (1 << 21),
293 RxRUNT = (1 << 20),
294 RxCRC = (1 << 19),
1da177e4
LT
295
296 /* ChipCmdBits */
297 CmdReset = 0x10,
298 CmdRxEnb = 0x08,
299 CmdTxEnb = 0x04,
300 RxBufEmpty = 0x01,
301
302 /* Cfg9346Bits */
303 Cfg9346_Lock = 0x00,
304 Cfg9346_Unlock = 0xC0,
305
306 /* rx_mode_bits */
307 AcceptErr = 0x20,
308 AcceptRunt = 0x10,
309 AcceptBroadcast = 0x08,
310 AcceptMulticast = 0x04,
311 AcceptMyPhys = 0x02,
312 AcceptAllPhys = 0x01,
313
314 /* RxConfigBits */
315 RxCfgFIFOShift = 13,
316 RxCfgDMAShift = 8,
317
318 /* TxConfigBits */
319 TxInterFrameGapShift = 24,
320 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
321
5d06a99f
FR
322 /* Config1 register p.24 */
323 PMEnable = (1 << 0), /* Power Management Enable */
324
61a4dcc2
FR
325 /* Config3 register p.25 */
326 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
327 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
328
5d06a99f 329 /* Config5 register p.27 */
61a4dcc2
FR
330 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
331 MWF = (1 << 5), /* Accept Multicast wakeup frame */
332 UWF = (1 << 4), /* Accept Unicast wakeup frame */
333 LanWake = (1 << 1), /* LanWake enable/disable */
5d06a99f
FR
334 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
335
1da177e4
LT
336 /* TBICSR p.28 */
337 TBIReset = 0x80000000,
338 TBILoopback = 0x40000000,
339 TBINwEnable = 0x20000000,
340 TBINwRestart = 0x10000000,
341 TBILinkOk = 0x02000000,
342 TBINwComplete = 0x01000000,
343
344 /* CPlusCmd p.31 */
345 RxVlan = (1 << 6),
346 RxChkSum = (1 << 5),
347 PCIDAC = (1 << 4),
348 PCIMulRW = (1 << 3),
349
350 /* rtl8169_PHYstatus */
351 TBI_Enable = 0x80,
352 TxFlowCtrl = 0x40,
353 RxFlowCtrl = 0x20,
354 _1000bpsF = 0x10,
355 _100bps = 0x08,
356 _10bps = 0x04,
357 LinkStatus = 0x02,
358 FullDup = 0x01,
359
1da177e4
LT
360 /* _MediaType */
361 _10_Half = 0x01,
362 _10_Full = 0x02,
363 _100_Half = 0x04,
364 _100_Full = 0x08,
365 _1000_Full = 0x10,
366
367 /* _TBICSRBit */
368 TBILinkOK = 0x02000000,
d4a3a0fc
SH
369
370 /* DumpCounterCommand */
371 CounterDump = 0x8,
1da177e4
LT
372};
373
374enum _DescStatusBit {
375 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
376 RingEnd = (1 << 30), /* End of descriptor ring */
377 FirstFrag = (1 << 29), /* First segment of a packet */
378 LastFrag = (1 << 28), /* Final segment of a packet */
379
380 /* Tx private */
381 LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
382 MSSShift = 16, /* MSS value position */
383 MSSMask = 0xfff, /* MSS value + LargeSend bit: 12 bits */
384 IPCS = (1 << 18), /* Calculate IP checksum */
385 UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
386 TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
387 TxVlanTag = (1 << 17), /* Add VLAN tag */
388
389 /* Rx private */
390 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
391 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
392
393#define RxProtoUDP (PID1)
394#define RxProtoTCP (PID0)
395#define RxProtoIP (PID1 | PID0)
396#define RxProtoMask RxProtoIP
397
398 IPFail = (1 << 16), /* IP checksum failed */
399 UDPFail = (1 << 15), /* UDP/IP checksum failed */
400 TCPFail = (1 << 14), /* TCP/IP checksum failed */
401 RxVlanTag = (1 << 16), /* VLAN tag available */
402};
403
404#define RsvdMask 0x3fffc000
405
406struct TxDesc {
407 u32 opts1;
408 u32 opts2;
409 u64 addr;
410};
411
412struct RxDesc {
413 u32 opts1;
414 u32 opts2;
415 u64 addr;
416};
417
418struct ring_info {
419 struct sk_buff *skb;
420 u32 len;
421 u8 __pad[sizeof(void *) - sizeof(u32)];
422};
423
424struct rtl8169_private {
425 void __iomem *mmio_addr; /* memory map physical address */
426 struct pci_dev *pci_dev; /* Index of PCI device */
c4028958 427 struct net_device *dev;
1da177e4
LT
428 struct net_device_stats stats; /* statistics of net device */
429 spinlock_t lock; /* spin lock flag */
b57b7e5a 430 u32 msg_enable;
1da177e4
LT
431 int chipset;
432 int mac_version;
433 int phy_version;
434 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
435 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
436 u32 dirty_rx;
437 u32 dirty_tx;
438 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
439 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
440 dma_addr_t TxPhyAddr;
441 dma_addr_t RxPhyAddr;
442 struct sk_buff *Rx_skbuff[NUM_RX_DESC]; /* Rx data buffers */
443 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
bcf0bf90 444 unsigned align;
1da177e4
LT
445 unsigned rx_buf_sz;
446 struct timer_list timer;
447 u16 cp_cmd;
448 u16 intr_mask;
449 int phy_auto_nego_reg;
450 int phy_1000_ctrl_reg;
451#ifdef CONFIG_R8169_VLAN
452 struct vlan_group *vlgrp;
453#endif
454 int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
455 void (*get_settings)(struct net_device *, struct ethtool_cmd *);
456 void (*phy_reset_enable)(void __iomem *);
457 unsigned int (*phy_reset_pending)(void __iomem *);
458 unsigned int (*link_ok)(void __iomem *);
c4028958 459 struct delayed_work task;
61a4dcc2 460 unsigned wol_enabled : 1;
1da177e4
LT
461};
462
979b6c13 463MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
1da177e4
LT
464MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
465module_param_array(media, int, &num_media, 0);
df0a1bf6 466MODULE_PARM_DESC(media, "force phy operation. Deprecated by ethtool (8).");
1da177e4 467module_param(rx_copybreak, int, 0);
1b7efd58 468MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
1da177e4
LT
469module_param(use_dac, int, 0);
470MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
b57b7e5a
SH
471module_param_named(debug, debug.msg_enable, int, 0);
472MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
1da177e4
LT
473MODULE_LICENSE("GPL");
474MODULE_VERSION(RTL8169_VERSION);
475
476static int rtl8169_open(struct net_device *dev);
477static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev);
7d12e780 478static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance);
1da177e4
LT
479static int rtl8169_init_ring(struct net_device *dev);
480static void rtl8169_hw_start(struct net_device *dev);
481static int rtl8169_close(struct net_device *dev);
482static void rtl8169_set_rx_mode(struct net_device *dev);
483static void rtl8169_tx_timeout(struct net_device *dev);
4dcb7d33 484static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
1da177e4
LT
485static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
486 void __iomem *);
4dcb7d33 487static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
1da177e4
LT
488static void rtl8169_down(struct net_device *dev);
489
490#ifdef CONFIG_R8169_NAPI
491static int rtl8169_poll(struct net_device *dev, int *budget);
492#endif
493
494static const u16 rtl8169_intr_mask =
495 SYSErr | LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK;
496static const u16 rtl8169_napi_event =
497 RxOK | RxOverflow | RxFIFOOver | TxOK | TxErr;
498static const unsigned int rtl8169_rx_config =
5b0384f4 499 (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
1da177e4
LT
500
501static void mdio_write(void __iomem *ioaddr, int RegAddr, int value)
502{
503 int i;
504
505 RTL_W32(PHYAR, 0x80000000 | (RegAddr & 0xFF) << 16 | value);
1da177e4 506
2371408c 507 for (i = 20; i > 0; i--) {
1da177e4 508 /* Check if the RTL8169 has completed writing to the specified MII register */
5b0384f4 509 if (!(RTL_R32(PHYAR) & 0x80000000))
1da177e4 510 break;
2371408c 511 udelay(25);
1da177e4
LT
512 }
513}
514
515static int mdio_read(void __iomem *ioaddr, int RegAddr)
516{
517 int i, value = -1;
518
519 RTL_W32(PHYAR, 0x0 | (RegAddr & 0xFF) << 16);
1da177e4 520
2371408c 521 for (i = 20; i > 0; i--) {
1da177e4
LT
522 /* Check if the RTL8169 has completed retrieving data from the specified MII register */
523 if (RTL_R32(PHYAR) & 0x80000000) {
524 value = (int) (RTL_R32(PHYAR) & 0xFFFF);
525 break;
526 }
2371408c 527 udelay(25);
1da177e4
LT
528 }
529 return value;
530}
531
532static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
533{
534 RTL_W16(IntrMask, 0x0000);
535
536 RTL_W16(IntrStatus, 0xffff);
537}
538
539static void rtl8169_asic_down(void __iomem *ioaddr)
540{
541 RTL_W8(ChipCmd, 0x00);
542 rtl8169_irq_mask_and_ack(ioaddr);
543 RTL_R16(CPlusCmd);
544}
545
546static unsigned int rtl8169_tbi_reset_pending(void __iomem *ioaddr)
547{
548 return RTL_R32(TBICSR) & TBIReset;
549}
550
551static unsigned int rtl8169_xmii_reset_pending(void __iomem *ioaddr)
552{
64e4bfb4 553 return mdio_read(ioaddr, MII_BMCR) & BMCR_RESET;
1da177e4
LT
554}
555
556static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
557{
558 return RTL_R32(TBICSR) & TBILinkOk;
559}
560
561static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
562{
563 return RTL_R8(PHYstatus) & LinkStatus;
564}
565
566static void rtl8169_tbi_reset_enable(void __iomem *ioaddr)
567{
568 RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
569}
570
571static void rtl8169_xmii_reset_enable(void __iomem *ioaddr)
572{
573 unsigned int val;
574
bf793295
FR
575 mdio_write(ioaddr, MII_BMCR, BMCR_RESET);
576 val = mdio_read(ioaddr, MII_BMCR);
1da177e4
LT
577}
578
579static void rtl8169_check_link_status(struct net_device *dev,
580 struct rtl8169_private *tp, void __iomem *ioaddr)
581{
582 unsigned long flags;
583
584 spin_lock_irqsave(&tp->lock, flags);
585 if (tp->link_ok(ioaddr)) {
586 netif_carrier_on(dev);
b57b7e5a
SH
587 if (netif_msg_ifup(tp))
588 printk(KERN_INFO PFX "%s: link up\n", dev->name);
589 } else {
590 if (netif_msg_ifdown(tp))
591 printk(KERN_INFO PFX "%s: link down\n", dev->name);
1da177e4 592 netif_carrier_off(dev);
b57b7e5a 593 }
1da177e4
LT
594 spin_unlock_irqrestore(&tp->lock, flags);
595}
596
597static void rtl8169_link_option(int idx, u8 *autoneg, u16 *speed, u8 *duplex)
598{
599 struct {
600 u16 speed;
601 u8 duplex;
602 u8 autoneg;
603 u8 media;
604 } link_settings[] = {
605 { SPEED_10, DUPLEX_HALF, AUTONEG_DISABLE, _10_Half },
606 { SPEED_10, DUPLEX_FULL, AUTONEG_DISABLE, _10_Full },
607 { SPEED_100, DUPLEX_HALF, AUTONEG_DISABLE, _100_Half },
608 { SPEED_100, DUPLEX_FULL, AUTONEG_DISABLE, _100_Full },
609 { SPEED_1000, DUPLEX_FULL, AUTONEG_DISABLE, _1000_Full },
610 /* Make TBI happy */
611 { SPEED_1000, DUPLEX_FULL, AUTONEG_ENABLE, 0xff }
612 }, *p;
613 unsigned char option;
5b0384f4 614
1da177e4
LT
615 option = ((idx < MAX_UNITS) && (idx >= 0)) ? media[idx] : 0xff;
616
b57b7e5a 617 if ((option != 0xff) && !idx && netif_msg_drv(&debug))
1da177e4
LT
618 printk(KERN_WARNING PFX "media option is deprecated.\n");
619
620 for (p = link_settings; p->media != 0xff; p++) {
621 if (p->media == option)
622 break;
623 }
624 *autoneg = p->autoneg;
625 *speed = p->speed;
626 *duplex = p->duplex;
627}
628
61a4dcc2
FR
629static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
630{
631 struct rtl8169_private *tp = netdev_priv(dev);
632 void __iomem *ioaddr = tp->mmio_addr;
633 u8 options;
634
635 wol->wolopts = 0;
636
637#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
638 wol->supported = WAKE_ANY;
639
640 spin_lock_irq(&tp->lock);
641
642 options = RTL_R8(Config1);
643 if (!(options & PMEnable))
644 goto out_unlock;
645
646 options = RTL_R8(Config3);
647 if (options & LinkUp)
648 wol->wolopts |= WAKE_PHY;
649 if (options & MagicPacket)
650 wol->wolopts |= WAKE_MAGIC;
651
652 options = RTL_R8(Config5);
653 if (options & UWF)
654 wol->wolopts |= WAKE_UCAST;
655 if (options & BWF)
5b0384f4 656 wol->wolopts |= WAKE_BCAST;
61a4dcc2 657 if (options & MWF)
5b0384f4 658 wol->wolopts |= WAKE_MCAST;
61a4dcc2
FR
659
660out_unlock:
661 spin_unlock_irq(&tp->lock);
662}
663
664static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
665{
666 struct rtl8169_private *tp = netdev_priv(dev);
667 void __iomem *ioaddr = tp->mmio_addr;
668 int i;
669 static struct {
670 u32 opt;
671 u16 reg;
672 u8 mask;
673 } cfg[] = {
674 { WAKE_ANY, Config1, PMEnable },
675 { WAKE_PHY, Config3, LinkUp },
676 { WAKE_MAGIC, Config3, MagicPacket },
677 { WAKE_UCAST, Config5, UWF },
678 { WAKE_BCAST, Config5, BWF },
679 { WAKE_MCAST, Config5, MWF },
680 { WAKE_ANY, Config5, LanWake }
681 };
682
683 spin_lock_irq(&tp->lock);
684
685 RTL_W8(Cfg9346, Cfg9346_Unlock);
686
687 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
688 u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
689 if (wol->wolopts & cfg[i].opt)
690 options |= cfg[i].mask;
691 RTL_W8(cfg[i].reg, options);
692 }
693
694 RTL_W8(Cfg9346, Cfg9346_Lock);
695
696 tp->wol_enabled = (wol->wolopts) ? 1 : 0;
697
698 spin_unlock_irq(&tp->lock);
699
700 return 0;
701}
702
1da177e4
LT
703static void rtl8169_get_drvinfo(struct net_device *dev,
704 struct ethtool_drvinfo *info)
705{
706 struct rtl8169_private *tp = netdev_priv(dev);
707
708 strcpy(info->driver, MODULENAME);
709 strcpy(info->version, RTL8169_VERSION);
710 strcpy(info->bus_info, pci_name(tp->pci_dev));
711}
712
713static int rtl8169_get_regs_len(struct net_device *dev)
714{
715 return R8169_REGS_SIZE;
716}
717
718static int rtl8169_set_speed_tbi(struct net_device *dev,
719 u8 autoneg, u16 speed, u8 duplex)
720{
721 struct rtl8169_private *tp = netdev_priv(dev);
722 void __iomem *ioaddr = tp->mmio_addr;
723 int ret = 0;
724 u32 reg;
725
726 reg = RTL_R32(TBICSR);
727 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
728 (duplex == DUPLEX_FULL)) {
729 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
730 } else if (autoneg == AUTONEG_ENABLE)
731 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
732 else {
b57b7e5a
SH
733 if (netif_msg_link(tp)) {
734 printk(KERN_WARNING "%s: "
735 "incorrect speed setting refused in TBI mode\n",
736 dev->name);
737 }
1da177e4
LT
738 ret = -EOPNOTSUPP;
739 }
740
741 return ret;
742}
743
744static int rtl8169_set_speed_xmii(struct net_device *dev,
745 u8 autoneg, u16 speed, u8 duplex)
746{
747 struct rtl8169_private *tp = netdev_priv(dev);
748 void __iomem *ioaddr = tp->mmio_addr;
749 int auto_nego, giga_ctrl;
750
64e4bfb4
FR
751 auto_nego = mdio_read(ioaddr, MII_ADVERTISE);
752 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
753 ADVERTISE_100HALF | ADVERTISE_100FULL);
754 giga_ctrl = mdio_read(ioaddr, MII_CTRL1000);
755 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
1da177e4
LT
756
757 if (autoneg == AUTONEG_ENABLE) {
64e4bfb4
FR
758 auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
759 ADVERTISE_100HALF | ADVERTISE_100FULL);
760 giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
1da177e4
LT
761 } else {
762 if (speed == SPEED_10)
64e4bfb4 763 auto_nego |= ADVERTISE_10HALF | ADVERTISE_10FULL;
1da177e4 764 else if (speed == SPEED_100)
64e4bfb4 765 auto_nego |= ADVERTISE_100HALF | ADVERTISE_100FULL;
1da177e4 766 else if (speed == SPEED_1000)
64e4bfb4 767 giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
1da177e4
LT
768
769 if (duplex == DUPLEX_HALF)
64e4bfb4 770 auto_nego &= ~(ADVERTISE_10FULL | ADVERTISE_100FULL);
726ecdcf
AG
771
772 if (duplex == DUPLEX_FULL)
64e4bfb4 773 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_100HALF);
bcf0bf90
FR
774
775 /* This tweak comes straight from Realtek's driver. */
776 if ((speed == SPEED_100) && (duplex == DUPLEX_HALF) &&
777 (tp->mac_version == RTL_GIGA_MAC_VER_13)) {
64e4bfb4 778 auto_nego = ADVERTISE_100HALF | ADVERTISE_CSMA;
bcf0bf90
FR
779 }
780 }
781
782 /* The 8100e/8101e do Fast Ethernet only. */
783 if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
784 (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
785 (tp->mac_version == RTL_GIGA_MAC_VER_15)) {
64e4bfb4 786 if ((giga_ctrl & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) &&
bcf0bf90
FR
787 netif_msg_link(tp)) {
788 printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n",
789 dev->name);
790 }
64e4bfb4 791 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
1da177e4
LT
792 }
793
623a1593
FR
794 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
795
1da177e4
LT
796 tp->phy_auto_nego_reg = auto_nego;
797 tp->phy_1000_ctrl_reg = giga_ctrl;
798
64e4bfb4
FR
799 mdio_write(ioaddr, MII_ADVERTISE, auto_nego);
800 mdio_write(ioaddr, MII_CTRL1000, giga_ctrl);
801 mdio_write(ioaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
1da177e4
LT
802 return 0;
803}
804
805static int rtl8169_set_speed(struct net_device *dev,
806 u8 autoneg, u16 speed, u8 duplex)
807{
808 struct rtl8169_private *tp = netdev_priv(dev);
809 int ret;
810
811 ret = tp->set_speed(dev, autoneg, speed, duplex);
812
64e4bfb4 813 if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
1da177e4
LT
814 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
815
816 return ret;
817}
818
819static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
820{
821 struct rtl8169_private *tp = netdev_priv(dev);
822 unsigned long flags;
823 int ret;
824
825 spin_lock_irqsave(&tp->lock, flags);
826 ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex);
827 spin_unlock_irqrestore(&tp->lock, flags);
5b0384f4 828
1da177e4
LT
829 return ret;
830}
831
832static u32 rtl8169_get_rx_csum(struct net_device *dev)
833{
834 struct rtl8169_private *tp = netdev_priv(dev);
835
836 return tp->cp_cmd & RxChkSum;
837}
838
839static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
840{
841 struct rtl8169_private *tp = netdev_priv(dev);
842 void __iomem *ioaddr = tp->mmio_addr;
843 unsigned long flags;
844
845 spin_lock_irqsave(&tp->lock, flags);
846
847 if (data)
848 tp->cp_cmd |= RxChkSum;
849 else
850 tp->cp_cmd &= ~RxChkSum;
851
852 RTL_W16(CPlusCmd, tp->cp_cmd);
853 RTL_R16(CPlusCmd);
854
855 spin_unlock_irqrestore(&tp->lock, flags);
856
857 return 0;
858}
859
860#ifdef CONFIG_R8169_VLAN
861
862static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
863 struct sk_buff *skb)
864{
865 return (tp->vlgrp && vlan_tx_tag_present(skb)) ?
866 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
867}
868
869static void rtl8169_vlan_rx_register(struct net_device *dev,
870 struct vlan_group *grp)
871{
872 struct rtl8169_private *tp = netdev_priv(dev);
873 void __iomem *ioaddr = tp->mmio_addr;
874 unsigned long flags;
875
876 spin_lock_irqsave(&tp->lock, flags);
877 tp->vlgrp = grp;
878 if (tp->vlgrp)
879 tp->cp_cmd |= RxVlan;
880 else
881 tp->cp_cmd &= ~RxVlan;
882 RTL_W16(CPlusCmd, tp->cp_cmd);
883 RTL_R16(CPlusCmd);
884 spin_unlock_irqrestore(&tp->lock, flags);
885}
886
887static void rtl8169_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
888{
889 struct rtl8169_private *tp = netdev_priv(dev);
890 unsigned long flags;
891
892 spin_lock_irqsave(&tp->lock, flags);
893 if (tp->vlgrp)
894 tp->vlgrp->vlan_devices[vid] = NULL;
895 spin_unlock_irqrestore(&tp->lock, flags);
896}
897
898static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
899 struct sk_buff *skb)
900{
901 u32 opts2 = le32_to_cpu(desc->opts2);
902 int ret;
903
904 if (tp->vlgrp && (opts2 & RxVlanTag)) {
905 rtl8169_rx_hwaccel_skb(skb, tp->vlgrp,
906 swab16(opts2 & 0xffff));
907 ret = 0;
908 } else
909 ret = -1;
910 desc->opts2 = 0;
911 return ret;
912}
913
914#else /* !CONFIG_R8169_VLAN */
915
916static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
917 struct sk_buff *skb)
918{
919 return 0;
920}
921
922static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
923 struct sk_buff *skb)
924{
925 return -1;
926}
927
928#endif
929
930static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
931{
932 struct rtl8169_private *tp = netdev_priv(dev);
933 void __iomem *ioaddr = tp->mmio_addr;
934 u32 status;
935
936 cmd->supported =
937 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
938 cmd->port = PORT_FIBRE;
939 cmd->transceiver = XCVR_INTERNAL;
940
941 status = RTL_R32(TBICSR);
942 cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
943 cmd->autoneg = !!(status & TBINwEnable);
944
945 cmd->speed = SPEED_1000;
946 cmd->duplex = DUPLEX_FULL; /* Always set */
947}
948
949static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
950{
951 struct rtl8169_private *tp = netdev_priv(dev);
952 void __iomem *ioaddr = tp->mmio_addr;
953 u8 status;
954
955 cmd->supported = SUPPORTED_10baseT_Half |
956 SUPPORTED_10baseT_Full |
957 SUPPORTED_100baseT_Half |
958 SUPPORTED_100baseT_Full |
959 SUPPORTED_1000baseT_Full |
960 SUPPORTED_Autoneg |
5b0384f4 961 SUPPORTED_TP;
1da177e4
LT
962
963 cmd->autoneg = 1;
964 cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
965
64e4bfb4 966 if (tp->phy_auto_nego_reg & ADVERTISE_10HALF)
1da177e4 967 cmd->advertising |= ADVERTISED_10baseT_Half;
64e4bfb4 968 if (tp->phy_auto_nego_reg & ADVERTISE_10FULL)
1da177e4 969 cmd->advertising |= ADVERTISED_10baseT_Full;
64e4bfb4 970 if (tp->phy_auto_nego_reg & ADVERTISE_100HALF)
1da177e4 971 cmd->advertising |= ADVERTISED_100baseT_Half;
64e4bfb4 972 if (tp->phy_auto_nego_reg & ADVERTISE_100FULL)
1da177e4 973 cmd->advertising |= ADVERTISED_100baseT_Full;
64e4bfb4 974 if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)
1da177e4
LT
975 cmd->advertising |= ADVERTISED_1000baseT_Full;
976
977 status = RTL_R8(PHYstatus);
978
979 if (status & _1000bpsF)
980 cmd->speed = SPEED_1000;
981 else if (status & _100bps)
982 cmd->speed = SPEED_100;
983 else if (status & _10bps)
984 cmd->speed = SPEED_10;
985
623a1593
FR
986 if (status & TxFlowCtrl)
987 cmd->advertising |= ADVERTISED_Asym_Pause;
988 if (status & RxFlowCtrl)
989 cmd->advertising |= ADVERTISED_Pause;
990
1da177e4
LT
991 cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ?
992 DUPLEX_FULL : DUPLEX_HALF;
993}
994
995static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
996{
997 struct rtl8169_private *tp = netdev_priv(dev);
998 unsigned long flags;
999
1000 spin_lock_irqsave(&tp->lock, flags);
1001
1002 tp->get_settings(dev, cmd);
1003
1004 spin_unlock_irqrestore(&tp->lock, flags);
1005 return 0;
1006}
1007
1008static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1009 void *p)
1010{
5b0384f4
FR
1011 struct rtl8169_private *tp = netdev_priv(dev);
1012 unsigned long flags;
1da177e4 1013
5b0384f4
FR
1014 if (regs->len > R8169_REGS_SIZE)
1015 regs->len = R8169_REGS_SIZE;
1da177e4 1016
5b0384f4
FR
1017 spin_lock_irqsave(&tp->lock, flags);
1018 memcpy_fromio(p, tp->mmio_addr, regs->len);
1019 spin_unlock_irqrestore(&tp->lock, flags);
1da177e4
LT
1020}
1021
b57b7e5a
SH
1022static u32 rtl8169_get_msglevel(struct net_device *dev)
1023{
1024 struct rtl8169_private *tp = netdev_priv(dev);
1025
1026 return tp->msg_enable;
1027}
1028
1029static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1030{
1031 struct rtl8169_private *tp = netdev_priv(dev);
1032
1033 tp->msg_enable = value;
1034}
1035
d4a3a0fc
SH
1036static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
1037 "tx_packets",
1038 "rx_packets",
1039 "tx_errors",
1040 "rx_errors",
1041 "rx_missed",
1042 "align_errors",
1043 "tx_single_collisions",
1044 "tx_multi_collisions",
1045 "unicast",
1046 "broadcast",
1047 "multicast",
1048 "tx_aborted",
1049 "tx_underrun",
1050};
1051
1052struct rtl8169_counters {
1053 u64 tx_packets;
1054 u64 rx_packets;
1055 u64 tx_errors;
1056 u32 rx_errors;
1057 u16 rx_missed;
1058 u16 align_errors;
1059 u32 tx_one_collision;
1060 u32 tx_multi_collision;
1061 u64 rx_unicast;
1062 u64 rx_broadcast;
1063 u32 rx_multicast;
1064 u16 tx_aborted;
1065 u16 tx_underun;
1066};
1067
1068static int rtl8169_get_stats_count(struct net_device *dev)
1069{
1070 return ARRAY_SIZE(rtl8169_gstrings);
1071}
1072
1073static void rtl8169_get_ethtool_stats(struct net_device *dev,
1074 struct ethtool_stats *stats, u64 *data)
1075{
1076 struct rtl8169_private *tp = netdev_priv(dev);
1077 void __iomem *ioaddr = tp->mmio_addr;
1078 struct rtl8169_counters *counters;
1079 dma_addr_t paddr;
1080 u32 cmd;
1081
1082 ASSERT_RTNL();
1083
1084 counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr);
1085 if (!counters)
1086 return;
1087
1088 RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
1089 cmd = (u64)paddr & DMA_32BIT_MASK;
1090 RTL_W32(CounterAddrLow, cmd);
1091 RTL_W32(CounterAddrLow, cmd | CounterDump);
1092
1093 while (RTL_R32(CounterAddrLow) & CounterDump) {
1094 if (msleep_interruptible(1))
1095 break;
1096 }
1097
1098 RTL_W32(CounterAddrLow, 0);
1099 RTL_W32(CounterAddrHigh, 0);
1100
5b0384f4 1101 data[0] = le64_to_cpu(counters->tx_packets);
d4a3a0fc
SH
1102 data[1] = le64_to_cpu(counters->rx_packets);
1103 data[2] = le64_to_cpu(counters->tx_errors);
1104 data[3] = le32_to_cpu(counters->rx_errors);
1105 data[4] = le16_to_cpu(counters->rx_missed);
1106 data[5] = le16_to_cpu(counters->align_errors);
1107 data[6] = le32_to_cpu(counters->tx_one_collision);
1108 data[7] = le32_to_cpu(counters->tx_multi_collision);
1109 data[8] = le64_to_cpu(counters->rx_unicast);
1110 data[9] = le64_to_cpu(counters->rx_broadcast);
1111 data[10] = le32_to_cpu(counters->rx_multicast);
1112 data[11] = le16_to_cpu(counters->tx_aborted);
1113 data[12] = le16_to_cpu(counters->tx_underun);
1114
1115 pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr);
1116}
1117
1118static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1119{
1120 switch(stringset) {
1121 case ETH_SS_STATS:
1122 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1123 break;
1124 }
1125}
1126
1127
7282d491 1128static const struct ethtool_ops rtl8169_ethtool_ops = {
1da177e4
LT
1129 .get_drvinfo = rtl8169_get_drvinfo,
1130 .get_regs_len = rtl8169_get_regs_len,
1131 .get_link = ethtool_op_get_link,
1132 .get_settings = rtl8169_get_settings,
1133 .set_settings = rtl8169_set_settings,
b57b7e5a
SH
1134 .get_msglevel = rtl8169_get_msglevel,
1135 .set_msglevel = rtl8169_set_msglevel,
1da177e4
LT
1136 .get_rx_csum = rtl8169_get_rx_csum,
1137 .set_rx_csum = rtl8169_set_rx_csum,
1138 .get_tx_csum = ethtool_op_get_tx_csum,
1139 .set_tx_csum = ethtool_op_set_tx_csum,
1140 .get_sg = ethtool_op_get_sg,
1141 .set_sg = ethtool_op_set_sg,
1142 .get_tso = ethtool_op_get_tso,
1143 .set_tso = ethtool_op_set_tso,
1144 .get_regs = rtl8169_get_regs,
61a4dcc2
FR
1145 .get_wol = rtl8169_get_wol,
1146 .set_wol = rtl8169_set_wol,
d4a3a0fc
SH
1147 .get_strings = rtl8169_get_strings,
1148 .get_stats_count = rtl8169_get_stats_count,
1149 .get_ethtool_stats = rtl8169_get_ethtool_stats,
6d6525b7 1150 .get_perm_addr = ethtool_op_get_perm_addr,
1da177e4
LT
1151};
1152
1153static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum,
1154 int bitval)
1155{
1156 int val;
1157
1158 val = mdio_read(ioaddr, reg);
1159 val = (bitval == 1) ?
1160 val | (bitval << bitnum) : val & ~(0x0001 << bitnum);
5b0384f4 1161 mdio_write(ioaddr, reg, val & 0xffff);
1da177e4
LT
1162}
1163
1164static void rtl8169_get_mac_version(struct rtl8169_private *tp, void __iomem *ioaddr)
1165{
1166 const struct {
1167 u32 mask;
1168 int mac_version;
1169 } mac_info[] = {
bcf0bf90
FR
1170 { 0x38800000, RTL_GIGA_MAC_VER_15 },
1171 { 0x38000000, RTL_GIGA_MAC_VER_12 },
1172 { 0x34000000, RTL_GIGA_MAC_VER_13 },
1173 { 0x30800000, RTL_GIGA_MAC_VER_14 },
5b0384f4 1174 { 0x30000000, RTL_GIGA_MAC_VER_11 },
bcf0bf90
FR
1175 { 0x18000000, RTL_GIGA_MAC_VER_05 },
1176 { 0x10000000, RTL_GIGA_MAC_VER_04 },
1177 { 0x04000000, RTL_GIGA_MAC_VER_03 },
1178 { 0x00800000, RTL_GIGA_MAC_VER_02 },
1179 { 0x00000000, RTL_GIGA_MAC_VER_01 } /* Catch-all */
1da177e4
LT
1180 }, *p = mac_info;
1181 u32 reg;
1182
1183 reg = RTL_R32(TxConfig) & 0x7c800000;
1184 while ((reg & p->mask) != p->mask)
1185 p++;
1186 tp->mac_version = p->mac_version;
1187}
1188
1189static void rtl8169_print_mac_version(struct rtl8169_private *tp)
1190{
bcf0bf90 1191 dprintk("mac_version = 0x%02x\n", tp->mac_version);
1da177e4
LT
1192}
1193
1194static void rtl8169_get_phy_version(struct rtl8169_private *tp, void __iomem *ioaddr)
1195{
1196 const struct {
1197 u16 mask;
1198 u16 set;
1199 int phy_version;
1200 } phy_info[] = {
1201 { 0x000f, 0x0002, RTL_GIGA_PHY_VER_G },
1202 { 0x000f, 0x0001, RTL_GIGA_PHY_VER_F },
1203 { 0x000f, 0x0000, RTL_GIGA_PHY_VER_E },
1204 { 0x0000, 0x0000, RTL_GIGA_PHY_VER_D } /* Catch-all */
1205 }, *p = phy_info;
1206 u16 reg;
1207
64e4bfb4 1208 reg = mdio_read(ioaddr, MII_PHYSID2) & 0xffff;
1da177e4
LT
1209 while ((reg & p->mask) != p->set)
1210 p++;
1211 tp->phy_version = p->phy_version;
1212}
1213
1214static void rtl8169_print_phy_version(struct rtl8169_private *tp)
1215{
1216 struct {
1217 int version;
1218 char *msg;
1219 u32 reg;
1220 } phy_print[] = {
1221 { RTL_GIGA_PHY_VER_G, "RTL_GIGA_PHY_VER_G", 0x0002 },
1222 { RTL_GIGA_PHY_VER_F, "RTL_GIGA_PHY_VER_F", 0x0001 },
1223 { RTL_GIGA_PHY_VER_E, "RTL_GIGA_PHY_VER_E", 0x0000 },
1224 { RTL_GIGA_PHY_VER_D, "RTL_GIGA_PHY_VER_D", 0x0000 },
1225 { 0, NULL, 0x0000 }
1226 }, *p;
1227
1228 for (p = phy_print; p->msg; p++) {
1229 if (tp->phy_version == p->version) {
1230 dprintk("phy_version == %s (%04x)\n", p->msg, p->reg);
1231 return;
1232 }
1233 }
1234 dprintk("phy_version == Unknown\n");
1235}
1236
1237static void rtl8169_hw_phy_config(struct net_device *dev)
1238{
1239 struct rtl8169_private *tp = netdev_priv(dev);
1240 void __iomem *ioaddr = tp->mmio_addr;
1241 struct {
1242 u16 regs[5]; /* Beware of bit-sign propagation */
1243 } phy_magic[5] = { {
1244 { 0x0000, //w 4 15 12 0
1245 0x00a1, //w 3 15 0 00a1
1246 0x0008, //w 2 15 0 0008
1247 0x1020, //w 1 15 0 1020
1248 0x1000 } },{ //w 0 15 0 1000
1249 { 0x7000, //w 4 15 12 7
1250 0xff41, //w 3 15 0 ff41
1251 0xde60, //w 2 15 0 de60
1252 0x0140, //w 1 15 0 0140
1253 0x0077 } },{ //w 0 15 0 0077
1254 { 0xa000, //w 4 15 12 a
1255 0xdf01, //w 3 15 0 df01
1256 0xdf20, //w 2 15 0 df20
1257 0xff95, //w 1 15 0 ff95
1258 0xfa00 } },{ //w 0 15 0 fa00
1259 { 0xb000, //w 4 15 12 b
1260 0xff41, //w 3 15 0 ff41
1261 0xde20, //w 2 15 0 de20
1262 0x0140, //w 1 15 0 0140
1263 0x00bb } },{ //w 0 15 0 00bb
1264 { 0xf000, //w 4 15 12 f
1265 0xdf01, //w 3 15 0 df01
1266 0xdf20, //w 2 15 0 df20
1267 0xff95, //w 1 15 0 ff95
1268 0xbf00 } //w 0 15 0 bf00
1269 }
1270 }, *p = phy_magic;
1271 int i;
1272
1273 rtl8169_print_mac_version(tp);
1274 rtl8169_print_phy_version(tp);
1275
bcf0bf90 1276 if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
1da177e4
LT
1277 return;
1278 if (tp->phy_version >= RTL_GIGA_PHY_VER_H)
1279 return;
1280
1281 dprintk("MAC version != 0 && PHY version == 0 or 1\n");
1282 dprintk("Do final_reg2.cfg\n");
1283
1284 /* Shazam ! */
1285
bcf0bf90 1286 if (tp->mac_version == RTL_GIGA_MAC_VER_04) {
1da177e4
LT
1287 mdio_write(ioaddr, 31, 0x0002);
1288 mdio_write(ioaddr, 1, 0x90d0);
1289 mdio_write(ioaddr, 31, 0x0000);
1290 return;
1291 }
1292
1293 /* phy config for RTL8169s mac_version C chip */
1294 mdio_write(ioaddr, 31, 0x0001); //w 31 2 0 1
1295 mdio_write(ioaddr, 21, 0x1000); //w 21 15 0 1000
1296 mdio_write(ioaddr, 24, 0x65c7); //w 24 15 0 65c7
1297 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0
1298
1299 for (i = 0; i < ARRAY_SIZE(phy_magic); i++, p++) {
1300 int val, pos = 4;
1301
1302 val = (mdio_read(ioaddr, pos) & 0x0fff) | (p->regs[0] & 0xffff);
1303 mdio_write(ioaddr, pos, val);
1304 while (--pos >= 0)
1305 mdio_write(ioaddr, pos, p->regs[4 - pos] & 0xffff);
1306 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 1); //w 4 11 11 1
1307 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0
1308 }
1309 mdio_write(ioaddr, 31, 0x0000); //w 31 2 0 0
1310}
1311
1312static void rtl8169_phy_timer(unsigned long __opaque)
1313{
1314 struct net_device *dev = (struct net_device *)__opaque;
1315 struct rtl8169_private *tp = netdev_priv(dev);
1316 struct timer_list *timer = &tp->timer;
1317 void __iomem *ioaddr = tp->mmio_addr;
1318 unsigned long timeout = RTL8169_PHY_TIMEOUT;
1319
bcf0bf90 1320 assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
1da177e4
LT
1321 assert(tp->phy_version < RTL_GIGA_PHY_VER_H);
1322
64e4bfb4 1323 if (!(tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
1da177e4
LT
1324 return;
1325
1326 spin_lock_irq(&tp->lock);
1327
1328 if (tp->phy_reset_pending(ioaddr)) {
5b0384f4 1329 /*
1da177e4
LT
1330 * A busy loop could burn quite a few cycles on nowadays CPU.
1331 * Let's delay the execution of the timer for a few ticks.
1332 */
1333 timeout = HZ/10;
1334 goto out_mod_timer;
1335 }
1336
1337 if (tp->link_ok(ioaddr))
1338 goto out_unlock;
1339
b57b7e5a
SH
1340 if (netif_msg_link(tp))
1341 printk(KERN_WARNING "%s: PHY reset until link up\n", dev->name);
1da177e4
LT
1342
1343 tp->phy_reset_enable(ioaddr);
1344
1345out_mod_timer:
1346 mod_timer(timer, jiffies + timeout);
1347out_unlock:
1348 spin_unlock_irq(&tp->lock);
1349}
1350
1351static inline void rtl8169_delete_timer(struct net_device *dev)
1352{
1353 struct rtl8169_private *tp = netdev_priv(dev);
1354 struct timer_list *timer = &tp->timer;
1355
bcf0bf90 1356 if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) ||
1da177e4
LT
1357 (tp->phy_version >= RTL_GIGA_PHY_VER_H))
1358 return;
1359
1360 del_timer_sync(timer);
1361}
1362
1363static inline void rtl8169_request_timer(struct net_device *dev)
1364{
1365 struct rtl8169_private *tp = netdev_priv(dev);
1366 struct timer_list *timer = &tp->timer;
1367
bcf0bf90 1368 if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) ||
1da177e4
LT
1369 (tp->phy_version >= RTL_GIGA_PHY_VER_H))
1370 return;
1371
1372 init_timer(timer);
1373 timer->expires = jiffies + RTL8169_PHY_TIMEOUT;
1374 timer->data = (unsigned long)(dev);
1375 timer->function = rtl8169_phy_timer;
1376 add_timer(timer);
1377}
1378
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	/* Run the ISR by hand with the real interrupt masked off. */
	disable_irq(pdev->irq);
	rtl8169_interrupt(pdev->irq, dev);
	enable_irq(pdev->irq);
}
#endif
1395
1396static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
1397 void __iomem *ioaddr)
1398{
1399 iounmap(ioaddr);
1400 pci_release_regions(pdev);
1401 pci_disable_device(pdev);
1402 free_netdev(dev);
1403}
1404
bf793295
FR
1405static void rtl8169_phy_reset(struct net_device *dev,
1406 struct rtl8169_private *tp)
1407{
1408 void __iomem *ioaddr = tp->mmio_addr;
1409 int i;
1410
1411 tp->phy_reset_enable(ioaddr);
1412 for (i = 0; i < 100; i++) {
1413 if (!tp->phy_reset_pending(ioaddr))
1414 return;
1415 msleep(1);
1416 }
1417 if (netif_msg_link(tp))
1418 printk(KERN_ERR "%s: PHY reset failed.\n", dev->name);
1419}
1420
4ff96fa6
FR
1421static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
1422{
1423 void __iomem *ioaddr = tp->mmio_addr;
1424 static int board_idx = -1;
1425 u8 autoneg, duplex;
1426 u16 speed;
1427
1428 board_idx++;
1429
1430 rtl8169_hw_phy_config(dev);
1431
1432 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
1433 RTL_W8(0x82, 0x01);
1434
bcf0bf90 1435 if (tp->mac_version < RTL_GIGA_MAC_VER_03) {
4ff96fa6
FR
1436 dprintk("Set PCI Latency=0x40\n");
1437 pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
1438 }
1439
bcf0bf90 1440 if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
4ff96fa6
FR
1441 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
1442 RTL_W8(0x82, 0x01);
1443 dprintk("Set PHY Reg 0x0bh = 0x00h\n");
1444 mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0
1445 }
1446
1447 rtl8169_link_option(board_idx, &autoneg, &speed, &duplex);
1448
bf793295
FR
1449 rtl8169_phy_reset(dev, tp);
1450
4ff96fa6
FR
1451 rtl8169_set_speed(dev, autoneg, speed, duplex);
1452
1453 if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
1454 printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name);
1455}
1456
5f787a1a
FR
1457static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1458{
1459 struct rtl8169_private *tp = netdev_priv(dev);
1460 struct mii_ioctl_data *data = if_mii(ifr);
1461
1462 if (!netif_running(dev))
1463 return -ENODEV;
1464
1465 switch (cmd) {
1466 case SIOCGMIIPHY:
1467 data->phy_id = 32; /* Internal PHY */
1468 return 0;
1469
1470 case SIOCGMIIREG:
1471 data->val_out = mdio_read(tp->mmio_addr, data->reg_num & 0x1f);
1472 return 0;
1473
1474 case SIOCSMIIREG:
1475 if (!capable(CAP_NET_ADMIN))
1476 return -EPERM;
1477 mdio_write(tp->mmio_addr, data->reg_num & 0x1f, data->val_in);
1478 return 0;
1479 }
1480 return -EOPNOTSUPP;
1481}
1482
1da177e4 1483static int __devinit
4ff96fa6 1484rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1da177e4 1485{
bcf0bf90 1486 const unsigned int region = rtl_cfg_info[ent->driver_data].region;
1da177e4 1487 struct rtl8169_private *tp;
4ff96fa6
FR
1488 struct net_device *dev;
1489 void __iomem *ioaddr;
315917d2
FR
1490 unsigned int pm_cap;
1491 int i, rc;
1da177e4 1492
4ff96fa6
FR
1493 if (netif_msg_drv(&debug)) {
1494 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
1495 MODULENAME, RTL8169_VERSION);
1496 }
1da177e4 1497
1da177e4 1498 dev = alloc_etherdev(sizeof (*tp));
4ff96fa6 1499 if (!dev) {
b57b7e5a 1500 if (netif_msg_drv(&debug))
9b91cf9d 1501 dev_err(&pdev->dev, "unable to alloc new ethernet\n");
4ff96fa6
FR
1502 rc = -ENOMEM;
1503 goto out;
1da177e4
LT
1504 }
1505
1506 SET_MODULE_OWNER(dev);
1507 SET_NETDEV_DEV(dev, &pdev->dev);
1508 tp = netdev_priv(dev);
c4028958 1509 tp->dev = dev;
b57b7e5a 1510 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
1da177e4
LT
1511
1512 /* enable device (incl. PCI PM wakeup and hotplug setup) */
1513 rc = pci_enable_device(pdev);
b57b7e5a 1514 if (rc < 0) {
2e8a538d 1515 if (netif_msg_probe(tp))
9b91cf9d 1516 dev_err(&pdev->dev, "enable failure\n");
4ff96fa6 1517 goto err_out_free_dev_1;
1da177e4
LT
1518 }
1519
1520 rc = pci_set_mwi(pdev);
1521 if (rc < 0)
4ff96fa6 1522 goto err_out_disable_2;
1da177e4
LT
1523
1524 /* save power state before pci_enable_device overwrites it */
1525 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
1526 if (pm_cap) {
4ff96fa6 1527 u16 pwr_command, acpi_idle_state;
1da177e4
LT
1528
1529 pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command);
1530 acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
1531 } else {
4ff96fa6 1532 if (netif_msg_probe(tp)) {
9b91cf9d 1533 dev_err(&pdev->dev,
4ff96fa6
FR
1534 "PowerManagement capability not found.\n");
1535 }
1da177e4
LT
1536 }
1537
1538 /* make sure PCI base addr 1 is MMIO */
bcf0bf90 1539 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
4ff96fa6 1540 if (netif_msg_probe(tp)) {
9b91cf9d 1541 dev_err(&pdev->dev,
bcf0bf90
FR
1542 "region #%d not an MMIO resource, aborting\n",
1543 region);
4ff96fa6 1544 }
1da177e4 1545 rc = -ENODEV;
4ff96fa6 1546 goto err_out_mwi_3;
1da177e4 1547 }
4ff96fa6 1548
1da177e4 1549 /* check for weird/broken PCI region reporting */
bcf0bf90 1550 if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
4ff96fa6 1551 if (netif_msg_probe(tp)) {
9b91cf9d 1552 dev_err(&pdev->dev,
4ff96fa6
FR
1553 "Invalid PCI region size(s), aborting\n");
1554 }
1da177e4 1555 rc = -ENODEV;
4ff96fa6 1556 goto err_out_mwi_3;
1da177e4
LT
1557 }
1558
1559 rc = pci_request_regions(pdev, MODULENAME);
b57b7e5a 1560 if (rc < 0) {
2e8a538d 1561 if (netif_msg_probe(tp))
9b91cf9d 1562 dev_err(&pdev->dev, "could not request regions.\n");
4ff96fa6 1563 goto err_out_mwi_3;
1da177e4
LT
1564 }
1565
1566 tp->cp_cmd = PCIMulRW | RxChkSum;
1567
1568 if ((sizeof(dma_addr_t) > 4) &&
1569 !pci_set_dma_mask(pdev, DMA_64BIT_MASK) && use_dac) {
1570 tp->cp_cmd |= PCIDAC;
1571 dev->features |= NETIF_F_HIGHDMA;
1572 } else {
1573 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1574 if (rc < 0) {
4ff96fa6 1575 if (netif_msg_probe(tp)) {
9b91cf9d 1576 dev_err(&pdev->dev,
4ff96fa6
FR
1577 "DMA configuration failed.\n");
1578 }
1579 goto err_out_free_res_4;
1da177e4
LT
1580 }
1581 }
1582
1583 pci_set_master(pdev);
1584
1585 /* ioremap MMIO region */
bcf0bf90 1586 ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
4ff96fa6 1587 if (!ioaddr) {
b57b7e5a 1588 if (netif_msg_probe(tp))
9b91cf9d 1589 dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
1da177e4 1590 rc = -EIO;
4ff96fa6 1591 goto err_out_free_res_4;
1da177e4
LT
1592 }
1593
1594 /* Unneeded ? Don't mess with Mrs. Murphy. */
1595 rtl8169_irq_mask_and_ack(ioaddr);
1596
1597 /* Soft reset the chip. */
1598 RTL_W8(ChipCmd, CmdReset);
1599
1600 /* Check that the chip has finished the reset. */
b518fa8e 1601 for (i = 100; i > 0; i--) {
1da177e4
LT
1602 if ((RTL_R8(ChipCmd) & CmdReset) == 0)
1603 break;
b518fa8e 1604 msleep_interruptible(1);
1da177e4
LT
1605 }
1606
1607 /* Identify chip attached to board */
1608 rtl8169_get_mac_version(tp, ioaddr);
1609 rtl8169_get_phy_version(tp, ioaddr);
1610
1611 rtl8169_print_mac_version(tp);
1612 rtl8169_print_phy_version(tp);
1613
1614 for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) {
1615 if (tp->mac_version == rtl_chip_info[i].mac_version)
1616 break;
1617 }
1618 if (i < 0) {
1619 /* Unknown chip: assume array element #0, original RTL-8169 */
b57b7e5a 1620 if (netif_msg_probe(tp)) {
2e8a538d 1621 dev_printk(KERN_DEBUG, &pdev->dev,
4ff96fa6
FR
1622 "unknown chip version, assuming %s\n",
1623 rtl_chip_info[0].name);
b57b7e5a 1624 }
1da177e4
LT
1625 i++;
1626 }
1627 tp->chipset = i;
1628
5d06a99f
FR
1629 RTL_W8(Cfg9346, Cfg9346_Unlock);
1630 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
1631 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
1632 RTL_W8(Cfg9346, Cfg9346_Lock);
1633
1da177e4
LT
1634 if (RTL_R8(PHYstatus) & TBI_Enable) {
1635 tp->set_speed = rtl8169_set_speed_tbi;
1636 tp->get_settings = rtl8169_gset_tbi;
1637 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
1638 tp->phy_reset_pending = rtl8169_tbi_reset_pending;
1639 tp->link_ok = rtl8169_tbi_link_ok;
1640
64e4bfb4 1641 tp->phy_1000_ctrl_reg = ADVERTISE_1000FULL; /* Implied by TBI */
1da177e4
LT
1642 } else {
1643 tp->set_speed = rtl8169_set_speed_xmii;
1644 tp->get_settings = rtl8169_gset_xmii;
1645 tp->phy_reset_enable = rtl8169_xmii_reset_enable;
1646 tp->phy_reset_pending = rtl8169_xmii_reset_pending;
1647 tp->link_ok = rtl8169_xmii_link_ok;
5f787a1a
FR
1648
1649 dev->do_ioctl = rtl8169_ioctl;
1da177e4
LT
1650 }
1651
1652 /* Get MAC address. FIXME: read EEPROM */
1653 for (i = 0; i < MAC_ADDR_LEN; i++)
1654 dev->dev_addr[i] = RTL_R8(MAC0 + i);
6d6525b7 1655 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1da177e4
LT
1656
1657 dev->open = rtl8169_open;
1658 dev->hard_start_xmit = rtl8169_start_xmit;
1659 dev->get_stats = rtl8169_get_stats;
1660 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
1661 dev->stop = rtl8169_close;
1662 dev->tx_timeout = rtl8169_tx_timeout;
1663 dev->set_multicast_list = rtl8169_set_rx_mode;
1664 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
1665 dev->irq = pdev->irq;
1666 dev->base_addr = (unsigned long) ioaddr;
1667 dev->change_mtu = rtl8169_change_mtu;
1668
1669#ifdef CONFIG_R8169_NAPI
1670 dev->poll = rtl8169_poll;
1671 dev->weight = R8169_NAPI_WEIGHT;
1da177e4
LT
1672#endif
1673
1674#ifdef CONFIG_R8169_VLAN
1675 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1676 dev->vlan_rx_register = rtl8169_vlan_rx_register;
1677 dev->vlan_rx_kill_vid = rtl8169_vlan_rx_kill_vid;
1678#endif
1679
1680#ifdef CONFIG_NET_POLL_CONTROLLER
1681 dev->poll_controller = rtl8169_netpoll;
1682#endif
1683
1684 tp->intr_mask = 0xffff;
1685 tp->pci_dev = pdev;
1686 tp->mmio_addr = ioaddr;
bcf0bf90 1687 tp->align = rtl_cfg_info[ent->driver_data].align;
1da177e4
LT
1688
1689 spin_lock_init(&tp->lock);
1690
1691 rc = register_netdev(dev);
4ff96fa6
FR
1692 if (rc < 0)
1693 goto err_out_unmap_5;
1da177e4
LT
1694
1695 pci_set_drvdata(pdev, dev);
1696
b57b7e5a
SH
1697 if (netif_msg_probe(tp)) {
1698 printk(KERN_INFO "%s: %s at 0x%lx, "
1699 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
1700 "IRQ %d\n",
1701 dev->name,
bcf0bf90 1702 rtl_chip_info[tp->chipset].name,
b57b7e5a
SH
1703 dev->base_addr,
1704 dev->dev_addr[0], dev->dev_addr[1],
1705 dev->dev_addr[2], dev->dev_addr[3],
1706 dev->dev_addr[4], dev->dev_addr[5], dev->irq);
1707 }
1da177e4 1708
4ff96fa6 1709 rtl8169_init_phy(dev, tp);
1da177e4 1710
4ff96fa6
FR
1711out:
1712 return rc;
1da177e4 1713
4ff96fa6
FR
1714err_out_unmap_5:
1715 iounmap(ioaddr);
1716err_out_free_res_4:
1717 pci_release_regions(pdev);
1718err_out_mwi_3:
1719 pci_clear_mwi(pdev);
1720err_out_disable_2:
1721 pci_disable_device(pdev);
1722err_out_free_dev_1:
1723 free_netdev(dev);
1724 goto out;
1da177e4
LT
1725}
1726
1727static void __devexit
1728rtl8169_remove_one(struct pci_dev *pdev)
1729{
1730 struct net_device *dev = pci_get_drvdata(pdev);
1731 struct rtl8169_private *tp = netdev_priv(dev);
1732
1733 assert(dev != NULL);
1734 assert(tp != NULL);
1735
eb2a021c
FR
1736 flush_scheduled_work();
1737
1da177e4
LT
1738 unregister_netdev(dev);
1739 rtl8169_release_board(pdev, dev, tp->mmio_addr);
1740 pci_set_drvdata(pdev, NULL);
1741}
1742
1da177e4
LT
1743static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
1744 struct net_device *dev)
1745{
1746 unsigned int mtu = dev->mtu;
1747
1748 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1749}
1750
1751static int rtl8169_open(struct net_device *dev)
1752{
1753 struct rtl8169_private *tp = netdev_priv(dev);
1754 struct pci_dev *pdev = tp->pci_dev;
1755 int retval;
1756
1757 rtl8169_set_rxbufsize(tp, dev);
1758
1759 retval =
1fb9df5d 1760 request_irq(dev->irq, rtl8169_interrupt, IRQF_SHARED, dev->name, dev);
1da177e4
LT
1761 if (retval < 0)
1762 goto out;
1763
1764 retval = -ENOMEM;
1765
1766 /*
1767 * Rx and Tx desscriptors needs 256 bytes alignment.
1768 * pci_alloc_consistent provides more.
1769 */
1770 tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
1771 &tp->TxPhyAddr);
1772 if (!tp->TxDescArray)
1773 goto err_free_irq;
1774
1775 tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
1776 &tp->RxPhyAddr);
1777 if (!tp->RxDescArray)
1778 goto err_free_tx;
1779
1780 retval = rtl8169_init_ring(dev);
1781 if (retval < 0)
1782 goto err_free_rx;
1783
c4028958 1784 INIT_DELAYED_WORK(&tp->task, NULL);
1da177e4
LT
1785
1786 rtl8169_hw_start(dev);
1787
1788 rtl8169_request_timer(dev);
1789
1790 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
1791out:
1792 return retval;
1793
1794err_free_rx:
1795 pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
1796 tp->RxPhyAddr);
1797err_free_tx:
1798 pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
1799 tp->TxPhyAddr);
1800err_free_irq:
1801 free_irq(dev->irq, dev);
1802 goto out;
1803}
1804
1805static void rtl8169_hw_reset(void __iomem *ioaddr)
1806{
1807 /* Disable interrupts */
1808 rtl8169_irq_mask_and_ack(ioaddr);
1809
1810 /* Reset the chipset */
1811 RTL_W8(ChipCmd, CmdReset);
1812
1813 /* PCI commit */
1814 RTL_R8(ChipCmd);
1815}
1816
9cb427b6
FR
1817static void rtl8169_set_rx_tx_config_registers(struct rtl8169_private *tp)
1818{
1819 void __iomem *ioaddr = tp->mmio_addr;
1820 u32 cfg = rtl8169_rx_config;
1821
1822 cfg |= (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
1823 RTL_W32(RxConfig, cfg);
1824
1825 /* Set DMA burst size and Interframe Gap Time */
1826 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
1827 (InterFrameGap << TxInterFrameGapShift));
1828}
1829
1830static void rtl8169_hw_start(struct net_device *dev)
1da177e4
LT
1831{
1832 struct rtl8169_private *tp = netdev_priv(dev);
1833 void __iomem *ioaddr = tp->mmio_addr;
bcf0bf90 1834 struct pci_dev *pdev = tp->pci_dev;
9cb427b6 1835 u16 cmd;
1da177e4
LT
1836 u32 i;
1837
1838 /* Soft reset the chip. */
1839 RTL_W8(ChipCmd, CmdReset);
1840
1841 /* Check that the chip has finished the reset. */
b518fa8e 1842 for (i = 100; i > 0; i--) {
1da177e4
LT
1843 if ((RTL_R8(ChipCmd) & CmdReset) == 0)
1844 break;
b518fa8e 1845 msleep_interruptible(1);
1da177e4
LT
1846 }
1847
9cb427b6
FR
1848 if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
1849 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
1850 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
1851 }
1852
bcf0bf90
FR
1853 if (tp->mac_version == RTL_GIGA_MAC_VER_13) {
1854 pci_write_config_word(pdev, 0x68, 0x00);
1855 pci_write_config_word(pdev, 0x69, 0x08);
1856 }
1857
1858 /* Undocumented stuff. */
1859 if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
bcf0bf90
FR
1860 /* Realtek's r1000_n.c driver uses '&& 0x01' here. Well... */
1861 if ((RTL_R8(Config2) & 0x07) & 0x01)
1862 RTL_W32(0x7c, 0x0007ffff);
1863
1864 RTL_W32(0x7c, 0x0007ff00);
1865
1866 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
1867 cmd = cmd & 0xef;
1868 pci_write_config_word(pdev, PCI_COMMAND, cmd);
1da177e4
LT
1869 }
1870
1871 RTL_W8(Cfg9346, Cfg9346_Unlock);
9cb427b6
FR
1872 if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
1873 (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
1874 (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
1875 (tp->mac_version == RTL_GIGA_MAC_VER_04))
1876 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
1877
1da177e4
LT
1878 RTL_W8(EarlyTxThres, EarlyTxThld);
1879
126fa4b9
FR
1880 /* Low hurts. Let's disable the filtering. */
1881 RTL_W16(RxMaxSize, 16383);
1da177e4 1882
9cb427b6
FR
1883 if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
1884 (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
1885 (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
1886 (tp->mac_version == RTL_GIGA_MAC_VER_04))
9cb427b6 1887 rtl8169_set_rx_tx_config_registers(tp);
1da177e4 1888
9cb427b6
FR
1889 cmd = RTL_R16(CPlusCmd);
1890 RTL_W16(CPlusCmd, cmd);
1da177e4 1891
9cb427b6 1892 tp->cp_cmd |= cmd | PCIMulRW;
1da177e4 1893
bcf0bf90
FR
1894 if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
1895 (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
1da177e4
LT
1896 dprintk(KERN_INFO PFX "Set MAC Reg C+CR Offset 0xE0. "
1897 "Bit-3 and bit-14 MUST be 1\n");
bcf0bf90 1898 tp->cp_cmd |= (1 << 14);
1da177e4
LT
1899 }
1900
bcf0bf90
FR
1901 RTL_W16(CPlusCmd, tp->cp_cmd);
1902
1da177e4
LT
1903 /*
1904 * Undocumented corner. Supposedly:
1905 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
1906 */
1907 RTL_W16(IntrMitigate, 0x0000);
1908
b39fe41f
FR
1909 /*
1910 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
1911 * register to be written before TxDescAddrLow to work.
1912 * Switching from MMIO to I/O access fixes the issue as well.
1913 */
1da177e4 1914 RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr >> 32));
b39fe41f 1915 RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr & DMA_32BIT_MASK));
1da177e4 1916 RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr >> 32));
b39fe41f 1917 RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr & DMA_32BIT_MASK));
9cb427b6
FR
1918
1919 if ((tp->mac_version != RTL_GIGA_MAC_VER_01) &&
1920 (tp->mac_version != RTL_GIGA_MAC_VER_02) &&
1921 (tp->mac_version != RTL_GIGA_MAC_VER_03) &&
1922 (tp->mac_version != RTL_GIGA_MAC_VER_04)) {
1923 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
1924 rtl8169_set_rx_tx_config_registers(tp);
1925 }
1926
1da177e4 1927 RTL_W8(Cfg9346, Cfg9346_Lock);
b518fa8e
FR
1928
1929 /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
1930 RTL_R8(IntrMask);
1da177e4
LT
1931
1932 RTL_W32(RxMissed, 0);
1933
1934 rtl8169_set_rx_mode(dev);
1935
1936 /* no early-rx interrupts */
1937 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
1938
1939 /* Enable all known interrupts by setting the interrupt mask. */
1940 RTL_W16(IntrMask, rtl8169_intr_mask);
1941
1942 netif_start_queue(dev);
1943}
1944
1945static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
1946{
1947 struct rtl8169_private *tp = netdev_priv(dev);
1948 int ret = 0;
1949
1950 if (new_mtu < ETH_ZLEN || new_mtu > SafeMtu)
1951 return -EINVAL;
1952
1953 dev->mtu = new_mtu;
1954
1955 if (!netif_running(dev))
1956 goto out;
1957
1958 rtl8169_down(dev);
1959
1960 rtl8169_set_rxbufsize(tp, dev);
1961
1962 ret = rtl8169_init_ring(dev);
1963 if (ret < 0)
1964 goto out;
1965
1966 netif_poll_enable(dev);
1967
1968 rtl8169_hw_start(dev);
1969
1970 rtl8169_request_timer(dev);
1971
1972out:
1973 return ret;
1974}
1975
1976static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
1977{
1978 desc->addr = 0x0badbadbadbadbadull;
1979 desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
1980}
1981
1982static void rtl8169_free_rx_skb(struct rtl8169_private *tp,
1983 struct sk_buff **sk_buff, struct RxDesc *desc)
1984{
1985 struct pci_dev *pdev = tp->pci_dev;
1986
1987 pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
1988 PCI_DMA_FROMDEVICE);
1989 dev_kfree_skb(*sk_buff);
1990 *sk_buff = NULL;
1991 rtl8169_make_unusable_by_asic(desc);
1992}
1993
1994static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
1995{
1996 u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
1997
1998 desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
1999}
2000
2001static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
2002 u32 rx_buf_sz)
2003{
2004 desc->addr = cpu_to_le64(mapping);
2005 wmb();
2006 rtl8169_mark_to_asic(desc, rx_buf_sz);
2007}
2008
2009static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
bcf0bf90
FR
2010 struct RxDesc *desc, int rx_buf_sz,
2011 unsigned int align)
1da177e4
LT
2012{
2013 struct sk_buff *skb;
2014 dma_addr_t mapping;
2015 int ret = 0;
2016
bcf0bf90 2017 skb = dev_alloc_skb(rx_buf_sz + align);
1da177e4
LT
2018 if (!skb)
2019 goto err_out;
2020
dcb92f88 2021 skb_reserve(skb, (align - 1) & (unsigned long)skb->data);
1da177e4
LT
2022 *sk_buff = skb;
2023
689be439 2024 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
1da177e4
LT
2025 PCI_DMA_FROMDEVICE);
2026
2027 rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
2028
2029out:
2030 return ret;
2031
2032err_out:
2033 ret = -ENOMEM;
2034 rtl8169_make_unusable_by_asic(desc);
2035 goto out;
2036}
2037
2038static void rtl8169_rx_clear(struct rtl8169_private *tp)
2039{
2040 int i;
2041
2042 for (i = 0; i < NUM_RX_DESC; i++) {
2043 if (tp->Rx_skbuff[i]) {
2044 rtl8169_free_rx_skb(tp, tp->Rx_skbuff + i,
2045 tp->RxDescArray + i);
2046 }
2047 }
2048}
2049
2050static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
2051 u32 start, u32 end)
2052{
2053 u32 cur;
5b0384f4 2054
1da177e4
LT
2055 for (cur = start; end - cur > 0; cur++) {
2056 int ret, i = cur % NUM_RX_DESC;
2057
2058 if (tp->Rx_skbuff[i])
2059 continue;
bcf0bf90 2060
1da177e4 2061 ret = rtl8169_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
bcf0bf90 2062 tp->RxDescArray + i, tp->rx_buf_sz, tp->align);
1da177e4
LT
2063 if (ret < 0)
2064 break;
2065 }
2066 return cur - start;
2067}
2068
/* Flag a descriptor as the last one so the NIC wraps back to index 0. */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
2073
2074static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
2075{
2076 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
2077}
2078
2079static int rtl8169_init_ring(struct net_device *dev)
2080{
2081 struct rtl8169_private *tp = netdev_priv(dev);
2082
2083 rtl8169_init_ring_indexes(tp);
2084
2085 memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
2086 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
2087
2088 if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
2089 goto err_out;
2090
2091 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
2092
2093 return 0;
2094
2095err_out:
2096 rtl8169_rx_clear(tp);
2097 return -ENOMEM;
2098}
2099
/* Tear down one Tx slot: unmap its DMA and scrub the descriptor so the
 * slot reads as free (len == 0 marks an empty ring_info). */
static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb,
				 struct TxDesc *desc)
{
	unsigned int len = tx_skb->len;

	pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
	desc->opts1 = 0x00;
	desc->opts2 = 0x00;
	desc->addr = 0x00;
	tx_skb->len = 0;
}
2111
2112static void rtl8169_tx_clear(struct rtl8169_private *tp)
2113{
2114 unsigned int i;
2115
2116 for (i = tp->dirty_tx; i < tp->dirty_tx + NUM_TX_DESC; i++) {
2117 unsigned int entry = i % NUM_TX_DESC;
2118 struct ring_info *tx_skb = tp->tx_skb + entry;
2119 unsigned int len = tx_skb->len;
2120
2121 if (len) {
2122 struct sk_buff *skb = tx_skb->skb;
2123
2124 rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb,
2125 tp->TxDescArray + entry);
2126 if (skb) {
2127 dev_kfree_skb(skb);
2128 tx_skb->skb = NULL;
2129 }
2130 tp->stats.tx_dropped++;
2131 }
2132 }
2133 tp->cur_tx = tp->dirty_tx = 0;
2134}
2135
/* (Re)target the device's shared delayed work item at 'task' and queue
 * it to run roughly 4 jiffies from now. */
static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	PREPARE_DELAYED_WORK(&tp->task, task);
	schedule_delayed_work(&tp->task, 4);
}
2143
/* Ensure no IRQ handler or NAPI poll is still running, and mask/ack all
 * chip interrupt sources, before the caller reworks the rings. */
static void rtl8169_wait_for_quiescence(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	synchronize_irq(dev->irq);

	/* Wait for any pending NAPI task to complete */
	netif_poll_disable(dev);

	rtl8169_irq_mask_and_ack(ioaddr);

	netif_poll_enable(dev);
}
2158
c4028958 2159static void rtl8169_reinit_task(struct work_struct *work)
1da177e4 2160{
c4028958
DH
2161 struct rtl8169_private *tp =
2162 container_of(work, struct rtl8169_private, task.work);
2163 struct net_device *dev = tp->dev;
1da177e4
LT
2164 int ret;
2165
eb2a021c
FR
2166 rtnl_lock();
2167
2168 if (!netif_running(dev))
2169 goto out_unlock;
2170
2171 rtl8169_wait_for_quiescence(dev);
2172 rtl8169_close(dev);
1da177e4
LT
2173
2174 ret = rtl8169_open(dev);
2175 if (unlikely(ret < 0)) {
2176 if (net_ratelimit()) {
b57b7e5a
SH
2177 struct rtl8169_private *tp = netdev_priv(dev);
2178
2179 if (netif_msg_drv(tp)) {
2180 printk(PFX KERN_ERR
2181 "%s: reinit failure (status = %d)."
2182 " Rescheduling.\n", dev->name, ret);
2183 }
1da177e4
LT
2184 }
2185 rtl8169_schedule_work(dev, rtl8169_reinit_task);
2186 }
eb2a021c
FR
2187
2188out_unlock:
2189 rtnl_unlock();
1da177e4
LT
2190}
2191
c4028958 2192static void rtl8169_reset_task(struct work_struct *work)
1da177e4 2193{
c4028958
DH
2194 struct rtl8169_private *tp =
2195 container_of(work, struct rtl8169_private, task.work);
2196 struct net_device *dev = tp->dev;
1da177e4 2197
eb2a021c
FR
2198 rtnl_lock();
2199
1da177e4 2200 if (!netif_running(dev))
eb2a021c 2201 goto out_unlock;
1da177e4
LT
2202
2203 rtl8169_wait_for_quiescence(dev);
2204
2205 rtl8169_rx_interrupt(dev, tp, tp->mmio_addr);
2206 rtl8169_tx_clear(tp);
2207
2208 if (tp->dirty_rx == tp->cur_rx) {
2209 rtl8169_init_ring_indexes(tp);
2210 rtl8169_hw_start(dev);
2211 netif_wake_queue(dev);
2212 } else {
2213 if (net_ratelimit()) {
b57b7e5a
SH
2214 struct rtl8169_private *tp = netdev_priv(dev);
2215
2216 if (netif_msg_intr(tp)) {
2217 printk(PFX KERN_EMERG
2218 "%s: Rx buffers shortage\n", dev->name);
2219 }
1da177e4
LT
2220 }
2221 rtl8169_schedule_work(dev, rtl8169_reset_task);
2222 }
eb2a021c
FR
2223
2224out_unlock:
2225 rtnl_unlock();
1da177e4
LT
2226}
2227
/* net_device watchdog hook: stop the chip immediately; actual recovery
 * is finished asynchronously by rtl8169_reset_task. */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_hw_reset(tp->mmio_addr);

	/* Let's wait a bit while any (async) irq lands on */
	rtl8169_schedule_work(dev, rtl8169_reset_task);
}
2237
/* Fill Tx descriptors for every page fragment of 'skb', starting one
 * slot past tp->cur_tx. Returns the number of fragments queued. The skb
 * pointer is stored only in the last slot so completion frees it once. */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 opts1)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc *txd;

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 status, len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = frag->size;
		addr = ((void *) page_address(frag->page)) + frag->page_offset;
		/* NOTE(review): mapping is not checked for failure - TODO */
		mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE);

		/* anti gcc 2.95.3 bugware (sic) */
		status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		/* Attach the skb to the final fragment and close the chain. */
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;
}
2275
/* Compute the Tx descriptor offload bits for 'skb': LargeSend + MSS when
 * TSO applies, else hardware TCP/UDP checksumming when requested. */
static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
{
	if (dev->features & NETIF_F_TSO) {
		u32 mss = skb_shinfo(skb)->gso_size;

		/* A non-zero segment size means the stack wants TSO. */
		if (mss)
			return LargeSend | ((mss & MSSMask) << MSSShift);
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = skb->nh.iph;

		if (ip->protocol == IPPROTO_TCP)
			return IPCS | TCPCS;
		else if (ip->protocol == IPPROTO_UDP)
			return IPCS | UDPCS;
		WARN_ON(1);	/* we need a WARN() */
	}
	return 0;
}
2295
/* hard_start_xmit hook: queue 'skb' on the Tx ring (fragments first via
 * rtl8169_xmit_frags, head data last) and kick the chip.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY with the queue stopped. */
static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int frags, entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	dma_addr_t mapping;
	u32 status, len;
	u32 opts1;
	int ret = NETDEV_TX_OK;

	/* The queue should have been stopped before the ring got this full. */
	if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
		if (netif_msg_drv(tp)) {
			printk(KERN_ERR
			       "%s: BUG! Tx Ring full when queue awake!\n",
			       dev->name);
		}
		goto err_stop;
	}

	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop;

	opts1 = DescOwn | rtl8169_tso_csum(skb, dev);

	frags = rtl8169_xmit_frags(tp, skb, opts1);
	if (frags) {
		len = skb_headlen(skb);
		opts1 |= FirstFrag;
	} else {
		len = skb->len;

		/* Pad runt frames up to the minimum Ethernet length. */
		if (unlikely(len < ETH_ZLEN)) {
			if (skb_padto(skb, ETH_ZLEN))
				goto err_update_stats;
			len = ETH_ZLEN;
		}

		opts1 |= FirstFrag | LastFrag;
		tp->tx_skb[entry].skb = skb;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);
	txd->opts2 = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));

	/* Descriptor fields must be visible before DescOwn is set below. */
	wmb();

	/* anti gcc 2.95.3 bugware (sic) */
	status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
	txd->opts1 = cpu_to_le32(status);

	dev->trans_start = jiffies;

	tp->cur_tx += frags + 1;

	/* Pairs with the smp_rmb() in rtl8169_tx_interrupt. */
	smp_wmb();

	RTL_W8(TxPoll, 0x40);	/* set polling bit */

	/* Stop the queue when low on descriptors; re-check after the
	 * barrier in case completion freed slots concurrently. */
	if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		smp_rmb();
		if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
			netif_wake_queue(dev);
	}

out:
	return ret;

err_stop:
	netif_stop_queue(dev);
	ret = NETDEV_TX_BUSY;
err_update_stats:
	tp->stats.tx_dropped++;
	goto out;
}
2375
/* Handle a SYSErr (PCI bus error) interrupt: log the PCI command/status
 * registers, clear the latched error bits, optionally drop 64-bit DMA,
 * then reset the chip and schedule a full reinit. */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	void __iomem *ioaddr = tp->mmio_addr;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	if (netif_msg_intr(tp)) {
		printk(KERN_ERR
		       "%s: PCI error (cmd = 0x%04x, status = 0x%04x).\n",
		       dev->name, pci_cmd, pci_status);
	}

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	/* PCI status error bits are write-one-to-clear. */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	/* The infamous DAC f*ckup only happens at boot time */
	if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
		if (netif_msg_intr(tp))
			printk(KERN_INFO "%s: disabling PCI DAC.\n", dev->name);
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
	}

	rtl8169_hw_reset(ioaddr);

	rtl8169_schedule_work(dev, rtl8169_reinit_task);
}
2425
/* Reclaim completed Tx descriptors: account stats, unmap buffers, free
 * skbs on their last fragment, and wake the queue when space returns. */
static void
rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
		     void __iomem *ioaddr)
{
	unsigned int dirty_tx, tx_left;

	assert(dev != NULL);
	assert(tp != NULL);
	assert(ioaddr != NULL);

	dirty_tx = tp->dirty_tx;
	/* Pairs with the smp_wmb() in start_xmit after cur_tx is bumped. */
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 len = tx_skb->len;
		u32 status;

		/* Re-read ownership from the descriptor each iteration. */
		rmb();
		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		if (status & DescOwn)
			break;

		tp->stats.tx_bytes += len;
		tp->stats.tx_packets++;

		rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry);

		/* The skb pointer lives only in the last fragment's slot. */
		if (status & LastFrag) {
			dev_kfree_skb_irq(tx_skb->skb);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		/* Publish dirty_tx before the queue-state check below. */
		smp_wmb();
		if (netif_queue_stopped(dev) &&
		    (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
	}
}
2473
126fa4b9
FR
2474static inline int rtl8169_fragmented_frame(u32 status)
2475{
2476 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
2477}
2478
1da177e4
LT
2479static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
2480{
2481 u32 opts1 = le32_to_cpu(desc->opts1);
2482 u32 status = opts1 & RxProtoMask;
2483
2484 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
2485 ((status == RxProtoUDP) && !(opts1 & UDPFail)) ||
2486 ((status == RxProtoIP) && !(opts1 & IPFail)))
2487 skb->ip_summed = CHECKSUM_UNNECESSARY;
2488 else
2489 skb->ip_summed = CHECKSUM_NONE;
2490}
2491
/* For packets below rx_copybreak, copy the data into a small fresh skb
 * and recycle the original ring buffer to the NIC.
 * Returns 0 when the copy happened (*sk_buff replaced), -1 otherwise. */
static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
				      struct RxDesc *desc, int rx_buf_sz,
				      unsigned int align)
{
	int ret = -1;

	if (pkt_size < rx_copybreak) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(pkt_size + align);
		if (skb) {
			/* Same alignment offset as rtl8169_alloc_rx_skb. */
			skb_reserve(skb, (align - 1) & (unsigned long)skb->data);
			eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
			*sk_buff = skb;
			/* Hand the original buffer straight back to the NIC;
			 * this rewrites desc->opts1. */
			rtl8169_mark_to_asic(desc, rx_buf_sz);
			ret = 0;
		}
	}
	return ret;
}
2512
/* Process received frames: walk the Rx ring from cur_rx up to the quota,
 * account errors, deliver good frames to the stack, then refill the ring.
 * Returns the number of descriptors consumed. */
static int
rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
		     void __iomem *ioaddr)
{
	unsigned int cur_rx, rx_left;
	unsigned int delta, count;

	assert(dev != NULL);
	assert(tp != NULL);
	assert(ioaddr != NULL);

	cur_rx = tp->cur_rx;
	/* Descriptors potentially filled by the NIC, capped by the quota. */
	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = rtl8169_rx_quota(rx_left, (u32) dev->quota);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescArray + entry;
		u32 status;

		/* Order the ownership read against the descriptor body. */
		rmb();
		status = le32_to_cpu(desc->opts1);

		if (status & DescOwn)
			break;
		if (unlikely(status & RxRES)) {
			/* Hardware receive error: account and recycle. */
			if (netif_msg_rx_err(tp)) {
				printk(KERN_INFO
				       "%s: Rx ERROR. status = %08x\n",
				       dev->name, status);
			}
			tp->stats.rx_errors++;
			if (status & (RxRWT | RxRUNT))
				tp->stats.rx_length_errors++;
			if (status & RxCRC)
				tp->stats.rx_crc_errors++;
			if (status & RxFOVF) {
				/* FIFO overflow: a full reset is needed. */
				rtl8169_schedule_work(dev, rtl8169_reset_task);
				tp->stats.rx_fifo_errors++;
			}
			rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
		} else {
			struct sk_buff *skb = tp->Rx_skbuff[entry];
			int pkt_size = (status & 0x00001FFF) - 4;
			void (*pci_action)(struct pci_dev *, dma_addr_t,
				size_t, int) = pci_dma_sync_single_for_device;

			/*
			 * The driver does not support incoming fragmented
			 * frames. They are seen as a symptom of over-mtu
			 * sized frames.
			 */
			if (unlikely(rtl8169_fragmented_frame(status))) {
				tp->stats.rx_dropped++;
				tp->stats.rx_length_errors++;
				rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
				continue;
			}

			/* NOTE(review): checksum state is set on the ring skb
			 * before a possible copy; a copybreak-copied skb goes
			 * up with its default ip_summed - confirm intent. */
			rtl8169_rx_csum(skb, desc);

			pci_dma_sync_single_for_cpu(tp->pci_dev,
				le64_to_cpu(desc->addr), tp->rx_buf_sz,
				PCI_DMA_FROMDEVICE);

			if (rtl8169_try_rx_copy(&skb, pkt_size, desc,
						tp->rx_buf_sz, tp->align)) {
				/* No copy: the ring buffer itself goes up,
				 * so unmap it instead of syncing it back. */
				pci_action = pci_unmap_single;
				tp->Rx_skbuff[entry] = NULL;
			}

			pci_action(tp->pci_dev, le64_to_cpu(desc->addr),
				   tp->rx_buf_sz, PCI_DMA_FROMDEVICE);

			skb->dev = dev;
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			/* The VLAN-accelerated path consumes the skb itself. */
			if (rtl8169_rx_vlan_skb(tp, desc, skb) < 0)
				rtl8169_rx_skb(skb);

			dev->last_rx = jiffies;
			tp->stats.rx_bytes += pkt_size;
			tp->stats.rx_packets++;
		}
	}

	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	/* Replenish whatever was consumed above. */
	delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
	if (!delta && count && netif_msg_intr(tp))
		printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
	tp->dirty_rx += delta;

	/*
	 * FIXME: until there is periodic timer to try and refill the ring,
	 * a temporary shortage may definitely kill the Rx process.
	 * - disable the asic to try and avoid an overflow and kick it again
	 *   after refill ?
	 * - how do others driver handle this condition (Uh oh...).
	 */
	if ((tp->dirty_rx + NUM_RX_DESC == tp->cur_rx) && netif_msg_intr(tp))
		printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name);

	return count;
}
2620
/* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */
static irqreturn_t
rtl8169_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct rtl8169_private *tp = netdev_priv(dev);
	int boguscnt = max_interrupt_work;
	void __iomem *ioaddr = tp->mmio_addr;
	int status;
	int handled = 0;

	do {
		status = RTL_R16(IntrStatus);

		/* hotplug/major error/no more work/shared irq */
		if ((status == 0xFFFF) || !status)
			break;

		handled = 1;

		if (unlikely(!netif_running(dev))) {
			rtl8169_asic_down(ioaddr);
			goto out;
		}

		status &= tp->intr_mask;
		/* Ack; a FIFO overflow is widened to RxOverflow as well. */
		RTL_W16(IntrStatus,
			(status & RxFIFOOver) ? (status | RxOverflow) : status);

		if (!(status & rtl8169_intr_mask))
			break;

		if (unlikely(status & SYSErr)) {
			rtl8169_pcierr_interrupt(dev);
			break;
		}

		if (status & LinkChg)
			rtl8169_check_link_status(dev, tp, ioaddr);

#ifdef CONFIG_R8169_NAPI
		/* Mask NAPI-handled events and defer the work to poll(). */
		RTL_W16(IntrMask, rtl8169_intr_mask & ~rtl8169_napi_event);
		tp->intr_mask = ~rtl8169_napi_event;

		if (likely(netif_rx_schedule_prep(dev)))
			__netif_rx_schedule(dev);
		else if (netif_msg_intr(tp)) {
			printk(KERN_INFO "%s: interrupt %04x taken in poll\n",
			       dev->name, status);
		}
		break;
#else
		/* Rx interrupt */
		if (status & (RxOK | RxOverflow | RxFIFOOver)) {
			rtl8169_rx_interrupt(dev, tp, ioaddr);
		}
		/* Tx interrupt */
		if (status & (TxOK | TxErr))
			rtl8169_tx_interrupt(dev, tp, ioaddr);
#endif

		boguscnt--;
	} while (boguscnt > 0);

	if (boguscnt <= 0) {
		if (netif_msg_intr(tp) && net_ratelimit() ) {
			printk(KERN_WARNING
			       "%s: Too much work at interrupt!\n", dev->name);
		}
		/* Clear all interrupt sources. */
		RTL_W16(IntrStatus, 0xffff);
	}
out:
	return IRQ_RETVAL(handled);
}
2696
#ifdef CONFIG_R8169_NAPI
/* NAPI poll routine: Rx drives the budget, Tx completion is piggy-backed.
 * Returns non-zero while more work remains. */
static int rtl8169_poll(struct net_device *dev, int *budget)
{
	unsigned int work_done, work_to_do = min(*budget, dev->quota);
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	work_done = rtl8169_rx_interrupt(dev, tp, ioaddr);
	rtl8169_tx_interrupt(dev, tp, ioaddr);

	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done < work_to_do) {
		/* Ring drained: leave NAPI mode and re-enable interrupts. */
		netif_rx_complete(dev);
		tp->intr_mask = 0xffff;
		/*
		 * 20040426: the barrier is not strictly required but the
		 * behavior of the irq handler could be less predictable
		 * without it. Btw, the lack of flush for the posted pci
		 * write is safe - FR
		 */
		smp_wmb();
		RTL_W16(IntrMask, rtl8169_intr_mask);
	}

	return (work_done >= work_to_do);
}
#endif
2726
/* Bring the device down: stop the chip, harvest counters, make sure no
 * IRQ/poll/xmit path is still active, then drain both rings. The whole
 * shutdown repeats until the interrupt mask reads back quiescent. */
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;
	unsigned int intrmask;

	rtl8169_delete_timer(dev);

	netif_stop_queue(dev);

core_down:
	spin_lock_irq(&tp->lock);

	rtl8169_asic_down(ioaddr);

	/* Update the error counts. */
	tp->stats.rx_missed_errors += RTL_R32(RxMissed);
	RTL_W32(RxMissed, 0);

	spin_unlock_irq(&tp->lock);

	synchronize_irq(dev->irq);

	/* Disable NAPI polling only once across retries. */
	if (!poll_locked) {
		netif_poll_disable(dev);
		poll_locked++;
	}

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_sched();	/* FIXME: should this be synchronize_irq()? */

	/*
	 * And now for the 50k$ question: are IRQ disabled or not ?
	 *
	 * Two paths lead here:
	 * 1) dev->close
	 *    -> netif_running() is available to sync the current code and the
	 *       IRQ handler. See rtl8169_interrupt for details.
	 * 2) dev->change_mtu
	 *    -> rtl8169_poll can not be issued again and re-enable the
	 *       interruptions. Let's simply issue the IRQ down sequence again.
	 *
	 * No loop if hotpluged or major error (0xffff).
	 */
	intrmask = RTL_R16(IntrMask);
	if (intrmask && (intrmask != 0xffff))
		goto core_down;

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);
}
2780
/* net_device close hook: quiesce the hardware, release the IRQ and the
 * coherent descriptor rings. Always returns 0. */
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	rtl8169_down(dev);

	free_irq(dev->irq, dev);

	/* Re-arm polling (disabled in rtl8169_down) for the next open(). */
	netif_poll_enable(dev);

	pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
			    tp->RxPhyAddr);
	pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
			    tp->TxPhyAddr);
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	return 0;
}
2801
/* Program the chip's receive filter (promiscuous / all-multi / hashed
 * multicast) and the 64-bit MAR multicast filter registers. */
static void
rtl8169_set_rx_mode(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	int i, rx_mode;
	u32 tmp = 0;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		if (netif_msg_link(tp)) {
			printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
			       dev->name);
		}
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		/* Hash each address (CRC high 6 bits) into the MAR bitmap. */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	tmp = rtl8169_rx_config | rx_mode |
	      (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);

	/* NOTE(review): these chip revisions get a catch-all multicast
	 * filter regardless of the computed hash - presumably the hash
	 * filter is unusable on them; confirm against chip errata. */
	if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_13) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_15)) {
		mc_filter[0] = 0xffffffff;
		mc_filter[1] = 0xffffffff;
	}

	RTL_W32(RxConfig, tmp);
	RTL_W32(MAR0 + 0, mc_filter[0]);
	RTL_W32(MAR0 + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}
2859
/**
 * rtl8169_get_stats - Get rtl8169 read/write statistics
 * @dev: The Ethernet Device to get statistics for
 *
 * Get TX/RX statistics for rtl8169
 */
static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;

	if (netif_running(dev)) {
		spin_lock_irqsave(&tp->lock, flags);
		/* RxMissed is clear-on-write: fold it into software stats. */
		tp->stats.rx_missed_errors += RTL_R32(RxMissed);
		RTL_W32(RxMissed, 0);
		spin_unlock_irqrestore(&tp->lock, flags);
	}

	return &tp->stats;
}
2881
#ifdef CONFIG_PM

/* PCI suspend hook: detach the interface, stop the chip, harvest the
 * missed counter, then arm wake-on-LAN (if enabled) and power down. */
static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	if (!netif_running(dev))
		goto out;

	netif_device_detach(dev);
	netif_stop_queue(dev);

	spin_lock_irq(&tp->lock);

	rtl8169_asic_down(ioaddr);

	/* RxMissed is clear-on-write: fold it into software stats. */
	tp->stats.rx_missed_errors += RTL_R32(RxMissed);
	RTL_W32(RxMissed, 0);

	spin_unlock_irq(&tp->lock);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
out:
	return 0;
}
2911
/* PCI resume hook: restore PCI state and power, then schedule the reset
 * task to rebuild the (lost) chip state asynchronously. */
static int rtl8169_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (!netif_running(dev))
		goto out;

	netif_device_attach(dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	rtl8169_schedule_work(dev, rtl8169_reset_task);
out:
	return 0;
}

#endif /* CONFIG_PM */
2931
/* PCI glue: probe/remove plus optional power-management hooks. */
static struct pci_driver rtl8169_pci_driver = {
	.name = MODULENAME,
	.id_table = rtl8169_pci_tbl,
	.probe = rtl8169_init_one,
	.remove = __devexit_p(rtl8169_remove_one),
#ifdef CONFIG_PM
	.suspend = rtl8169_suspend,
	.resume = rtl8169_resume,
#endif
};
2942
2943static int __init
2944rtl8169_init_module(void)
2945{
29917620 2946 return pci_register_driver(&rtl8169_pci_driver);
1da177e4
LT
2947}
2948
2949static void __exit
2950rtl8169_cleanup_module(void)
2951{
2952 pci_unregister_driver(&rtl8169_pci_driver);
2953}
2954
2955module_init(rtl8169_init_module);
2956module_exit(rtl8169_cleanup_module);