]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/r8169.c
IRQ: Typedef the IRQ handler function type
[net-next-2.6.git] / drivers / net / r8169.c
CommitLineData
1da177e4
LT
1/*
2=========================================================================
3 r8169.c: A RealTek RTL-8169 Gigabit Ethernet driver for Linux kernel 2.4.x.
4 --------------------------------------------------------------------
5
6 History:
7 Feb 4 2002 - created initially by ShuChen <shuchen@realtek.com.tw>.
8 May 20 2002 - Add link status force-mode and TBI mode support.
5b0384f4 9 2004 - Massive updates. See kernel SCM system for details.
1da177e4
LT
10=========================================================================
11 1. [DEPRECATED: use ethtool instead] The media can be forced in 5 modes.
12 Command: 'insmod r8169 media = SET_MEDIA'
13 Ex: 'insmod r8169 media = 0x04' will force PHY to operate in 100Mpbs Half-duplex.
5b0384f4 14
1da177e4
LT
15 SET_MEDIA can be:
16 _10_Half = 0x01
17 _10_Full = 0x02
18 _100_Half = 0x04
19 _100_Full = 0x08
20 _1000_Full = 0x10
5b0384f4 21
1da177e4
LT
22 2. Support TBI mode.
23=========================================================================
24VERSION 1.1 <2002/10/4>
25
26 The bit4:0 of MII register 4 is called "selector field", and have to be
27 00001b to indicate support of IEEE std 802.3 during NWay process of
5b0384f4 28 exchanging Link Code Word (FLP).
1da177e4
LT
29
30VERSION 1.2 <2002/11/30>
31
32 - Large style cleanup
33 - Use ether_crc in stock kernel (linux/crc32.h)
34 - Copy mc_filter setup code from 8139cp
35 (includes an optimization, and avoids set_bit use)
36
37VERSION 1.6LK <2004/04/14>
38
39 - Merge of Realtek's version 1.6
40 - Conversion to DMA API
41 - Suspend/resume
42 - Endianness
43 - Misc Rx/Tx bugs
44
45VERSION 2.2LK <2005/01/25>
46
47 - RX csum, TX csum/SG, TSO
48 - VLAN
49 - baby (< 7200) Jumbo frames support
50 - Merge of Realtek's version 2.2 (new phy)
51 */
52
53#include <linux/module.h>
54#include <linux/moduleparam.h>
55#include <linux/pci.h>
56#include <linux/netdevice.h>
57#include <linux/etherdevice.h>
58#include <linux/delay.h>
59#include <linux/ethtool.h>
60#include <linux/mii.h>
61#include <linux/if_vlan.h>
62#include <linux/crc32.h>
63#include <linux/in.h>
64#include <linux/ip.h>
65#include <linux/tcp.h>
66#include <linux/init.h>
67#include <linux/dma-mapping.h>
68
69#include <asm/io.h>
70#include <asm/irq.h>
71
f7ccf420
SH
72#ifdef CONFIG_R8169_NAPI
73#define NAPI_SUFFIX "-NAPI"
74#else
75#define NAPI_SUFFIX ""
76#endif
77
78#define RTL8169_VERSION "2.2LK" NAPI_SUFFIX
1da177e4
LT
79#define MODULENAME "r8169"
80#define PFX MODULENAME ": "
81
82#ifdef RTL8169_DEBUG
83#define assert(expr) \
5b0384f4
FR
84 if (!(expr)) { \
85 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
86 #expr,__FILE__,__FUNCTION__,__LINE__); \
87 }
1da177e4
LT
88#define dprintk(fmt, args...) do { printk(PFX fmt, ## args); } while (0)
89#else
90#define assert(expr) do {} while (0)
91#define dprintk(fmt, args...) do {} while (0)
92#endif /* RTL8169_DEBUG */
93
b57b7e5a 94#define R8169_MSG_DEFAULT \
f0e837d9 95 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
b57b7e5a 96
1da177e4
LT
97#define TX_BUFFS_AVAIL(tp) \
98 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
99
100#ifdef CONFIG_R8169_NAPI
101#define rtl8169_rx_skb netif_receive_skb
0b50f81d 102#define rtl8169_rx_hwaccel_skb vlan_hwaccel_receive_skb
1da177e4
LT
103#define rtl8169_rx_quota(count, quota) min(count, quota)
104#else
105#define rtl8169_rx_skb netif_rx
0b50f81d 106#define rtl8169_rx_hwaccel_skb vlan_hwaccel_rx
1da177e4
LT
107#define rtl8169_rx_quota(count, quota) count
108#endif
109
110/* media options */
111#define MAX_UNITS 8
112static int media[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
113static int num_media = 0;
114
115/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
f71e1309 116static const int max_interrupt_work = 20;
1da177e4
LT
117
118/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
119 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
f71e1309 120static const int multicast_filter_limit = 32;
1da177e4
LT
121
122/* MAC address length */
123#define MAC_ADDR_LEN 6
124
125#define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */
126#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
127#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
128#define EarlyTxThld 0x3F /* 0x3F means NO early transmit */
129#define RxPacketMaxSize 0x3FE8 /* 16K - 1 - ETH_HLEN - VLAN - CRC... */
130#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
131#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
132
133#define R8169_REGS_SIZE 256
134#define R8169_NAPI_WEIGHT 64
135#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
136#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
137#define RX_BUF_SIZE 1536 /* Rx Buffer size */
138#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
139#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
140
141#define RTL8169_TX_TIMEOUT (6*HZ)
142#define RTL8169_PHY_TIMEOUT (10*HZ)
143
144/* write/read MMIO register */
145#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
146#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
147#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
148#define RTL_R8(reg) readb (ioaddr + (reg))
149#define RTL_R16(reg) readw (ioaddr + (reg))
150#define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg)))
151
152enum mac_version {
bcf0bf90
FR
153 RTL_GIGA_MAC_VER_01 = 0x00,
154 RTL_GIGA_MAC_VER_02 = 0x01,
155 RTL_GIGA_MAC_VER_03 = 0x02,
156 RTL_GIGA_MAC_VER_04 = 0x03,
157 RTL_GIGA_MAC_VER_05 = 0x04,
158 RTL_GIGA_MAC_VER_11 = 0x0b,
159 RTL_GIGA_MAC_VER_12 = 0x0c,
160 RTL_GIGA_MAC_VER_13 = 0x0d,
161 RTL_GIGA_MAC_VER_14 = 0x0e,
162 RTL_GIGA_MAC_VER_15 = 0x0f
1da177e4
LT
163};
164
165enum phy_version {
166 RTL_GIGA_PHY_VER_C = 0x03, /* PHY Reg 0x03 bit0-3 == 0x0000 */
167 RTL_GIGA_PHY_VER_D = 0x04, /* PHY Reg 0x03 bit0-3 == 0x0000 */
168 RTL_GIGA_PHY_VER_E = 0x05, /* PHY Reg 0x03 bit0-3 == 0x0000 */
169 RTL_GIGA_PHY_VER_F = 0x06, /* PHY Reg 0x03 bit0-3 == 0x0001 */
170 RTL_GIGA_PHY_VER_G = 0x07, /* PHY Reg 0x03 bit0-3 == 0x0002 */
171 RTL_GIGA_PHY_VER_H = 0x08, /* PHY Reg 0x03 bit0-3 == 0x0003 */
172};
173
1da177e4
LT
174#define _R(NAME,MAC,MASK) \
175 { .name = NAME, .mac_version = MAC, .RxConfigMask = MASK }
176
3c6bee1d 177static const struct {
1da177e4
LT
178 const char *name;
179 u8 mac_version;
180 u32 RxConfigMask; /* Clears the bits supported by this chip */
181} rtl_chip_info[] = {
bcf0bf90
FR
182 _R("RTL8169", RTL_GIGA_MAC_VER_01, 0xff7e1880),
183 _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_02, 0xff7e1880),
184 _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_03, 0xff7e1880),
185 _R("RTL8169sb/8110sb", RTL_GIGA_MAC_VER_04, 0xff7e1880),
186 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_05, 0xff7e1880),
187 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E
188 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
189 _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
190 _R("RTL8100e", RTL_GIGA_MAC_VER_14, 0xff7e1880), // PCI-E 8139
191 _R("RTL8100e", RTL_GIGA_MAC_VER_15, 0xff7e1880) // PCI-E 8139
1da177e4
LT
192};
193#undef _R
194
bcf0bf90
FR
195enum cfg_version {
196 RTL_CFG_0 = 0x00,
197 RTL_CFG_1,
198 RTL_CFG_2
199};
200
201static const struct {
202 unsigned int region;
203 unsigned int align;
204} rtl_cfg_info[] = {
205 [RTL_CFG_0] = { 1, NET_IP_ALIGN },
206 [RTL_CFG_1] = { 2, NET_IP_ALIGN },
207 [RTL_CFG_2] = { 2, 8 }
208};
209
1da177e4 210static struct pci_device_id rtl8169_pci_tbl[] = {
bcf0bf90 211 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
d2eed8cf 212 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
d81bf551 213 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
bcf0bf90
FR
214 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_2 },
215 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
216 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
217 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
218 { PCI_VENDOR_ID_LINKSYS, 0x1032,
219 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
1da177e4
LT
220 {0,},
221};
222
223MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
224
225static int rx_copybreak = 200;
226static int use_dac;
b57b7e5a
SH
227static struct {
228 u32 msg_enable;
229} debug = { -1 };
1da177e4
LT
230
231enum RTL8169_registers {
232 MAC0 = 0, /* Ethernet hardware address. */
233 MAR0 = 8, /* Multicast filter. */
d4a3a0fc
SH
234 CounterAddrLow = 0x10,
235 CounterAddrHigh = 0x14,
1da177e4
LT
236 TxDescStartAddrLow = 0x20,
237 TxDescStartAddrHigh = 0x24,
238 TxHDescStartAddrLow = 0x28,
239 TxHDescStartAddrHigh = 0x2c,
240 FLASH = 0x30,
241 ERSR = 0x36,
242 ChipCmd = 0x37,
243 TxPoll = 0x38,
244 IntrMask = 0x3C,
245 IntrStatus = 0x3E,
246 TxConfig = 0x40,
247 RxConfig = 0x44,
248 RxMissed = 0x4C,
249 Cfg9346 = 0x50,
250 Config0 = 0x51,
251 Config1 = 0x52,
252 Config2 = 0x53,
253 Config3 = 0x54,
254 Config4 = 0x55,
255 Config5 = 0x56,
256 MultiIntr = 0x5C,
257 PHYAR = 0x60,
258 TBICSR = 0x64,
259 TBI_ANAR = 0x68,
260 TBI_LPAR = 0x6A,
261 PHYstatus = 0x6C,
262 RxMaxSize = 0xDA,
263 CPlusCmd = 0xE0,
264 IntrMitigate = 0xE2,
265 RxDescAddrLow = 0xE4,
266 RxDescAddrHigh = 0xE8,
267 EarlyTxThres = 0xEC,
268 FuncEvent = 0xF0,
269 FuncEventMask = 0xF4,
270 FuncPresetState = 0xF8,
271 FuncForceEvent = 0xFC,
272};
273
274enum RTL8169_register_content {
275 /* InterruptStatusBits */
276 SYSErr = 0x8000,
277 PCSTimeout = 0x4000,
278 SWInt = 0x0100,
279 TxDescUnavail = 0x80,
280 RxFIFOOver = 0x40,
281 LinkChg = 0x20,
282 RxOverflow = 0x10,
283 TxErr = 0x08,
284 TxOK = 0x04,
285 RxErr = 0x02,
286 RxOK = 0x01,
287
288 /* RxStatusDesc */
9dccf611
FR
289 RxFOVF = (1 << 23),
290 RxRWT = (1 << 22),
291 RxRES = (1 << 21),
292 RxRUNT = (1 << 20),
293 RxCRC = (1 << 19),
1da177e4
LT
294
295 /* ChipCmdBits */
296 CmdReset = 0x10,
297 CmdRxEnb = 0x08,
298 CmdTxEnb = 0x04,
299 RxBufEmpty = 0x01,
300
301 /* Cfg9346Bits */
302 Cfg9346_Lock = 0x00,
303 Cfg9346_Unlock = 0xC0,
304
305 /* rx_mode_bits */
306 AcceptErr = 0x20,
307 AcceptRunt = 0x10,
308 AcceptBroadcast = 0x08,
309 AcceptMulticast = 0x04,
310 AcceptMyPhys = 0x02,
311 AcceptAllPhys = 0x01,
312
313 /* RxConfigBits */
314 RxCfgFIFOShift = 13,
315 RxCfgDMAShift = 8,
316
317 /* TxConfigBits */
318 TxInterFrameGapShift = 24,
319 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
320
5d06a99f
FR
321 /* Config1 register p.24 */
322 PMEnable = (1 << 0), /* Power Management Enable */
323
61a4dcc2
FR
324 /* Config3 register p.25 */
325 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
326 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
327
5d06a99f 328 /* Config5 register p.27 */
61a4dcc2
FR
329 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
330 MWF = (1 << 5), /* Accept Multicast wakeup frame */
331 UWF = (1 << 4), /* Accept Unicast wakeup frame */
332 LanWake = (1 << 1), /* LanWake enable/disable */
5d06a99f
FR
333 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
334
1da177e4
LT
335 /* TBICSR p.28 */
336 TBIReset = 0x80000000,
337 TBILoopback = 0x40000000,
338 TBINwEnable = 0x20000000,
339 TBINwRestart = 0x10000000,
340 TBILinkOk = 0x02000000,
341 TBINwComplete = 0x01000000,
342
343 /* CPlusCmd p.31 */
344 RxVlan = (1 << 6),
345 RxChkSum = (1 << 5),
346 PCIDAC = (1 << 4),
347 PCIMulRW = (1 << 3),
348
349 /* rtl8169_PHYstatus */
350 TBI_Enable = 0x80,
351 TxFlowCtrl = 0x40,
352 RxFlowCtrl = 0x20,
353 _1000bpsF = 0x10,
354 _100bps = 0x08,
355 _10bps = 0x04,
356 LinkStatus = 0x02,
357 FullDup = 0x01,
358
1da177e4
LT
359 /* _MediaType */
360 _10_Half = 0x01,
361 _10_Full = 0x02,
362 _100_Half = 0x04,
363 _100_Full = 0x08,
364 _1000_Full = 0x10,
365
366 /* _TBICSRBit */
367 TBILinkOK = 0x02000000,
d4a3a0fc
SH
368
369 /* DumpCounterCommand */
370 CounterDump = 0x8,
1da177e4
LT
371};
372
373enum _DescStatusBit {
374 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
375 RingEnd = (1 << 30), /* End of descriptor ring */
376 FirstFrag = (1 << 29), /* First segment of a packet */
377 LastFrag = (1 << 28), /* Final segment of a packet */
378
379 /* Tx private */
380 LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
381 MSSShift = 16, /* MSS value position */
382 MSSMask = 0xfff, /* MSS value + LargeSend bit: 12 bits */
383 IPCS = (1 << 18), /* Calculate IP checksum */
384 UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
385 TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
386 TxVlanTag = (1 << 17), /* Add VLAN tag */
387
388 /* Rx private */
389 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
390 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
391
392#define RxProtoUDP (PID1)
393#define RxProtoTCP (PID0)
394#define RxProtoIP (PID1 | PID0)
395#define RxProtoMask RxProtoIP
396
397 IPFail = (1 << 16), /* IP checksum failed */
398 UDPFail = (1 << 15), /* UDP/IP checksum failed */
399 TCPFail = (1 << 14), /* TCP/IP checksum failed */
400 RxVlanTag = (1 << 16), /* VLAN tag available */
401};
402
403#define RsvdMask 0x3fffc000
404
405struct TxDesc {
406 u32 opts1;
407 u32 opts2;
408 u64 addr;
409};
410
411struct RxDesc {
412 u32 opts1;
413 u32 opts2;
414 u64 addr;
415};
416
417struct ring_info {
418 struct sk_buff *skb;
419 u32 len;
420 u8 __pad[sizeof(void *) - sizeof(u32)];
421};
422
423struct rtl8169_private {
424 void __iomem *mmio_addr; /* memory map physical address */
425 struct pci_dev *pci_dev; /* Index of PCI device */
426 struct net_device_stats stats; /* statistics of net device */
427 spinlock_t lock; /* spin lock flag */
b57b7e5a 428 u32 msg_enable;
1da177e4
LT
429 int chipset;
430 int mac_version;
431 int phy_version;
432 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
433 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
434 u32 dirty_rx;
435 u32 dirty_tx;
436 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
437 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
438 dma_addr_t TxPhyAddr;
439 dma_addr_t RxPhyAddr;
440 struct sk_buff *Rx_skbuff[NUM_RX_DESC]; /* Rx data buffers */
441 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
bcf0bf90 442 unsigned align;
1da177e4
LT
443 unsigned rx_buf_sz;
444 struct timer_list timer;
445 u16 cp_cmd;
446 u16 intr_mask;
447 int phy_auto_nego_reg;
448 int phy_1000_ctrl_reg;
449#ifdef CONFIG_R8169_VLAN
450 struct vlan_group *vlgrp;
451#endif
452 int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
453 void (*get_settings)(struct net_device *, struct ethtool_cmd *);
454 void (*phy_reset_enable)(void __iomem *);
455 unsigned int (*phy_reset_pending)(void __iomem *);
456 unsigned int (*link_ok)(void __iomem *);
457 struct work_struct task;
61a4dcc2 458 unsigned wol_enabled : 1;
1da177e4
LT
459};
460
979b6c13 461MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
1da177e4
LT
462MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
463module_param_array(media, int, &num_media, 0);
df0a1bf6 464MODULE_PARM_DESC(media, "force phy operation. Deprecated by ethtool (8).");
1da177e4 465module_param(rx_copybreak, int, 0);
1b7efd58 466MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
1da177e4
LT
467module_param(use_dac, int, 0);
468MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
b57b7e5a
SH
469module_param_named(debug, debug.msg_enable, int, 0);
470MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
1da177e4
LT
471MODULE_LICENSE("GPL");
472MODULE_VERSION(RTL8169_VERSION);
473
474static int rtl8169_open(struct net_device *dev);
475static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev);
476static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance,
477 struct pt_regs *regs);
478static int rtl8169_init_ring(struct net_device *dev);
479static void rtl8169_hw_start(struct net_device *dev);
480static int rtl8169_close(struct net_device *dev);
481static void rtl8169_set_rx_mode(struct net_device *dev);
482static void rtl8169_tx_timeout(struct net_device *dev);
4dcb7d33 483static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
1da177e4
LT
484static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
485 void __iomem *);
4dcb7d33 486static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
1da177e4
LT
487static void rtl8169_down(struct net_device *dev);
488
489#ifdef CONFIG_R8169_NAPI
490static int rtl8169_poll(struct net_device *dev, int *budget);
491#endif
492
493static const u16 rtl8169_intr_mask =
494 SYSErr | LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK;
495static const u16 rtl8169_napi_event =
496 RxOK | RxOverflow | RxFIFOOver | TxOK | TxErr;
497static const unsigned int rtl8169_rx_config =
5b0384f4 498 (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
1da177e4
LT
499
500static void mdio_write(void __iomem *ioaddr, int RegAddr, int value)
501{
502 int i;
503
504 RTL_W32(PHYAR, 0x80000000 | (RegAddr & 0xFF) << 16 | value);
1da177e4 505
2371408c 506 for (i = 20; i > 0; i--) {
1da177e4 507 /* Check if the RTL8169 has completed writing to the specified MII register */
5b0384f4 508 if (!(RTL_R32(PHYAR) & 0x80000000))
1da177e4 509 break;
2371408c 510 udelay(25);
1da177e4
LT
511 }
512}
513
514static int mdio_read(void __iomem *ioaddr, int RegAddr)
515{
516 int i, value = -1;
517
518 RTL_W32(PHYAR, 0x0 | (RegAddr & 0xFF) << 16);
1da177e4 519
2371408c 520 for (i = 20; i > 0; i--) {
1da177e4
LT
521 /* Check if the RTL8169 has completed retrieving data from the specified MII register */
522 if (RTL_R32(PHYAR) & 0x80000000) {
523 value = (int) (RTL_R32(PHYAR) & 0xFFFF);
524 break;
525 }
2371408c 526 udelay(25);
1da177e4
LT
527 }
528 return value;
529}
530
531static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
532{
533 RTL_W16(IntrMask, 0x0000);
534
535 RTL_W16(IntrStatus, 0xffff);
536}
537
538static void rtl8169_asic_down(void __iomem *ioaddr)
539{
540 RTL_W8(ChipCmd, 0x00);
541 rtl8169_irq_mask_and_ack(ioaddr);
542 RTL_R16(CPlusCmd);
543}
544
545static unsigned int rtl8169_tbi_reset_pending(void __iomem *ioaddr)
546{
547 return RTL_R32(TBICSR) & TBIReset;
548}
549
550static unsigned int rtl8169_xmii_reset_pending(void __iomem *ioaddr)
551{
64e4bfb4 552 return mdio_read(ioaddr, MII_BMCR) & BMCR_RESET;
1da177e4
LT
553}
554
555static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
556{
557 return RTL_R32(TBICSR) & TBILinkOk;
558}
559
560static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
561{
562 return RTL_R8(PHYstatus) & LinkStatus;
563}
564
565static void rtl8169_tbi_reset_enable(void __iomem *ioaddr)
566{
567 RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
568}
569
570static void rtl8169_xmii_reset_enable(void __iomem *ioaddr)
571{
572 unsigned int val;
573
64e4bfb4
FR
574 val = (mdio_read(ioaddr, MII_BMCR) | BMCR_RESET) & 0xffff;
575 mdio_write(ioaddr, MII_BMCR, val);
1da177e4
LT
576}
577
578static void rtl8169_check_link_status(struct net_device *dev,
579 struct rtl8169_private *tp, void __iomem *ioaddr)
580{
581 unsigned long flags;
582
583 spin_lock_irqsave(&tp->lock, flags);
584 if (tp->link_ok(ioaddr)) {
585 netif_carrier_on(dev);
b57b7e5a
SH
586 if (netif_msg_ifup(tp))
587 printk(KERN_INFO PFX "%s: link up\n", dev->name);
588 } else {
589 if (netif_msg_ifdown(tp))
590 printk(KERN_INFO PFX "%s: link down\n", dev->name);
1da177e4 591 netif_carrier_off(dev);
b57b7e5a 592 }
1da177e4
LT
593 spin_unlock_irqrestore(&tp->lock, flags);
594}
595
596static void rtl8169_link_option(int idx, u8 *autoneg, u16 *speed, u8 *duplex)
597{
598 struct {
599 u16 speed;
600 u8 duplex;
601 u8 autoneg;
602 u8 media;
603 } link_settings[] = {
604 { SPEED_10, DUPLEX_HALF, AUTONEG_DISABLE, _10_Half },
605 { SPEED_10, DUPLEX_FULL, AUTONEG_DISABLE, _10_Full },
606 { SPEED_100, DUPLEX_HALF, AUTONEG_DISABLE, _100_Half },
607 { SPEED_100, DUPLEX_FULL, AUTONEG_DISABLE, _100_Full },
608 { SPEED_1000, DUPLEX_FULL, AUTONEG_DISABLE, _1000_Full },
609 /* Make TBI happy */
610 { SPEED_1000, DUPLEX_FULL, AUTONEG_ENABLE, 0xff }
611 }, *p;
612 unsigned char option;
5b0384f4 613
1da177e4
LT
614 option = ((idx < MAX_UNITS) && (idx >= 0)) ? media[idx] : 0xff;
615
b57b7e5a 616 if ((option != 0xff) && !idx && netif_msg_drv(&debug))
1da177e4
LT
617 printk(KERN_WARNING PFX "media option is deprecated.\n");
618
619 for (p = link_settings; p->media != 0xff; p++) {
620 if (p->media == option)
621 break;
622 }
623 *autoneg = p->autoneg;
624 *speed = p->speed;
625 *duplex = p->duplex;
626}
627
61a4dcc2
FR
628static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
629{
630 struct rtl8169_private *tp = netdev_priv(dev);
631 void __iomem *ioaddr = tp->mmio_addr;
632 u8 options;
633
634 wol->wolopts = 0;
635
636#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
637 wol->supported = WAKE_ANY;
638
639 spin_lock_irq(&tp->lock);
640
641 options = RTL_R8(Config1);
642 if (!(options & PMEnable))
643 goto out_unlock;
644
645 options = RTL_R8(Config3);
646 if (options & LinkUp)
647 wol->wolopts |= WAKE_PHY;
648 if (options & MagicPacket)
649 wol->wolopts |= WAKE_MAGIC;
650
651 options = RTL_R8(Config5);
652 if (options & UWF)
653 wol->wolopts |= WAKE_UCAST;
654 if (options & BWF)
5b0384f4 655 wol->wolopts |= WAKE_BCAST;
61a4dcc2 656 if (options & MWF)
5b0384f4 657 wol->wolopts |= WAKE_MCAST;
61a4dcc2
FR
658
659out_unlock:
660 spin_unlock_irq(&tp->lock);
661}
662
663static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
664{
665 struct rtl8169_private *tp = netdev_priv(dev);
666 void __iomem *ioaddr = tp->mmio_addr;
667 int i;
668 static struct {
669 u32 opt;
670 u16 reg;
671 u8 mask;
672 } cfg[] = {
673 { WAKE_ANY, Config1, PMEnable },
674 { WAKE_PHY, Config3, LinkUp },
675 { WAKE_MAGIC, Config3, MagicPacket },
676 { WAKE_UCAST, Config5, UWF },
677 { WAKE_BCAST, Config5, BWF },
678 { WAKE_MCAST, Config5, MWF },
679 { WAKE_ANY, Config5, LanWake }
680 };
681
682 spin_lock_irq(&tp->lock);
683
684 RTL_W8(Cfg9346, Cfg9346_Unlock);
685
686 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
687 u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
688 if (wol->wolopts & cfg[i].opt)
689 options |= cfg[i].mask;
690 RTL_W8(cfg[i].reg, options);
691 }
692
693 RTL_W8(Cfg9346, Cfg9346_Lock);
694
695 tp->wol_enabled = (wol->wolopts) ? 1 : 0;
696
697 spin_unlock_irq(&tp->lock);
698
699 return 0;
700}
701
1da177e4
LT
702static void rtl8169_get_drvinfo(struct net_device *dev,
703 struct ethtool_drvinfo *info)
704{
705 struct rtl8169_private *tp = netdev_priv(dev);
706
707 strcpy(info->driver, MODULENAME);
708 strcpy(info->version, RTL8169_VERSION);
709 strcpy(info->bus_info, pci_name(tp->pci_dev));
710}
711
712static int rtl8169_get_regs_len(struct net_device *dev)
713{
714 return R8169_REGS_SIZE;
715}
716
717static int rtl8169_set_speed_tbi(struct net_device *dev,
718 u8 autoneg, u16 speed, u8 duplex)
719{
720 struct rtl8169_private *tp = netdev_priv(dev);
721 void __iomem *ioaddr = tp->mmio_addr;
722 int ret = 0;
723 u32 reg;
724
725 reg = RTL_R32(TBICSR);
726 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
727 (duplex == DUPLEX_FULL)) {
728 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
729 } else if (autoneg == AUTONEG_ENABLE)
730 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
731 else {
b57b7e5a
SH
732 if (netif_msg_link(tp)) {
733 printk(KERN_WARNING "%s: "
734 "incorrect speed setting refused in TBI mode\n",
735 dev->name);
736 }
1da177e4
LT
737 ret = -EOPNOTSUPP;
738 }
739
740 return ret;
741}
742
743static int rtl8169_set_speed_xmii(struct net_device *dev,
744 u8 autoneg, u16 speed, u8 duplex)
745{
746 struct rtl8169_private *tp = netdev_priv(dev);
747 void __iomem *ioaddr = tp->mmio_addr;
748 int auto_nego, giga_ctrl;
749
64e4bfb4
FR
750 auto_nego = mdio_read(ioaddr, MII_ADVERTISE);
751 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
752 ADVERTISE_100HALF | ADVERTISE_100FULL);
753 giga_ctrl = mdio_read(ioaddr, MII_CTRL1000);
754 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
1da177e4
LT
755
756 if (autoneg == AUTONEG_ENABLE) {
64e4bfb4
FR
757 auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
758 ADVERTISE_100HALF | ADVERTISE_100FULL);
759 giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
1da177e4
LT
760 } else {
761 if (speed == SPEED_10)
64e4bfb4 762 auto_nego |= ADVERTISE_10HALF | ADVERTISE_10FULL;
1da177e4 763 else if (speed == SPEED_100)
64e4bfb4 764 auto_nego |= ADVERTISE_100HALF | ADVERTISE_100FULL;
1da177e4 765 else if (speed == SPEED_1000)
64e4bfb4 766 giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
1da177e4
LT
767
768 if (duplex == DUPLEX_HALF)
64e4bfb4 769 auto_nego &= ~(ADVERTISE_10FULL | ADVERTISE_100FULL);
726ecdcf
AG
770
771 if (duplex == DUPLEX_FULL)
64e4bfb4 772 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_100HALF);
bcf0bf90
FR
773
774 /* This tweak comes straight from Realtek's driver. */
775 if ((speed == SPEED_100) && (duplex == DUPLEX_HALF) &&
776 (tp->mac_version == RTL_GIGA_MAC_VER_13)) {
64e4bfb4 777 auto_nego = ADVERTISE_100HALF | ADVERTISE_CSMA;
bcf0bf90
FR
778 }
779 }
780
781 /* The 8100e/8101e do Fast Ethernet only. */
782 if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
783 (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
784 (tp->mac_version == RTL_GIGA_MAC_VER_15)) {
64e4bfb4 785 if ((giga_ctrl & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) &&
bcf0bf90
FR
786 netif_msg_link(tp)) {
787 printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n",
788 dev->name);
789 }
64e4bfb4 790 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
1da177e4
LT
791 }
792
623a1593
FR
793 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
794
1da177e4
LT
795 tp->phy_auto_nego_reg = auto_nego;
796 tp->phy_1000_ctrl_reg = giga_ctrl;
797
64e4bfb4
FR
798 mdio_write(ioaddr, MII_ADVERTISE, auto_nego);
799 mdio_write(ioaddr, MII_CTRL1000, giga_ctrl);
800 mdio_write(ioaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
1da177e4
LT
801 return 0;
802}
803
804static int rtl8169_set_speed(struct net_device *dev,
805 u8 autoneg, u16 speed, u8 duplex)
806{
807 struct rtl8169_private *tp = netdev_priv(dev);
808 int ret;
809
810 ret = tp->set_speed(dev, autoneg, speed, duplex);
811
64e4bfb4 812 if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
1da177e4
LT
813 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
814
815 return ret;
816}
817
818static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
819{
820 struct rtl8169_private *tp = netdev_priv(dev);
821 unsigned long flags;
822 int ret;
823
824 spin_lock_irqsave(&tp->lock, flags);
825 ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex);
826 spin_unlock_irqrestore(&tp->lock, flags);
5b0384f4 827
1da177e4
LT
828 return ret;
829}
830
831static u32 rtl8169_get_rx_csum(struct net_device *dev)
832{
833 struct rtl8169_private *tp = netdev_priv(dev);
834
835 return tp->cp_cmd & RxChkSum;
836}
837
838static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
839{
840 struct rtl8169_private *tp = netdev_priv(dev);
841 void __iomem *ioaddr = tp->mmio_addr;
842 unsigned long flags;
843
844 spin_lock_irqsave(&tp->lock, flags);
845
846 if (data)
847 tp->cp_cmd |= RxChkSum;
848 else
849 tp->cp_cmd &= ~RxChkSum;
850
851 RTL_W16(CPlusCmd, tp->cp_cmd);
852 RTL_R16(CPlusCmd);
853
854 spin_unlock_irqrestore(&tp->lock, flags);
855
856 return 0;
857}
858
859#ifdef CONFIG_R8169_VLAN
860
861static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
862 struct sk_buff *skb)
863{
864 return (tp->vlgrp && vlan_tx_tag_present(skb)) ?
865 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
866}
867
868static void rtl8169_vlan_rx_register(struct net_device *dev,
869 struct vlan_group *grp)
870{
871 struct rtl8169_private *tp = netdev_priv(dev);
872 void __iomem *ioaddr = tp->mmio_addr;
873 unsigned long flags;
874
875 spin_lock_irqsave(&tp->lock, flags);
876 tp->vlgrp = grp;
877 if (tp->vlgrp)
878 tp->cp_cmd |= RxVlan;
879 else
880 tp->cp_cmd &= ~RxVlan;
881 RTL_W16(CPlusCmd, tp->cp_cmd);
882 RTL_R16(CPlusCmd);
883 spin_unlock_irqrestore(&tp->lock, flags);
884}
885
886static void rtl8169_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
887{
888 struct rtl8169_private *tp = netdev_priv(dev);
889 unsigned long flags;
890
891 spin_lock_irqsave(&tp->lock, flags);
892 if (tp->vlgrp)
893 tp->vlgrp->vlan_devices[vid] = NULL;
894 spin_unlock_irqrestore(&tp->lock, flags);
895}
896
897static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
898 struct sk_buff *skb)
899{
900 u32 opts2 = le32_to_cpu(desc->opts2);
901 int ret;
902
903 if (tp->vlgrp && (opts2 & RxVlanTag)) {
904 rtl8169_rx_hwaccel_skb(skb, tp->vlgrp,
905 swab16(opts2 & 0xffff));
906 ret = 0;
907 } else
908 ret = -1;
909 desc->opts2 = 0;
910 return ret;
911}
912
913#else /* !CONFIG_R8169_VLAN */
914
915static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
916 struct sk_buff *skb)
917{
918 return 0;
919}
920
/* VLAN support compiled out: always tell the caller to deliver the skb. */
static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
			       struct sk_buff *skb)
{
	return -1;
}
926
927#endif
928
929static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
930{
931 struct rtl8169_private *tp = netdev_priv(dev);
932 void __iomem *ioaddr = tp->mmio_addr;
933 u32 status;
934
935 cmd->supported =
936 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
937 cmd->port = PORT_FIBRE;
938 cmd->transceiver = XCVR_INTERNAL;
939
940 status = RTL_R32(TBICSR);
941 cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
942 cmd->autoneg = !!(status & TBINwEnable);
943
944 cmd->speed = SPEED_1000;
945 cmd->duplex = DUPLEX_FULL; /* Always set */
946}
947
948static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
949{
950 struct rtl8169_private *tp = netdev_priv(dev);
951 void __iomem *ioaddr = tp->mmio_addr;
952 u8 status;
953
954 cmd->supported = SUPPORTED_10baseT_Half |
955 SUPPORTED_10baseT_Full |
956 SUPPORTED_100baseT_Half |
957 SUPPORTED_100baseT_Full |
958 SUPPORTED_1000baseT_Full |
959 SUPPORTED_Autoneg |
5b0384f4 960 SUPPORTED_TP;
1da177e4
LT
961
962 cmd->autoneg = 1;
963 cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
964
64e4bfb4 965 if (tp->phy_auto_nego_reg & ADVERTISE_10HALF)
1da177e4 966 cmd->advertising |= ADVERTISED_10baseT_Half;
64e4bfb4 967 if (tp->phy_auto_nego_reg & ADVERTISE_10FULL)
1da177e4 968 cmd->advertising |= ADVERTISED_10baseT_Full;
64e4bfb4 969 if (tp->phy_auto_nego_reg & ADVERTISE_100HALF)
1da177e4 970 cmd->advertising |= ADVERTISED_100baseT_Half;
64e4bfb4 971 if (tp->phy_auto_nego_reg & ADVERTISE_100FULL)
1da177e4 972 cmd->advertising |= ADVERTISED_100baseT_Full;
64e4bfb4 973 if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)
1da177e4
LT
974 cmd->advertising |= ADVERTISED_1000baseT_Full;
975
976 status = RTL_R8(PHYstatus);
977
978 if (status & _1000bpsF)
979 cmd->speed = SPEED_1000;
980 else if (status & _100bps)
981 cmd->speed = SPEED_100;
982 else if (status & _10bps)
983 cmd->speed = SPEED_10;
984
623a1593
FR
985 if (status & TxFlowCtrl)
986 cmd->advertising |= ADVERTISED_Asym_Pause;
987 if (status & RxFlowCtrl)
988 cmd->advertising |= ADVERTISED_Pause;
989
1da177e4
LT
990 cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ?
991 DUPLEX_FULL : DUPLEX_HALF;
992}
993
994static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
995{
996 struct rtl8169_private *tp = netdev_priv(dev);
997 unsigned long flags;
998
999 spin_lock_irqsave(&tp->lock, flags);
1000
1001 tp->get_settings(dev, cmd);
1002
1003 spin_unlock_irqrestore(&tp->lock, flags);
1004 return 0;
1005}
1006
/*
 * ethtool ->get_regs handler: copy a snapshot of the MMIO register
 * window into @p. The length is clamped to R8169_REGS_SIZE and the
 * copy runs under tp->lock so it is consistent with concurrent
 * register writers.
 */
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *p)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len > R8169_REGS_SIZE)
		regs->len = R8169_REGS_SIZE;

	spin_lock_irqsave(&tp->lock, flags);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	spin_unlock_irqrestore(&tp->lock, flags);
}
1020
b57b7e5a
SH
1021static u32 rtl8169_get_msglevel(struct net_device *dev)
1022{
1023 struct rtl8169_private *tp = netdev_priv(dev);
1024
1025 return tp->msg_enable;
1026}
1027
1028static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1029{
1030 struct rtl8169_private *tp = netdev_priv(dev);
1031
1032 tp->msg_enable = value;
1033}
1034
d4a3a0fc
SH
/*
 * Statistic names reported through "ethtool -S". The order must match
 * both struct rtl8169_counters and the data[] indices filled in by
 * rtl8169_get_ethtool_stats() — do not reorder one without the others.
 */
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"unicast",
	"broadcast",
	"multicast",
	"tx_aborted",
	"tx_underrun",
};
1050
/*
 * In-memory layout of the hardware tally-counter dump (DMA'd by the
 * chip in rtl8169_get_ethtool_stats()). Field widths, order and the
 * little-endian byte order are fixed by the hardware — never reorder
 * or resize these fields.
 */
struct rtl8169_counters {
	u64 tx_packets;
	u64 rx_packets;
	u64 tx_errors;
	u32 rx_errors;
	u16 rx_missed;
	u16 align_errors;
	u32 tx_one_collision;
	u32 tx_multi_collision;
	u64 rx_unicast;
	u64 rx_broadcast;
	u32 rx_multicast;
	u16 tx_aborted;
	u16 tx_underun;	/* sic - historical misspelling, kept for grep-ability */
};
1066
/* ethtool ->get_stats_count handler: number of exported statistics. */
static int rtl8169_get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(rtl8169_gstrings);
}
1071
/*
 * ethtool ->get_ethtool_stats handler.
 *
 * Allocates a DMA-coherent buffer, points the chip's counter-dump
 * address registers at it, triggers the dump with the CounterDump bit
 * and polls (sleeping) until the chip clears that bit, then copies the
 * little-endian counters into data[] in rtl8169_gstrings order.
 *
 * Silently returns with data[] untouched if the DMA buffer cannot be
 * allocated. NOTE(review): if msleep_interruptible() is interrupted the
 * poll loop exits early and partially-dumped counters may be reported —
 * confirm this best-effort behavior is acceptable.
 */
static void rtl8169_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct rtl8169_counters *counters;
	dma_addr_t paddr;
	u32 cmd;

	ASSERT_RTNL();

	counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr);
	if (!counters)
		return;

	/* Program the dump address, then set the go bit in a second write. */
	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
	cmd = (u64)paddr & DMA_32BIT_MASK;
	RTL_W32(CounterAddrLow, cmd);
	RTL_W32(CounterAddrLow, cmd | CounterDump);

	/* The chip clears CounterDump when the DMA transfer is complete. */
	while (RTL_R32(CounterAddrLow) & CounterDump) {
		if (msleep_interruptible(1))
			break;
	}

	RTL_W32(CounterAddrLow, 0);
	RTL_W32(CounterAddrHigh, 0);

	data[0] = le64_to_cpu(counters->tx_packets);
	data[1] = le64_to_cpu(counters->rx_packets);
	data[2] = le64_to_cpu(counters->tx_errors);
	data[3] = le32_to_cpu(counters->rx_errors);
	data[4] = le16_to_cpu(counters->rx_missed);
	data[5] = le16_to_cpu(counters->align_errors);
	data[6] = le32_to_cpu(counters->tx_one_collision);
	data[7] = le32_to_cpu(counters->tx_multi_collision);
	data[8] = le64_to_cpu(counters->rx_unicast);
	data[9] = le64_to_cpu(counters->rx_broadcast);
	data[10] = le32_to_cpu(counters->rx_multicast);
	data[11] = le16_to_cpu(counters->tx_aborted);
	data[12] = le16_to_cpu(counters->tx_underun);

	pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr);
}
1116
1117static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1118{
1119 switch(stringset) {
1120 case ETH_SS_STATS:
1121 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1122 break;
1123 }
1124}
1125
1126
/* ethtool entry points for this driver; installed via SET_ETHTOOL_OPS. */
static const struct ethtool_ops rtl8169_ethtool_ops = {
	.get_drvinfo		= rtl8169_get_drvinfo,
	.get_regs_len		= rtl8169_get_regs_len,
	.get_link		= ethtool_op_get_link,
	.get_settings		= rtl8169_get_settings,
	.set_settings		= rtl8169_set_settings,
	.get_msglevel		= rtl8169_get_msglevel,
	.set_msglevel		= rtl8169_set_msglevel,
	.get_rx_csum		= rtl8169_get_rx_csum,
	.set_rx_csum		= rtl8169_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= ethtool_op_set_tso,
	.get_regs		= rtl8169_get_regs,
	.get_wol		= rtl8169_get_wol,
	.set_wol		= rtl8169_set_wol,
	.get_strings		= rtl8169_get_strings,
	.get_stats_count	= rtl8169_get_stats_count,
	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
1151
1152static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum,
1153 int bitval)
1154{
1155 int val;
1156
1157 val = mdio_read(ioaddr, reg);
1158 val = (bitval == 1) ?
1159 val | (bitval << bitnum) : val & ~(0x0001 << bitnum);
5b0384f4 1160 mdio_write(ioaddr, reg, val & 0xffff);
1da177e4
LT
1161}
1162
/*
 * Identify the MAC revision from the version bits of TxConfig.
 * Entries are ordered most-specific mask first; the scan stops at the
 * first entry whose mask bits are all present in the register value.
 * The final { 0x00000000, ... } entry always matches, so the loop is
 * guaranteed to terminate with RTL_GIGA_MAC_VER_01 as the fallback.
 */
static void rtl8169_get_mac_version(struct rtl8169_private *tp, void __iomem *ioaddr)
{
	const struct {
		u32 mask;
		int mac_version;
	} mac_info[] = {
		{ 0x38800000,	RTL_GIGA_MAC_VER_15 },
		{ 0x38000000,	RTL_GIGA_MAC_VER_12 },
		{ 0x34000000,	RTL_GIGA_MAC_VER_13 },
		{ 0x30800000,	RTL_GIGA_MAC_VER_14 },
		{ 0x30000000,	RTL_GIGA_MAC_VER_11 },
		{ 0x18000000,	RTL_GIGA_MAC_VER_05 },
		{ 0x10000000,	RTL_GIGA_MAC_VER_04 },
		{ 0x04000000,	RTL_GIGA_MAC_VER_03 },
		{ 0x00800000,	RTL_GIGA_MAC_VER_02 },
		{ 0x00000000,	RTL_GIGA_MAC_VER_01 }	/* Catch-all */
	}, *p = mac_info;
	u32 reg;

	/* 0x7c800000 masks out everything but the hardware version bits. */
	reg = RTL_R32(TxConfig) & 0x7c800000;
	while ((reg & p->mask) != p->mask)
		p++;
	tp->mac_version = p->mac_version;
}
1187
/* Debug helper: log the MAC version detected by rtl8169_get_mac_version(). */
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
	dprintk("mac_version = 0x%02x\n", tp->mac_version);
}
1192
/*
 * Identify the PHY revision from the low bits of MII_PHYSID2.
 * The scan stops at the first entry whose masked register value equals
 * its 'set' value; the all-zero last entry is a guaranteed catch-all
 * (RTL_GIGA_PHY_VER_D).
 */
static void rtl8169_get_phy_version(struct rtl8169_private *tp, void __iomem *ioaddr)
{
	const struct {
		u16 mask;
		u16 set;
		int phy_version;
	} phy_info[] = {
		{ 0x000f, 0x0002, RTL_GIGA_PHY_VER_G },
		{ 0x000f, 0x0001, RTL_GIGA_PHY_VER_F },
		{ 0x000f, 0x0000, RTL_GIGA_PHY_VER_E },
		{ 0x0000, 0x0000, RTL_GIGA_PHY_VER_D } /* Catch-all */
	}, *p = phy_info;
	u16 reg;

	reg = mdio_read(ioaddr, MII_PHYSID2) & 0xffff;
	while ((reg & p->mask) != p->set)
		p++;
	tp->phy_version = p->phy_version;
}
1212
1213static void rtl8169_print_phy_version(struct rtl8169_private *tp)
1214{
1215 struct {
1216 int version;
1217 char *msg;
1218 u32 reg;
1219 } phy_print[] = {
1220 { RTL_GIGA_PHY_VER_G, "RTL_GIGA_PHY_VER_G", 0x0002 },
1221 { RTL_GIGA_PHY_VER_F, "RTL_GIGA_PHY_VER_F", 0x0001 },
1222 { RTL_GIGA_PHY_VER_E, "RTL_GIGA_PHY_VER_E", 0x0000 },
1223 { RTL_GIGA_PHY_VER_D, "RTL_GIGA_PHY_VER_D", 0x0000 },
1224 { 0, NULL, 0x0000 }
1225 }, *p;
1226
1227 for (p = phy_print; p->msg; p++) {
1228 if (tp->phy_version == p->version) {
1229 dprintk("phy_version == %s (%04x)\n", p->msg, p->reg);
1230 return;
1231 }
1232 }
1233 dprintk("phy_version == Unknown\n");
1234}
1235
/*
 * Load Realtek's magic PHY calibration values into the chip.
 *
 * For RTL_GIGA_MAC_VER_04 a short fixed write sequence is used.
 * For mac_version C chips, each phy_magic[] group is written through
 * MII registers 4..0 (register 4 keeps its low 12 bits), then bit 11
 * of register 4 is strobed high/low to latch the group.
 *
 * The values come straight from Realtek's reference driver and are
 * intentionally opaque — do not "clean them up".
 */
static void rtl8169_hw_phy_config(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct {
		u16 regs[5]; /* Beware of bit-sign propagation */
	} phy_magic[5] = { {
		{ 0x0000,	//w 4 15 12 0
		  0x00a1,	//w 3 15 0 00a1
		  0x0008,	//w 2 15 0 0008
		  0x1020,	//w 1 15 0 1020
		  0x1000 } },{	//w 0 15 0 1000
		{ 0x7000,	//w 4 15 12 7
		  0xff41,	//w 3 15 0 ff41
		  0xde60,	//w 2 15 0 de60
		  0x0140,	//w 1 15 0 0140
		  0x0077 } },{	//w 0 15 0 0077
		{ 0xa000,	//w 4 15 12 a
		  0xdf01,	//w 3 15 0 df01
		  0xdf20,	//w 2 15 0 df20
		  0xff95,	//w 1 15 0 ff95
		  0xfa00 } },{	//w 0 15 0 fa00
		{ 0xb000,	//w 4 15 12 b
		  0xff41,	//w 3 15 0 ff41
		  0xde20,	//w 2 15 0 de20
		  0x0140,	//w 1 15 0 0140
		  0x00bb } },{	//w 0 15 0 00bb
		{ 0xf000,	//w 4 15 12 f
		  0xdf01,	//w 3 15 0 df01
		  0xdf20,	//w 2 15 0 df20
		  0xff95,	//w 1 15 0 ff95
		  0xbf00 }	//w 0 15 0 bf00
		}
	}, *p = phy_magic;
	int i;

	rtl8169_print_mac_version(tp);
	rtl8169_print_phy_version(tp);

	/* Only needed for older MACs with older PHY revisions. */
	if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
		return;
	if (tp->phy_version >= RTL_GIGA_PHY_VER_H)
		return;

	dprintk("MAC version != 0 && PHY version == 0 or 1\n");
	dprintk("Do final_reg2.cfg\n");

	/* Shazam ! */

	if (tp->mac_version == RTL_GIGA_MAC_VER_04) {
		mdio_write(ioaddr, 31, 0x0001);
		mdio_write(ioaddr,  9, 0x273a);
		mdio_write(ioaddr, 14, 0x7bfb);
		mdio_write(ioaddr, 27, 0x841e);

		mdio_write(ioaddr, 31, 0x0002);
		mdio_write(ioaddr,  1, 0x90d0);
		mdio_write(ioaddr, 31, 0x0000);
		return;
	}

	/* phy config for RTL8169s mac_version C chip */
	mdio_write(ioaddr, 31, 0x0001);			//w 31 2 0 1
	mdio_write(ioaddr, 21, 0x1000);			//w 21 15 0 1000
	mdio_write(ioaddr, 24, 0x65c7);			//w 24 15 0 65c7
	rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0);	//w 4 11 11 0

	for (i = 0; i < ARRAY_SIZE(phy_magic); i++, p++) {
		int val, pos = 4;

		/* Register 4 keeps its low 12 bits; regs 3..0 are replaced. */
		val = (mdio_read(ioaddr, pos) & 0x0fff) | (p->regs[0] & 0xffff);
		mdio_write(ioaddr, pos, val);
		while (--pos >= 0)
			mdio_write(ioaddr, pos, p->regs[4 - pos] & 0xffff);
		rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 1); //w 4 11 11 1
		rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0
	}
	mdio_write(ioaddr, 31, 0x0000); //w 31 2 0 0
}
1315
/*
 * Periodic PHY watchdog (tp->timer callback, @__opaque is the netdev).
 *
 * Only armed for non-VER_01 MACs with pre-H PHYs (see
 * rtl8169_request_timer). While 1000Full is advertised and the link is
 * down, keep resetting the PHY until the link comes up; if a reset is
 * still pending, just re-check a little later.
 */
static void rtl8169_phy_timer(unsigned long __opaque)
{
	struct net_device *dev = (struct net_device *)__opaque;
	struct rtl8169_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long timeout = RTL8169_PHY_TIMEOUT;

	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
	assert(tp->phy_version < RTL_GIGA_PHY_VER_H);

	/* The workaround only matters when gigabit is being negotiated. */
	if (!(tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
		return;

	spin_lock_irq(&tp->lock);

	if (tp->phy_reset_pending(ioaddr)) {
		/*
		 * A busy loop could burn quite a few cycles on nowadays CPU.
		 * Let's delay the execution of the timer for a few ticks.
		 */
		timeout = HZ/10;
		goto out_mod_timer;
	}

	if (tp->link_ok(ioaddr))
		goto out_unlock;

	if (netif_msg_link(tp))
		printk(KERN_WARNING "%s: PHY reset until link up\n", dev->name);

	tp->phy_reset_enable(ioaddr);

out_mod_timer:
	mod_timer(timer, jiffies + timeout);
out_unlock:
	spin_unlock_irq(&tp->lock);
}
1354
1355static inline void rtl8169_delete_timer(struct net_device *dev)
1356{
1357 struct rtl8169_private *tp = netdev_priv(dev);
1358 struct timer_list *timer = &tp->timer;
1359
bcf0bf90 1360 if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) ||
1da177e4
LT
1361 (tp->phy_version >= RTL_GIGA_PHY_VER_H))
1362 return;
1363
1364 del_timer_sync(timer);
1365}
1366
1367static inline void rtl8169_request_timer(struct net_device *dev)
1368{
1369 struct rtl8169_private *tp = netdev_priv(dev);
1370 struct timer_list *timer = &tp->timer;
1371
bcf0bf90 1372 if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) ||
1da177e4
LT
1373 (tp->phy_version >= RTL_GIGA_PHY_VER_H))
1374 return;
1375
1376 init_timer(timer);
1377 timer->expires = jiffies + RTL8169_PHY_TIMEOUT;
1378 timer->data = (unsigned long)(dev);
1379 timer->function = rtl8169_phy_timer;
1380 add_timer(timer);
1381}
1382
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	/* Run the real handler with the IRQ masked; pt_regs is unused here. */
	disable_irq(pdev->irq);
	rtl8169_interrupt(pdev->irq, dev, NULL);
	enable_irq(pdev->irq);
}
#endif
1399
a2b98a69
FR
/*
 * Program dev->dev_addr into the chip's MAC0..MAC5 registers
 * (two 32-bit writes), with the config-register lock released around
 * the writes. Caller holds tp->lock when the device is running.
 *
 * NOTE(review): 'l' is declared __le32 but built by shifting and then
 * passed through cpu_to_be32 — this is sparse-unclean and the net byte
 * order should be verified on a big-endian machine.
 */
static void __rtl8169_set_mac_addr(struct net_device *dev, void __iomem *ioaddr)
{
	unsigned int i, j;

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	for (i = 0; i < 2; i++) {
		__le32 l = 0;

		/* Pack 4 address bytes, first byte in the most-significant slot. */
		for (j = 0; j < 4; j++) {
			l <<= 8;
			l |= dev->dev_addr[4*i + j];
		}
		RTL_W32(MAC0 + 4*i, cpu_to_be32(l));
	}
	RTL_W8(Cfg9346, Cfg9346_Lock);
}
1416
1417static int rtl8169_set_mac_addr(struct net_device *dev, void *p)
1418{
1419 struct rtl8169_private *tp = netdev_priv(dev);
1420 struct sockaddr *addr = p;
1421
1422 if (!is_valid_ether_addr(addr->sa_data))
1423 return -EINVAL;
1424
1425 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1426
1427 if (netif_running(dev)) {
1428 spin_lock_irq(&tp->lock);
1429 __rtl8169_set_mac_addr(dev, tp->mmio_addr);
1430 spin_unlock_irq(&tp->lock);
1431 }
1432 return 0;
1433}
1434
1da177e4
LT
/*
 * Undo the probe-time board setup: unmap MMIO, release the PCI regions,
 * disable the device and free the netdev — in that order.
 */
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
				  void __iomem *ioaddr)
{
	iounmap(ioaddr);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
1443
4ff96fa6
FR
/*
 * One-time PHY bring-up at probe: load the magic PHY config, apply
 * per-MAC-version quirks, then start autonegotiation with this board's
 * module-parameter link options.
 *
 * NOTE(review): the static board_idx counter assumes probes are
 * serialized and indexes the module's link-option arrays by probe
 * order — confirm against how the options are documented.
 */
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static int board_idx = -1;
	u8 autoneg, duplex;
	u16 speed;

	board_idx++;

	rtl8169_hw_phy_config(dev);

	dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
	RTL_W8(0x82, 0x01);

	/* Older chips want a fixed PCI latency timer. */
	if (tp->mac_version < RTL_GIGA_MAC_VER_03) {
		dprintk("Set PCI Latency=0x40\n");
		pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
	}

	if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
		dprintk("Set PHY Reg 0x0bh = 0x00h\n");
		mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0
	}

	rtl8169_link_option(board_idx, &autoneg, &speed, &duplex);

	rtl8169_set_speed(dev, autoneg, speed, duplex);

	if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
		printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name);
}
1477
5f787a1a
FR
1478static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1479{
1480 struct rtl8169_private *tp = netdev_priv(dev);
1481 struct mii_ioctl_data *data = if_mii(ifr);
1482
1483 if (!netif_running(dev))
1484 return -ENODEV;
1485
1486 switch (cmd) {
1487 case SIOCGMIIPHY:
1488 data->phy_id = 32; /* Internal PHY */
1489 return 0;
1490
1491 case SIOCGMIIREG:
1492 data->val_out = mdio_read(tp->mmio_addr, data->reg_num & 0x1f);
1493 return 0;
1494
1495 case SIOCSMIIREG:
1496 if (!capable(CAP_NET_ADMIN))
1497 return -EPERM;
1498 mdio_write(tp->mmio_addr, data->reg_num & 0x1f, data->val_in);
1499 return 0;
1500 }
1501 return -EOPNOTSUPP;
1502}
1503
1da177e4 1504static int __devinit
4ff96fa6 1505rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1da177e4 1506{
bcf0bf90 1507 const unsigned int region = rtl_cfg_info[ent->driver_data].region;
1da177e4 1508 struct rtl8169_private *tp;
4ff96fa6
FR
1509 struct net_device *dev;
1510 void __iomem *ioaddr;
1511 unsigned int i, pm_cap;
1512 int rc;
1da177e4 1513
4ff96fa6
FR
1514 if (netif_msg_drv(&debug)) {
1515 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
1516 MODULENAME, RTL8169_VERSION);
1517 }
1da177e4 1518
1da177e4 1519 dev = alloc_etherdev(sizeof (*tp));
4ff96fa6 1520 if (!dev) {
b57b7e5a 1521 if (netif_msg_drv(&debug))
9b91cf9d 1522 dev_err(&pdev->dev, "unable to alloc new ethernet\n");
4ff96fa6
FR
1523 rc = -ENOMEM;
1524 goto out;
1da177e4
LT
1525 }
1526
1527 SET_MODULE_OWNER(dev);
1528 SET_NETDEV_DEV(dev, &pdev->dev);
1529 tp = netdev_priv(dev);
b57b7e5a 1530 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
1da177e4
LT
1531
1532 /* enable device (incl. PCI PM wakeup and hotplug setup) */
1533 rc = pci_enable_device(pdev);
b57b7e5a 1534 if (rc < 0) {
2e8a538d 1535 if (netif_msg_probe(tp))
9b91cf9d 1536 dev_err(&pdev->dev, "enable failure\n");
4ff96fa6 1537 goto err_out_free_dev_1;
1da177e4
LT
1538 }
1539
1540 rc = pci_set_mwi(pdev);
1541 if (rc < 0)
4ff96fa6 1542 goto err_out_disable_2;
1da177e4
LT
1543
1544 /* save power state before pci_enable_device overwrites it */
1545 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
1546 if (pm_cap) {
4ff96fa6 1547 u16 pwr_command, acpi_idle_state;
1da177e4
LT
1548
1549 pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command);
1550 acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
1551 } else {
4ff96fa6 1552 if (netif_msg_probe(tp)) {
9b91cf9d 1553 dev_err(&pdev->dev,
4ff96fa6
FR
1554 "PowerManagement capability not found.\n");
1555 }
1da177e4
LT
1556 }
1557
1558 /* make sure PCI base addr 1 is MMIO */
bcf0bf90 1559 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
4ff96fa6 1560 if (netif_msg_probe(tp)) {
9b91cf9d 1561 dev_err(&pdev->dev,
bcf0bf90
FR
1562 "region #%d not an MMIO resource, aborting\n",
1563 region);
4ff96fa6 1564 }
1da177e4 1565 rc = -ENODEV;
4ff96fa6 1566 goto err_out_mwi_3;
1da177e4 1567 }
4ff96fa6 1568
1da177e4 1569 /* check for weird/broken PCI region reporting */
bcf0bf90 1570 if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
4ff96fa6 1571 if (netif_msg_probe(tp)) {
9b91cf9d 1572 dev_err(&pdev->dev,
4ff96fa6
FR
1573 "Invalid PCI region size(s), aborting\n");
1574 }
1da177e4 1575 rc = -ENODEV;
4ff96fa6 1576 goto err_out_mwi_3;
1da177e4
LT
1577 }
1578
1579 rc = pci_request_regions(pdev, MODULENAME);
b57b7e5a 1580 if (rc < 0) {
2e8a538d 1581 if (netif_msg_probe(tp))
9b91cf9d 1582 dev_err(&pdev->dev, "could not request regions.\n");
4ff96fa6 1583 goto err_out_mwi_3;
1da177e4
LT
1584 }
1585
1586 tp->cp_cmd = PCIMulRW | RxChkSum;
1587
1588 if ((sizeof(dma_addr_t) > 4) &&
1589 !pci_set_dma_mask(pdev, DMA_64BIT_MASK) && use_dac) {
1590 tp->cp_cmd |= PCIDAC;
1591 dev->features |= NETIF_F_HIGHDMA;
1592 } else {
1593 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1594 if (rc < 0) {
4ff96fa6 1595 if (netif_msg_probe(tp)) {
9b91cf9d 1596 dev_err(&pdev->dev,
4ff96fa6
FR
1597 "DMA configuration failed.\n");
1598 }
1599 goto err_out_free_res_4;
1da177e4
LT
1600 }
1601 }
1602
1603 pci_set_master(pdev);
1604
1605 /* ioremap MMIO region */
bcf0bf90 1606 ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
4ff96fa6 1607 if (!ioaddr) {
b57b7e5a 1608 if (netif_msg_probe(tp))
9b91cf9d 1609 dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
1da177e4 1610 rc = -EIO;
4ff96fa6 1611 goto err_out_free_res_4;
1da177e4
LT
1612 }
1613
1614 /* Unneeded ? Don't mess with Mrs. Murphy. */
1615 rtl8169_irq_mask_and_ack(ioaddr);
1616
1617 /* Soft reset the chip. */
1618 RTL_W8(ChipCmd, CmdReset);
1619
1620 /* Check that the chip has finished the reset. */
b518fa8e 1621 for (i = 100; i > 0; i--) {
1da177e4
LT
1622 if ((RTL_R8(ChipCmd) & CmdReset) == 0)
1623 break;
b518fa8e 1624 msleep_interruptible(1);
1da177e4
LT
1625 }
1626
1627 /* Identify chip attached to board */
1628 rtl8169_get_mac_version(tp, ioaddr);
1629 rtl8169_get_phy_version(tp, ioaddr);
1630
1631 rtl8169_print_mac_version(tp);
1632 rtl8169_print_phy_version(tp);
1633
1634 for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) {
1635 if (tp->mac_version == rtl_chip_info[i].mac_version)
1636 break;
1637 }
1638 if (i < 0) {
1639 /* Unknown chip: assume array element #0, original RTL-8169 */
b57b7e5a 1640 if (netif_msg_probe(tp)) {
2e8a538d 1641 dev_printk(KERN_DEBUG, &pdev->dev,
4ff96fa6
FR
1642 "unknown chip version, assuming %s\n",
1643 rtl_chip_info[0].name);
b57b7e5a 1644 }
1da177e4
LT
1645 i++;
1646 }
1647 tp->chipset = i;
1648
5d06a99f
FR
1649 RTL_W8(Cfg9346, Cfg9346_Unlock);
1650 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
1651 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
1652 RTL_W8(Cfg9346, Cfg9346_Lock);
1653
1da177e4
LT
1654 if (RTL_R8(PHYstatus) & TBI_Enable) {
1655 tp->set_speed = rtl8169_set_speed_tbi;
1656 tp->get_settings = rtl8169_gset_tbi;
1657 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
1658 tp->phy_reset_pending = rtl8169_tbi_reset_pending;
1659 tp->link_ok = rtl8169_tbi_link_ok;
1660
64e4bfb4 1661 tp->phy_1000_ctrl_reg = ADVERTISE_1000FULL; /* Implied by TBI */
1da177e4
LT
1662 } else {
1663 tp->set_speed = rtl8169_set_speed_xmii;
1664 tp->get_settings = rtl8169_gset_xmii;
1665 tp->phy_reset_enable = rtl8169_xmii_reset_enable;
1666 tp->phy_reset_pending = rtl8169_xmii_reset_pending;
1667 tp->link_ok = rtl8169_xmii_link_ok;
5f787a1a
FR
1668
1669 dev->do_ioctl = rtl8169_ioctl;
1da177e4
LT
1670 }
1671
1672 /* Get MAC address. FIXME: read EEPROM */
1673 for (i = 0; i < MAC_ADDR_LEN; i++)
1674 dev->dev_addr[i] = RTL_R8(MAC0 + i);
6d6525b7 1675 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1da177e4
LT
1676
1677 dev->open = rtl8169_open;
1678 dev->hard_start_xmit = rtl8169_start_xmit;
1679 dev->get_stats = rtl8169_get_stats;
1680 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
1681 dev->stop = rtl8169_close;
1682 dev->tx_timeout = rtl8169_tx_timeout;
1683 dev->set_multicast_list = rtl8169_set_rx_mode;
a2b98a69 1684 dev->set_mac_address = rtl8169_set_mac_addr;
1da177e4
LT
1685 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
1686 dev->irq = pdev->irq;
1687 dev->base_addr = (unsigned long) ioaddr;
1688 dev->change_mtu = rtl8169_change_mtu;
1689
1690#ifdef CONFIG_R8169_NAPI
1691 dev->poll = rtl8169_poll;
1692 dev->weight = R8169_NAPI_WEIGHT;
1da177e4
LT
1693#endif
1694
1695#ifdef CONFIG_R8169_VLAN
1696 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1697 dev->vlan_rx_register = rtl8169_vlan_rx_register;
1698 dev->vlan_rx_kill_vid = rtl8169_vlan_rx_kill_vid;
1699#endif
1700
1701#ifdef CONFIG_NET_POLL_CONTROLLER
1702 dev->poll_controller = rtl8169_netpoll;
1703#endif
1704
1705 tp->intr_mask = 0xffff;
1706 tp->pci_dev = pdev;
1707 tp->mmio_addr = ioaddr;
bcf0bf90 1708 tp->align = rtl_cfg_info[ent->driver_data].align;
1da177e4
LT
1709
1710 spin_lock_init(&tp->lock);
1711
1712 rc = register_netdev(dev);
4ff96fa6
FR
1713 if (rc < 0)
1714 goto err_out_unmap_5;
1da177e4
LT
1715
1716 pci_set_drvdata(pdev, dev);
1717
b57b7e5a
SH
1718 if (netif_msg_probe(tp)) {
1719 printk(KERN_INFO "%s: %s at 0x%lx, "
1720 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
1721 "IRQ %d\n",
1722 dev->name,
bcf0bf90 1723 rtl_chip_info[tp->chipset].name,
b57b7e5a
SH
1724 dev->base_addr,
1725 dev->dev_addr[0], dev->dev_addr[1],
1726 dev->dev_addr[2], dev->dev_addr[3],
1727 dev->dev_addr[4], dev->dev_addr[5], dev->irq);
1728 }
1da177e4 1729
4ff96fa6 1730 rtl8169_init_phy(dev, tp);
1da177e4 1731
4ff96fa6
FR
1732out:
1733 return rc;
1da177e4 1734
4ff96fa6
FR
1735err_out_unmap_5:
1736 iounmap(ioaddr);
1737err_out_free_res_4:
1738 pci_release_regions(pdev);
1739err_out_mwi_3:
1740 pci_clear_mwi(pdev);
1741err_out_disable_2:
1742 pci_disable_device(pdev);
1743err_out_free_dev_1:
1744 free_netdev(dev);
1745 goto out;
1da177e4
LT
1746}
1747
/*
 * PCI remove entry point: unregister the interface, release all board
 * resources and clear the drvdata pointer.
 */
static void __devexit
rtl8169_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	assert(dev != NULL);
	assert(tp != NULL);

	unregister_netdev(dev);
	rtl8169_release_board(pdev, dev, tp->mmio_addr);
	pci_set_drvdata(pdev, NULL);
}
1761
1da177e4
LT
1762static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
1763 struct net_device *dev)
1764{
1765 unsigned int mtu = dev->mtu;
1766
1767 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1768}
1769
/*
 * net_device ->open handler: grab the IRQ, allocate both descriptor
 * rings, populate the Rx ring, start the hardware, arm the PHY timer
 * and report the initial link state. Unwinds via the err_* labels in
 * reverse acquisition order on failure.
 */
static int rtl8169_open(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int retval;

	rtl8169_set_rxbufsize(tp, dev);

	retval =
	    request_irq(dev->irq, rtl8169_interrupt, IRQF_SHARED, dev->name, dev);
	if (retval < 0)
		goto out;

	retval = -ENOMEM;

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent provides more.
	 */
	tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
					       &tp->TxPhyAddr);
	if (!tp->TxDescArray)
		goto err_free_irq;

	tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
					       &tp->RxPhyAddr);
	if (!tp->RxDescArray)
		goto err_free_tx;

	retval = rtl8169_init_ring(dev);
	if (retval < 0)
		goto err_free_rx;

	/* Work handler is installed later via rtl8169_schedule_work(). */
	INIT_WORK(&tp->task, NULL, dev);

	rtl8169_hw_start(dev);

	rtl8169_request_timer(dev);

	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
out:
	return retval;

err_free_rx:
	pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
			    tp->RxPhyAddr);
err_free_tx:
	pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
			    tp->TxPhyAddr);
err_free_irq:
	free_irq(dev->irq, dev);
	goto out;
}
1823
/*
 * Quiesce the chip: mask/ack all interrupts, issue a soft reset, then
 * read ChipCmd back to flush the posted write over PCI.
 */
static void rtl8169_hw_reset(void __iomem *ioaddr)
{
	/* Disable interrupts */
	rtl8169_irq_mask_and_ack(ioaddr);

	/* Reset the chipset */
	RTL_W8(ChipCmd, CmdReset);

	/* PCI commit */
	RTL_R8(ChipCmd);
}
1835
/*
 * Bring the hardware to a running state: soft reset, per-version
 * quirks, Rx/Tx configuration, descriptor ring base addresses,
 * interrupt mask, MAC address, and finally wake the Tx queue.
 * The register write order below is deliberate — do not reorder.
 */
static void
rtl8169_hw_start(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	u32 i;

	/* Soft reset the chip. */
	RTL_W8(ChipCmd, CmdReset);

	/* Check that the chip has finished the reset. */
	for (i = 100; i > 0; i--) {
		if ((RTL_R8(ChipCmd) & CmdReset) == 0)
			break;
		msleep_interruptible(1);
	}

	/* Per-version PCI config quirk (purpose undocumented by vendor). */
	if (tp->mac_version == RTL_GIGA_MAC_VER_13) {
		pci_write_config_word(pdev, 0x68, 0x00);
		pci_write_config_word(pdev, 0x69, 0x08);
	}

	/* Undocumented stuff. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		u16 cmd;

		/* Realtek's r1000_n.c driver uses '&& 0x01' here. Well... */
		if ((RTL_R8(Config2) & 0x07) & 0x01)
			RTL_W32(0x7c, 0x0007ffff);

		RTL_W32(0x7c, 0x0007ff00);

		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
		cmd = cmd & 0xef;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	RTL_W8(EarlyTxThres, EarlyTxThld);

	/* Low hurts. Let's disable the filtering. */
	RTL_W16(RxMaxSize, 16383);

	/* Set Rx Config register */
	i = rtl8169_rx_config |
		(RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
	RTL_W32(RxConfig, i);

	/* Set DMA burst size and Interframe Gap Time */
	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));

	tp->cp_cmd |= RTL_R16(CPlusCmd) | PCIMulRW;

	if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
		dprintk(KERN_INFO PFX "Set MAC Reg C+CR Offset 0xE0. "
			"Bit-3 and bit-14 MUST be 1\n");
		tp->cp_cmd |= (1 << 14);
	}

	RTL_W16(CPlusCmd, tp->cp_cmd);

	/*
	 * Undocumented corner. Supposedly:
	 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
	 */
	RTL_W16(IntrMitigate, 0x0000);

	/*
	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr >> 32));
	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr & DMA_32BIT_MASK));
	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr >> 32));
	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr & DMA_32BIT_MASK));
	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl8169_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);

	/* Enable all known interrupts by setting the interrupt mask. */
	RTL_W16(IntrMask, rtl8169_intr_mask);

	__rtl8169_set_mac_addr(dev, ioaddr);

	netif_start_queue(dev);
}
1936
/*
 * net_device ->change_mtu handler. Because the Rx buffer size depends
 * on the MTU, a running interface is fully stopped and restarted with
 * freshly sized buffers.
 *
 * NOTE(review): if rtl8169_init_ring() fails after rtl8169_down(), the
 * device is left stopped with the new MTU recorded — confirm callers
 * handle that state.
 */
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret = 0;

	if (new_mtu < ETH_ZLEN || new_mtu > SafeMtu)
		return -EINVAL;

	dev->mtu = new_mtu;

	if (!netif_running(dev))
		goto out;

	rtl8169_down(dev);

	rtl8169_set_rxbufsize(tp, dev);

	ret = rtl8169_init_ring(dev);
	if (ret < 0)
		goto out;

	netif_poll_enable(dev);

	rtl8169_hw_start(dev);

	rtl8169_request_timer(dev);

out:
	return ret;
}
1967
/*
 * Poison an Rx descriptor so the ASIC will never use it: clear the
 * ownership and size bits and point addr at a recognizable junk value.
 */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = 0x0badbadbadbadbadull;
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
1973
/*
 * Release one Rx slot: unmap its DMA buffer, free the skb, NULL the
 * table entry and poison the descriptor so the chip ignores it.
 */
static void rtl8169_free_rx_skb(struct rtl8169_private *tp,
				struct sk_buff **sk_buff, struct RxDesc *desc)
{
	struct pci_dev *pdev = tp->pci_dev;

	pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb(*sk_buff);
	*sk_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}
1985
/*
 * Hand an Rx descriptor (back) to the hardware: set DescOwn and the
 * buffer size while preserving the descriptor's RingEnd marker.
 */
static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;

	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}
1992
/*
 * Install a DMA mapping into an Rx descriptor. The wmb() guarantees
 * the address is visible before DescOwn transfers the descriptor to
 * the chip in rtl8169_mark_to_asic().
 */
static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	wmb();
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}
2000
/*
 * Allocate, align and DMA-map a fresh Rx skb, then attach it to @desc.
 * Returns 0 on success; on allocation failure returns -ENOMEM after
 * poisoning the descriptor so the chip will skip it.
 * @align extra bytes are reserved to satisfy the chip's Rx alignment.
 */
static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
				struct RxDesc *desc, int rx_buf_sz,
				unsigned int align)
{
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret = 0;

	skb = dev_alloc_skb(rx_buf_sz + align);
	if (!skb)
		goto err_out;

	skb_reserve(skb, align);
	*sk_buff = skb;

	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
				 PCI_DMA_FROMDEVICE);

	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);

out:
	return ret;

err_out:
	ret = -ENOMEM;
	rtl8169_make_unusable_by_asic(desc);
	goto out;
}
2029
2030static void rtl8169_rx_clear(struct rtl8169_private *tp)
2031{
2032 int i;
2033
2034 for (i = 0; i < NUM_RX_DESC; i++) {
2035 if (tp->Rx_skbuff[i]) {
2036 rtl8169_free_rx_skb(tp, tp->Rx_skbuff + i,
2037 tp->RxDescArray + i);
2038 }
2039 }
2040}
2041
/*
 * Populate empty Rx ring slots in [start, end) with fresh skbs,
 * stopping at the first allocation failure. Indices wrap modulo
 * NUM_RX_DESC. Returns the number of slots processed.
 */
static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
			   u32 start, u32 end)
{
	u32 cur;

	for (cur = start; end - cur > 0; cur++) {
		int ret, i = cur % NUM_RX_DESC;

		/* Slot already holds a buffer: nothing to do. */
		if (tp->Rx_skbuff[i])
			continue;

		ret = rtl8169_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
			tp->RxDescArray + i, tp->rx_buf_sz, tp->align);
		if (ret < 0)
			break;
	}
	return cur - start;
}
2060
/* Flag @desc as the last ring entry so the chip wraps back to entry 0. */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
2065
/* Reset all four ring cursors (Tx/Rx, current/dirty) to zero. */
static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
{
	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}
2070
/*
 * Initialize both descriptor rings: reset the indexes, clear the
 * bookkeeping arrays and fully populate the Rx ring.  Returns 0 on
 * success or -ENOMEM when the Rx ring could not be completely filled
 * (any partial fill is undone before returning).
 */
static int rtl8169_init_ring(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_ring_indexes(tp);

	memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));

	if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
		goto err_out;

	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);

	return 0;

err_out:
	rtl8169_rx_clear(tp);
	return -ENOMEM;
}
2091
/*
 * Unmap a transmitted buffer and scrub its descriptor so the slot is
 * clearly free (opts1/addr zeroed, bookkeeping length reset).  The skb
 * itself, if any, is freed by the caller.
 */
static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb,
				 struct TxDesc *desc)
{
	unsigned int len = tx_skb->len;

	pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
	desc->opts1 = 0x00;
	desc->opts2 = 0x00;
	desc->addr = 0x00;
	tx_skb->len = 0;
}
2103
/*
 * Drop every pending Tx buffer between dirty_tx and cur_tx: unmap the
 * DMA buffers, free the skbs and account the frames as dropped.  Used
 * on shutdown/reset paths; both ring indexes are reset afterwards.
 */
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = tp->dirty_tx; i < tp->dirty_tx + NUM_TX_DESC; i++) {
		unsigned int entry = i % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		unsigned int len = tx_skb->len;

		if (len) {
			struct sk_buff *skb = tx_skb->skb;

			rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb,
					     tp->TxDescArray + entry);
			/* Only the slot holding a frame's last fragment
			 * owns the skb pointer (see rtl8169_xmit_frags). */
			if (skb) {
				dev_kfree_skb(skb);
				tx_skb->skb = NULL;
			}
			tp->stats.tx_dropped++;
		}
	}
	tp->cur_tx = tp->dirty_tx = 0;
}
2127
/*
 * (Re)arm the driver's delayed work item with @task as handler and run
 * it ~4 jiffies from now, giving in-flight interrupts a chance to land
 * first.
 */
static void rtl8169_schedule_work(struct net_device *dev, void (*task)(void *))
{
	struct rtl8169_private *tp = netdev_priv(dev);

	PREPARE_WORK(&tp->task, task, dev);
	schedule_delayed_work(&tp->task, 4);
}
2135
/*
 * Ensure neither the irq handler nor the NAPI poll routine is running
 * before the caller tears things down: sync the irq, wait for any
 * pending poll to finish, then mask and ack the chip's interrupt
 * sources.
 */
static void rtl8169_wait_for_quiescence(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	synchronize_irq(dev->irq);

	/* Wait for any pending NAPI task to complete */
	netif_poll_disable(dev);

	rtl8169_irq_mask_and_ack(ioaddr);

	netif_poll_enable(dev);
}
2150
2151static void rtl8169_reinit_task(void *_data)
2152{
2153 struct net_device *dev = _data;
2154 int ret;
2155
2156 if (netif_running(dev)) {
2157 rtl8169_wait_for_quiescence(dev);
2158 rtl8169_close(dev);
2159 }
2160
2161 ret = rtl8169_open(dev);
2162 if (unlikely(ret < 0)) {
2163 if (net_ratelimit()) {
b57b7e5a
SH
2164 struct rtl8169_private *tp = netdev_priv(dev);
2165
2166 if (netif_msg_drv(tp)) {
2167 printk(PFX KERN_ERR
2168 "%s: reinit failure (status = %d)."
2169 " Rescheduling.\n", dev->name, ret);
2170 }
1da177e4
LT
2171 }
2172 rtl8169_schedule_work(dev, rtl8169_reinit_task);
2173 }
2174}
2175
2176static void rtl8169_reset_task(void *_data)
2177{
2178 struct net_device *dev = _data;
2179 struct rtl8169_private *tp = netdev_priv(dev);
2180
2181 if (!netif_running(dev))
2182 return;
2183
2184 rtl8169_wait_for_quiescence(dev);
2185
2186 rtl8169_rx_interrupt(dev, tp, tp->mmio_addr);
2187 rtl8169_tx_clear(tp);
2188
2189 if (tp->dirty_rx == tp->cur_rx) {
2190 rtl8169_init_ring_indexes(tp);
2191 rtl8169_hw_start(dev);
2192 netif_wake_queue(dev);
2193 } else {
2194 if (net_ratelimit()) {
b57b7e5a
SH
2195 struct rtl8169_private *tp = netdev_priv(dev);
2196
2197 if (netif_msg_intr(tp)) {
2198 printk(PFX KERN_EMERG
2199 "%s: Rx buffers shortage\n", dev->name);
2200 }
1da177e4
LT
2201 }
2202 rtl8169_schedule_work(dev, rtl8169_reset_task);
2203 }
2204}
2205
/*
 * Watchdog hook: the hardware stopped transmitting.  Reset the chip
 * immediately and defer ring cleanup/restart to process context.
 */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_hw_reset(tp->mmio_addr);

	/* Let's wait a bit while any (async) irq lands on */
	rtl8169_schedule_work(dev, rtl8169_reset_task);
}
2215
/*
 * Map and queue the paged fragments of @skb on the Tx ring, starting
 * just after tp->cur_tx.  @opts1 carries the offload bits shared by
 * every descriptor of the frame.  Returns the number of descriptors
 * consumed (0 for a linear skb).
 *
 * NOTE(review): pci_map_single() results are not checked for mapping
 * errors.
 */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 opts1)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc *txd;

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 status, len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = frag->size;
		addr = ((void *) page_address(frag->page)) + frag->page_offset;
		mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE);

		/* anti gcc 2.95.3 bugware (sic) */
		status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		/* The last fragment's slot keeps the skb so the Tx
		 * completion path knows when to free it. */
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;
}
2253
/*
 * Compute the hardware offload bits (for the opts1 descriptor field):
 * LargeSend plus MSS when TSO applies, otherwise IP/TCP/UDP checksum
 * offload flags for packets with a partial checksum.  Returns 0 when
 * no offload is requested.
 */
static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
{
	if (dev->features & NETIF_F_TSO) {
		u32 mss = skb_shinfo(skb)->gso_size;

		if (mss)
			return LargeSend | ((mss & MSSMask) << MSSShift);
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = skb->nh.iph;

		if (ip->protocol == IPPROTO_TCP)
			return IPCS | TCPCS;
		else if (ip->protocol == IPPROTO_UDP)
			return IPCS | UDPCS;
		/* Partial checksum on a non-TCP/UDP protocol: nothing the
		 * hardware can do; loudly flag the unexpected case. */
		WARN_ON(1);	/* we need a WARN() */
	}
	return 0;
}
2273
/*
 * hard_start_xmit hook: queue @skb on the Tx ring and kick the chip.
 *
 * Returns NETDEV_TX_OK on success (the skb is consumed) or
 * NETDEV_TX_BUSY when the ring is unexpectedly full (the queue is
 * stopped and the skb left to be retried).
 */
static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int frags, entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	dma_addr_t mapping;
	u32 status, len;
	u32 opts1;
	int ret = NETDEV_TX_OK;

	/* The queue should have been stopped before the ring filled up;
	 * getting here with no room is a driver bug worth shouting about. */
	if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
		if (netif_msg_drv(tp)) {
			printk(KERN_ERR
			       "%s: BUG! Tx Ring full when queue awake!\n",
			       dev->name);
		}
		goto err_stop;
	}

	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop;

	opts1 = DescOwn | rtl8169_tso_csum(skb, dev);

	/* Queue the paged fragments first; the head descriptor is armed
	 * last so the chip never sees a half-built frame. */
	frags = rtl8169_xmit_frags(tp, skb, opts1);
	if (frags) {
		len = skb_headlen(skb);
		opts1 |= FirstFrag;
	} else {
		len = skb->len;

		/* Pad runt frames up to the ethernet minimum.  skb_padto()
		 * frees the skb itself on failure. */
		if (unlikely(len < ETH_ZLEN)) {
			if (skb_padto(skb, ETH_ZLEN))
				goto err_update_stats;
			len = ETH_ZLEN;
		}

		opts1 |= FirstFrag | LastFrag;
		tp->tx_skb[entry].skb = skb;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);
	txd->opts2 = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));

	/* Descriptor contents must be visible before DescOwn is set. */
	wmb();

	/* anti gcc 2.95.3 bugware (sic) */
	status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
	txd->opts1 = cpu_to_le32(status);

	dev->trans_start = jiffies;

	tp->cur_tx += frags + 1;

	/* Publish cur_tx before the chip is kicked / the queue re-checked
	 * (pairs with smp_rmb() in rtl8169_tx_interrupt). */
	smp_wmb();

	RTL_W8(TxPoll, 0x40);	/* set polling bit */

	if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		/* Re-check after stopping: a racing Tx completion may have
		 * freed room between the test and the stop. */
		smp_rmb();
		if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
			netif_wake_queue(dev);
	}

out:
	return ret;

err_stop:
	netif_stop_queue(dev);
	ret = NETDEV_TX_BUSY;
err_update_stats:
	tp->stats.tx_dropped++;
	goto out;
}
2353
/*
 * Handle a SYSErr interrupt: log the PCI command/status registers,
 * attempt an empirical recovery (re-enable SERR/parity, clear the
 * error bits), disable 64-bit DAC addressing if the error hit early
 * at boot, and finally reset the chip.
 */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	void __iomem *ioaddr = tp->mmio_addr;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	if (netif_msg_intr(tp)) {
		printk(KERN_ERR
		       "%s: PCI error (cmd = 0x%04x, status = 0x%04x).\n",
		       dev->name, pci_cmd, pci_status);
	}

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done.
	 *
	 * Feel free to adjust to your needs.
	 */
	pci_write_config_word(pdev, PCI_COMMAND,
			      pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);

	/* Error bits in PCI_STATUS are write-1-to-clear. */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	/* The infamous DAC f*ckup only happens at boot time */
	if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
		if (netif_msg_intr(tp))
			printk(KERN_INFO "%s: disabling PCI DAC.\n", dev->name);
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
		rtl8169_schedule_work(dev, rtl8169_reinit_task);
	}

	rtl8169_hw_reset(ioaddr);
}
2397
/*
 * Reclaim completed Tx descriptors: unmap buffers, free the skb on a
 * frame's last fragment, update stats and wake the queue when enough
 * room becomes available again.  Runs in irq (or NAPI poll) context.
 */
static void
rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
		     void __iomem *ioaddr)
{
	unsigned int dirty_tx, tx_left;

	assert(dev != NULL);
	assert(tp != NULL);
	assert(ioaddr != NULL);

	dirty_tx = tp->dirty_tx;
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 len = tx_skb->len;
		u32 status;

		/* Read the descriptor only after the ownership check data
		 * is ordered with the chip's DMA writes. */
		rmb();
		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		if (status & DescOwn)
			break;

		tp->stats.tx_bytes += len;
		tp->stats.tx_packets++;

		rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry);

		if (status & LastFrag) {
			dev_kfree_skb_irq(tx_skb->skb);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		/* Publish dirty_tx before re-checking the queue state
		 * (pairs with smp_rmb() in rtl8169_start_xmit). */
		smp_wmb();
		if (netif_queue_stopped(dev) &&
		    (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
	}
}
2445
126fa4b9
FR
2446static inline int rtl8169_fragmented_frame(u32 status)
2447{
2448 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
2449}
2450
1da177e4
LT
2451static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
2452{
2453 u32 opts1 = le32_to_cpu(desc->opts1);
2454 u32 status = opts1 & RxProtoMask;
2455
2456 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
2457 ((status == RxProtoUDP) && !(opts1 & UDPFail)) ||
2458 ((status == RxProtoIP) && !(opts1 & IPFail)))
2459 skb->ip_summed = CHECKSUM_UNNECESSARY;
2460 else
2461 skb->ip_summed = CHECKSUM_NONE;
2462}
2463
/*
 * For small packets (< rx_copybreak) copy the payload into a fresh
 * skb so the original receive buffer can be recycled by the chip.
 * On success *sk_buff points at the copy, the descriptor is handed
 * back to the asic and 0 is returned; otherwise -1 is returned and
 * the caller keeps using the original buffer.
 */
static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
				      struct RxDesc *desc, int rx_buf_sz,
				      unsigned int align)
{
	int ret = -1;

	if (pkt_size < rx_copybreak) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(pkt_size + align);
		if (skb) {
			skb_reserve(skb, align);
			eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
			*sk_buff = skb;
			rtl8169_mark_to_asic(desc, rx_buf_sz);
			ret = 0;
		}
	}
	return ret;
}
2484
/*
 * Receive path: walk the Rx ring from cur_rx, up to the NAPI quota,
 * handing each completed packet to the stack (small packets are copied
 * via rtl8169_try_rx_copy so their buffer can be recycled in place).
 * Error and fragmented frames are counted and their buffers returned
 * to the chip.  Afterwards the consumed slots are refilled.  Returns
 * the number of descriptors processed.
 */
static int
rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
		     void __iomem *ioaddr)
{
	unsigned int cur_rx, rx_left;
	unsigned int delta, count;

	assert(dev != NULL);
	assert(tp != NULL);
	assert(ioaddr != NULL);

	cur_rx = tp->cur_rx;
	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = rtl8169_rx_quota(rx_left, (u32) dev->quota);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescArray + entry;
		u32 status;

		/* Order the ownership check against the chip's DMA writes. */
		rmb();
		status = le32_to_cpu(desc->opts1);

		if (status & DescOwn)
			break;
		if (unlikely(status & RxRES)) {
			if (netif_msg_rx_err(tp)) {
				printk(KERN_INFO
				       "%s: Rx ERROR. status = %08x\n",
				       dev->name, status);
			}
			tp->stats.rx_errors++;
			if (status & (RxRWT | RxRUNT))
				tp->stats.rx_length_errors++;
			if (status & RxCRC)
				tp->stats.rx_crc_errors++;
			/* FIFO overflow: the ring needs a full reset. */
			if (status & RxFOVF) {
				rtl8169_schedule_work(dev, rtl8169_reset_task);
				tp->stats.rx_fifo_errors++;
			}
			rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
		} else {
			struct sk_buff *skb = tp->Rx_skbuff[entry];
			int pkt_size = (status & 0x00001FFF) - 4;
			void (*pci_action)(struct pci_dev *, dma_addr_t,
				size_t, int) = pci_dma_sync_single_for_device;

			/*
			 * The driver does not support incoming fragmented
			 * frames. They are seen as a symptom of over-mtu
			 * sized frames.
			 */
			if (unlikely(rtl8169_fragmented_frame(status))) {
				tp->stats.rx_dropped++;
				tp->stats.rx_length_errors++;
				rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
				continue;
			}

			rtl8169_rx_csum(skb, desc);

			pci_dma_sync_single_for_cpu(tp->pci_dev,
				le64_to_cpu(desc->addr), tp->rx_buf_sz,
				PCI_DMA_FROMDEVICE);

			/* If the packet was copied, unmap the original
			 * buffer for good; otherwise just give it back to
			 * the device after the stack got the copy. */
			if (rtl8169_try_rx_copy(&skb, pkt_size, desc,
						tp->rx_buf_sz, tp->align)) {
				pci_action = pci_unmap_single;
				tp->Rx_skbuff[entry] = NULL;
			}

			pci_action(tp->pci_dev, le64_to_cpu(desc->addr),
				   tp->rx_buf_sz, PCI_DMA_FROMDEVICE);

			skb->dev = dev;
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			if (rtl8169_rx_vlan_skb(tp, desc, skb) < 0)
				rtl8169_rx_skb(skb);

			dev->last_rx = jiffies;
			tp->stats.rx_bytes += pkt_size;
			tp->stats.rx_packets++;
		}
	}

	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
	if (!delta && count && netif_msg_intr(tp))
		printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
	tp->dirty_rx += delta;

	/*
	 * FIXME: until there is periodic timer to try and refill the ring,
	 * a temporary shortage may definitely kill the Rx process.
	 * - disable the asic to try and avoid an overflow and kick it again
	 *   after refill ?
	 * - how do others driver handle this condition (Uh oh...).
	 */
	if ((tp->dirty_rx + NUM_RX_DESC == tp->cur_rx) && netif_msg_intr(tp))
		printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name);

	return count;
}
2592
/* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */
static irqreturn_t
rtl8169_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct rtl8169_private *tp = netdev_priv(dev);
	int boguscnt = max_interrupt_work;
	void __iomem *ioaddr = tp->mmio_addr;
	int status;
	int handled = 0;

	do {
		status = RTL_R16(IntrStatus);

		/* hotplug/major error/no more work/shared irq */
		if ((status == 0xFFFF) || !status)
			break;

		handled = 1;

		/* Device going down: mask everything and bail out. */
		if (unlikely(!netif_running(dev))) {
			rtl8169_asic_down(ioaddr);
			goto out;
		}

		status &= tp->intr_mask;
		/* A FIFO overflow is acked together with RxOverflow. */
		RTL_W16(IntrStatus,
			(status & RxFIFOOver) ? (status | RxOverflow) : status);

		if (!(status & rtl8169_intr_mask))
			break;

		if (unlikely(status & SYSErr)) {
			rtl8169_pcierr_interrupt(dev);
			break;
		}

		if (status & LinkChg)
			rtl8169_check_link_status(dev, tp, ioaddr);

#ifdef CONFIG_R8169_NAPI
		/* Mask Rx/Tx events and let the poll routine do the work. */
		RTL_W16(IntrMask, rtl8169_intr_mask & ~rtl8169_napi_event);
		tp->intr_mask = ~rtl8169_napi_event;

		if (likely(netif_rx_schedule_prep(dev)))
			__netif_rx_schedule(dev);
		else if (netif_msg_intr(tp)) {
			printk(KERN_INFO "%s: interrupt %04x taken in poll\n",
			       dev->name, status);
		}
		break;
#else
		/* Rx interrupt */
		if (status & (RxOK | RxOverflow | RxFIFOOver)) {
			rtl8169_rx_interrupt(dev, tp, ioaddr);
		}
		/* Tx interrupt */
		if (status & (TxOK | TxErr))
			rtl8169_tx_interrupt(dev, tp, ioaddr);
#endif

		boguscnt--;
	} while (boguscnt > 0);

	/* Loop budget exhausted: ack everything so the line is released. */
	if (boguscnt <= 0) {
		if (netif_msg_intr(tp) && net_ratelimit() ) {
			printk(KERN_WARNING
			       "%s: Too much work at interrupt!\n", dev->name);
		}
		/* Clear all interrupt sources. */
		RTL_W16(IntrStatus, 0xffff);
	}
out:
	return IRQ_RETVAL(handled);
}
2668
#ifdef CONFIG_R8169_NAPI
/*
 * NAPI poll routine: process Rx within the budget, reclaim Tx, and
 * when all work is done re-enable the chip's interrupts.  Returns
 * nonzero while more work remains (stay on the poll list).
 */
static int rtl8169_poll(struct net_device *dev, int *budget)
{
	unsigned int work_done, work_to_do = min(*budget, dev->quota);
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	work_done = rtl8169_rx_interrupt(dev, tp, ioaddr);
	rtl8169_tx_interrupt(dev, tp, ioaddr);

	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done < work_to_do) {
		netif_rx_complete(dev);
		tp->intr_mask = 0xffff;
		/*
		 * 20040426: the barrier is not strictly required but the
		 * behavior of the irq handler could be less predictable
		 * without it. Btw, the lack of flush for the posted pci
		 * write is safe - FR
		 */
		smp_wmb();
		RTL_W16(IntrMask, rtl8169_intr_mask);
	}

	return (work_done >= work_to_do);
}
#endif
2698
/*
 * Common shutdown path for close() and change_mtu(): stop the timer
 * and the queue, bring the asic down under the lock, and repeat the
 * sequence until the interrupt mask stays clear (a racing NAPI poll
 * may re-enable interrupts).  Finally drop all Tx/Rx buffers.
 */
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;

	rtl8169_delete_timer(dev);

	netif_stop_queue(dev);

	flush_scheduled_work();

core_down:
	spin_lock_irq(&tp->lock);

	rtl8169_asic_down(ioaddr);

	/* Update the error counts. */
	tp->stats.rx_missed_errors += RTL_R32(RxMissed);
	RTL_W32(RxMissed, 0);

	spin_unlock_irq(&tp->lock);

	synchronize_irq(dev->irq);

	/* Disable NAPI polling only once, even if we loop back. */
	if (!poll_locked) {
		netif_poll_disable(dev);
		poll_locked++;
	}

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_sched();	/* FIXME: should this be synchronize_irq()? */

	/*
	 * And now for the 50k$ question: are IRQ disabled or not ?
	 *
	 * Two paths lead here:
	 * 1) dev->close
	 *    -> netif_running() is available to sync the current code and the
	 *       IRQ handler. See rtl8169_interrupt for details.
	 * 2) dev->change_mtu
	 *    -> rtl8169_poll can not be issued again and re-enable the
	 *       interruptions. Let's simply issue the IRQ down sequence again.
	 */
	if (RTL_R16(IntrMask))
		goto core_down;

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);
}
2750
/*
 * net_device close hook: tear the device down, release the irq and
 * free both descriptor rings.
 */
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	rtl8169_down(dev);

	free_irq(dev->irq, dev);

	/* Undo the netif_poll_disable() done inside rtl8169_down(). */
	netif_poll_enable(dev);

	pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
			    tp->RxPhyAddr);
	pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
			    tp->TxPhyAddr);
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	return 0;
}
2771
/*
 * Program the receive filter according to the device flags: promiscuous
 * mode, accept-all-multicast, or a 64-bit multicast hash built with
 * ether_crc().  Certain chip revisions cannot use the hash filter, so
 * they fall back to accepting all multicasts.
 */
static void
rtl8169_set_rx_mode(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	int i, rx_mode;
	u32 tmp = 0;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		if (netif_msg_link(tp)) {
			printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
			       dev->name);
		}
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		/* Hash each address into one of 64 filter bits. */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	tmp = rtl8169_rx_config | rx_mode |
	      (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);

	/* These chip revisions cannot use the multicast hash filter:
	 * open the filter completely instead. */
	if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_13) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_15)) {
		mc_filter[0] = 0xffffffff;
		mc_filter[1] = 0xffffffff;
	}

	RTL_W32(RxConfig, tmp);
	RTL_W32(MAR0 + 0, mc_filter[0]);
	RTL_W32(MAR0 + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}
2829
2830/**
2831 * rtl8169_get_stats - Get rtl8169 read/write statistics
2832 * @dev: The Ethernet Device to get statistics for
2833 *
2834 * Get TX/RX statistics for rtl8169
2835 */
2836static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
2837{
2838 struct rtl8169_private *tp = netdev_priv(dev);
2839 void __iomem *ioaddr = tp->mmio_addr;
2840 unsigned long flags;
2841
2842 if (netif_running(dev)) {
2843 spin_lock_irqsave(&tp->lock, flags);
2844 tp->stats.rx_missed_errors += RTL_R32(RxMissed);
2845 RTL_W32(RxMissed, 0);
2846 spin_unlock_irqrestore(&tp->lock, flags);
2847 }
5b0384f4 2848
1da177e4
LT
2849 return &tp->stats;
2850}
2851
#ifdef CONFIG_PM

/*
 * PM suspend hook: detach the interface, quiesce the chip, fold the
 * missed-frame counter into the stats, then arm wake-on-lan (if
 * enabled) and enter the requested low-power state.
 */
static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	if (!netif_running(dev))
		goto out;

	netif_device_detach(dev);
	netif_stop_queue(dev);

	spin_lock_irq(&tp->lock);

	rtl8169_asic_down(ioaddr);

	tp->stats.rx_missed_errors += RTL_R32(RxMissed);
	RTL_W32(RxMissed, 0);

	spin_unlock_irq(&tp->lock);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
out:
	return 0;
}
2881
/*
 * PM resume hook: restore PCI state, disable wake-up, and schedule a
 * full reset in process context to bring the rings and hardware back
 * up.
 */
static int rtl8169_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (!netif_running(dev))
		goto out;

	netif_device_attach(dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	rtl8169_schedule_work(dev, rtl8169_reset_task);
out:
	return 0;
}

#endif /* CONFIG_PM */
2901
/* PCI driver glue: probe/remove plus optional power-management hooks. */
static struct pci_driver rtl8169_pci_driver = {
	.name		= MODULENAME,
	.id_table	= rtl8169_pci_tbl,
	.probe		= rtl8169_init_one,
	.remove		= __devexit_p(rtl8169_remove_one),
#ifdef CONFIG_PM
	.suspend	= rtl8169_suspend,
	.resume		= rtl8169_resume,
#endif
};

/* Module entry point: register the PCI driver. */
static int __init
rtl8169_init_module(void)
{
	return pci_register_driver(&rtl8169_pci_driver);
}

/* Module exit point: unregister the PCI driver. */
static void __exit
rtl8169_cleanup_module(void)
{
	pci_unregister_driver(&rtl8169_pci_driver);
}

module_init(rtl8169_init_module);
module_exit(rtl8169_cleanup_module);