]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/r8169.c
r8169: issue request_irq after the private data are completely initialized
[net-next-2.6.git] / drivers / net / r8169.c
CommitLineData
1da177e4
LT
1/*
2=========================================================================
3 r8169.c: A RealTek RTL-8169 Gigabit Ethernet driver for Linux kernel 2.4.x.
4 --------------------------------------------------------------------
5
6 History:
7 Feb 4 2002 - created initially by ShuChen <shuchen@realtek.com.tw>.
8 May 20 2002 - Add link status force-mode and TBI mode support.
5b0384f4 9 2004 - Massive updates. See kernel SCM system for details.
1da177e4
LT
10=========================================================================
11 1. [DEPRECATED: use ethtool instead] The media can be forced in 5 modes.
12 Command: 'insmod r8169 media = SET_MEDIA'
13 Ex: 'insmod r8169 media = 0x04' will force PHY to operate in 100Mpbs Half-duplex.
5b0384f4 14
1da177e4
LT
15 SET_MEDIA can be:
16 _10_Half = 0x01
17 _10_Full = 0x02
18 _100_Half = 0x04
19 _100_Full = 0x08
20 _1000_Full = 0x10
5b0384f4 21
1da177e4
LT
22 2. Support TBI mode.
23=========================================================================
24VERSION 1.1 <2002/10/4>
25
26 The bit4:0 of MII register 4 is called "selector field", and have to be
27 00001b to indicate support of IEEE std 802.3 during NWay process of
5b0384f4 28 exchanging Link Code Word (FLP).
1da177e4
LT
29
30VERSION 1.2 <2002/11/30>
31
32 - Large style cleanup
33 - Use ether_crc in stock kernel (linux/crc32.h)
34 - Copy mc_filter setup code from 8139cp
35 (includes an optimization, and avoids set_bit use)
36
37VERSION 1.6LK <2004/04/14>
38
39 - Merge of Realtek's version 1.6
40 - Conversion to DMA API
41 - Suspend/resume
42 - Endianness
43 - Misc Rx/Tx bugs
44
45VERSION 2.2LK <2005/01/25>
46
47 - RX csum, TX csum/SG, TSO
48 - VLAN
49 - baby (< 7200) Jumbo frames support
50 - Merge of Realtek's version 2.2 (new phy)
51 */
52
53#include <linux/module.h>
54#include <linux/moduleparam.h>
55#include <linux/pci.h>
56#include <linux/netdevice.h>
57#include <linux/etherdevice.h>
58#include <linux/delay.h>
59#include <linux/ethtool.h>
60#include <linux/mii.h>
61#include <linux/if_vlan.h>
62#include <linux/crc32.h>
63#include <linux/in.h>
64#include <linux/ip.h>
65#include <linux/tcp.h>
66#include <linux/init.h>
67#include <linux/dma-mapping.h>
68
99f252b0 69#include <asm/system.h>
1da177e4
LT
70#include <asm/io.h>
71#include <asm/irq.h>
72
f7ccf420
SH
73#ifdef CONFIG_R8169_NAPI
74#define NAPI_SUFFIX "-NAPI"
75#else
76#define NAPI_SUFFIX ""
77#endif
78
79#define RTL8169_VERSION "2.2LK" NAPI_SUFFIX
1da177e4
LT
80#define MODULENAME "r8169"
81#define PFX MODULENAME ": "
82
83#ifdef RTL8169_DEBUG
84#define assert(expr) \
5b0384f4
FR
85 if (!(expr)) { \
86 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
87 #expr,__FILE__,__FUNCTION__,__LINE__); \
88 }
1da177e4
LT
89#define dprintk(fmt, args...) do { printk(PFX fmt, ## args); } while (0)
90#else
91#define assert(expr) do {} while (0)
92#define dprintk(fmt, args...) do {} while (0)
93#endif /* RTL8169_DEBUG */
94
b57b7e5a 95#define R8169_MSG_DEFAULT \
f0e837d9 96 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
b57b7e5a 97
1da177e4
LT
98#define TX_BUFFS_AVAIL(tp) \
99 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
100
101#ifdef CONFIG_R8169_NAPI
102#define rtl8169_rx_skb netif_receive_skb
0b50f81d 103#define rtl8169_rx_hwaccel_skb vlan_hwaccel_receive_skb
1da177e4
LT
104#define rtl8169_rx_quota(count, quota) min(count, quota)
105#else
106#define rtl8169_rx_skb netif_rx
0b50f81d 107#define rtl8169_rx_hwaccel_skb vlan_hwaccel_rx
1da177e4
LT
108#define rtl8169_rx_quota(count, quota) count
109#endif
110
111/* media options */
112#define MAX_UNITS 8
113static int media[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
114static int num_media = 0;
115
116/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
f71e1309 117static const int max_interrupt_work = 20;
1da177e4
LT
118
119/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
120 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
f71e1309 121static const int multicast_filter_limit = 32;
1da177e4
LT
122
123/* MAC address length */
124#define MAC_ADDR_LEN 6
125
126#define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */
127#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
128#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
129#define EarlyTxThld 0x3F /* 0x3F means NO early transmit */
130#define RxPacketMaxSize 0x3FE8 /* 16K - 1 - ETH_HLEN - VLAN - CRC... */
131#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
132#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
133
134#define R8169_REGS_SIZE 256
135#define R8169_NAPI_WEIGHT 64
136#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
137#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
138#define RX_BUF_SIZE 1536 /* Rx Buffer size */
139#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
140#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
141
142#define RTL8169_TX_TIMEOUT (6*HZ)
143#define RTL8169_PHY_TIMEOUT (10*HZ)
144
145/* write/read MMIO register */
146#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
147#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
148#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
149#define RTL_R8(reg) readb (ioaddr + (reg))
150#define RTL_R16(reg) readw (ioaddr + (reg))
151#define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg)))
152
153enum mac_version {
bcf0bf90
FR
154 RTL_GIGA_MAC_VER_01 = 0x00,
155 RTL_GIGA_MAC_VER_02 = 0x01,
156 RTL_GIGA_MAC_VER_03 = 0x02,
157 RTL_GIGA_MAC_VER_04 = 0x03,
158 RTL_GIGA_MAC_VER_05 = 0x04,
159 RTL_GIGA_MAC_VER_11 = 0x0b,
160 RTL_GIGA_MAC_VER_12 = 0x0c,
161 RTL_GIGA_MAC_VER_13 = 0x0d,
162 RTL_GIGA_MAC_VER_14 = 0x0e,
163 RTL_GIGA_MAC_VER_15 = 0x0f
1da177e4
LT
164};
165
166enum phy_version {
167 RTL_GIGA_PHY_VER_C = 0x03, /* PHY Reg 0x03 bit0-3 == 0x0000 */
168 RTL_GIGA_PHY_VER_D = 0x04, /* PHY Reg 0x03 bit0-3 == 0x0000 */
169 RTL_GIGA_PHY_VER_E = 0x05, /* PHY Reg 0x03 bit0-3 == 0x0000 */
170 RTL_GIGA_PHY_VER_F = 0x06, /* PHY Reg 0x03 bit0-3 == 0x0001 */
171 RTL_GIGA_PHY_VER_G = 0x07, /* PHY Reg 0x03 bit0-3 == 0x0002 */
172 RTL_GIGA_PHY_VER_H = 0x08, /* PHY Reg 0x03 bit0-3 == 0x0003 */
173};
174
1da177e4
LT
175#define _R(NAME,MAC,MASK) \
176 { .name = NAME, .mac_version = MAC, .RxConfigMask = MASK }
177
3c6bee1d 178static const struct {
1da177e4
LT
179 const char *name;
180 u8 mac_version;
181 u32 RxConfigMask; /* Clears the bits supported by this chip */
182} rtl_chip_info[] = {
bcf0bf90
FR
183 _R("RTL8169", RTL_GIGA_MAC_VER_01, 0xff7e1880),
184 _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_02, 0xff7e1880),
185 _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_03, 0xff7e1880),
186 _R("RTL8169sb/8110sb", RTL_GIGA_MAC_VER_04, 0xff7e1880),
187 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_05, 0xff7e1880),
188 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E
189 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
190 _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
191 _R("RTL8100e", RTL_GIGA_MAC_VER_14, 0xff7e1880), // PCI-E 8139
192 _R("RTL8100e", RTL_GIGA_MAC_VER_15, 0xff7e1880) // PCI-E 8139
1da177e4
LT
193};
194#undef _R
195
bcf0bf90
FR
196enum cfg_version {
197 RTL_CFG_0 = 0x00,
198 RTL_CFG_1,
199 RTL_CFG_2
200};
201
202static const struct {
203 unsigned int region;
204 unsigned int align;
205} rtl_cfg_info[] = {
206 [RTL_CFG_0] = { 1, NET_IP_ALIGN },
207 [RTL_CFG_1] = { 2, NET_IP_ALIGN },
208 [RTL_CFG_2] = { 2, 8 }
209};
210
1da177e4 211static struct pci_device_id rtl8169_pci_tbl[] = {
bcf0bf90 212 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
d2eed8cf 213 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
d81bf551 214 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
bcf0bf90
FR
215 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_2 },
216 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
217 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
73f5e28b 218 { PCI_DEVICE(0x1259, 0xc107), 0, 0, RTL_CFG_0 },
bcf0bf90
FR
219 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
220 { PCI_VENDOR_ID_LINKSYS, 0x1032,
221 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
1da177e4
LT
222 {0,},
223};
224
225MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
226
227static int rx_copybreak = 200;
228static int use_dac;
b57b7e5a
SH
229static struct {
230 u32 msg_enable;
231} debug = { -1 };
1da177e4
LT
232
233enum RTL8169_registers {
234 MAC0 = 0, /* Ethernet hardware address. */
235 MAR0 = 8, /* Multicast filter. */
d4a3a0fc
SH
236 CounterAddrLow = 0x10,
237 CounterAddrHigh = 0x14,
1da177e4
LT
238 TxDescStartAddrLow = 0x20,
239 TxDescStartAddrHigh = 0x24,
240 TxHDescStartAddrLow = 0x28,
241 TxHDescStartAddrHigh = 0x2c,
242 FLASH = 0x30,
243 ERSR = 0x36,
244 ChipCmd = 0x37,
245 TxPoll = 0x38,
246 IntrMask = 0x3C,
247 IntrStatus = 0x3E,
248 TxConfig = 0x40,
249 RxConfig = 0x44,
250 RxMissed = 0x4C,
251 Cfg9346 = 0x50,
252 Config0 = 0x51,
253 Config1 = 0x52,
254 Config2 = 0x53,
255 Config3 = 0x54,
256 Config4 = 0x55,
257 Config5 = 0x56,
258 MultiIntr = 0x5C,
259 PHYAR = 0x60,
260 TBICSR = 0x64,
261 TBI_ANAR = 0x68,
262 TBI_LPAR = 0x6A,
263 PHYstatus = 0x6C,
264 RxMaxSize = 0xDA,
265 CPlusCmd = 0xE0,
266 IntrMitigate = 0xE2,
267 RxDescAddrLow = 0xE4,
268 RxDescAddrHigh = 0xE8,
269 EarlyTxThres = 0xEC,
270 FuncEvent = 0xF0,
271 FuncEventMask = 0xF4,
272 FuncPresetState = 0xF8,
273 FuncForceEvent = 0xFC,
274};
275
276enum RTL8169_register_content {
277 /* InterruptStatusBits */
278 SYSErr = 0x8000,
279 PCSTimeout = 0x4000,
280 SWInt = 0x0100,
281 TxDescUnavail = 0x80,
282 RxFIFOOver = 0x40,
283 LinkChg = 0x20,
284 RxOverflow = 0x10,
285 TxErr = 0x08,
286 TxOK = 0x04,
287 RxErr = 0x02,
288 RxOK = 0x01,
289
290 /* RxStatusDesc */
9dccf611
FR
291 RxFOVF = (1 << 23),
292 RxRWT = (1 << 22),
293 RxRES = (1 << 21),
294 RxRUNT = (1 << 20),
295 RxCRC = (1 << 19),
1da177e4
LT
296
297 /* ChipCmdBits */
298 CmdReset = 0x10,
299 CmdRxEnb = 0x08,
300 CmdTxEnb = 0x04,
301 RxBufEmpty = 0x01,
302
303 /* Cfg9346Bits */
304 Cfg9346_Lock = 0x00,
305 Cfg9346_Unlock = 0xC0,
306
307 /* rx_mode_bits */
308 AcceptErr = 0x20,
309 AcceptRunt = 0x10,
310 AcceptBroadcast = 0x08,
311 AcceptMulticast = 0x04,
312 AcceptMyPhys = 0x02,
313 AcceptAllPhys = 0x01,
314
315 /* RxConfigBits */
316 RxCfgFIFOShift = 13,
317 RxCfgDMAShift = 8,
318
319 /* TxConfigBits */
320 TxInterFrameGapShift = 24,
321 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
322
5d06a99f
FR
323 /* Config1 register p.24 */
324 PMEnable = (1 << 0), /* Power Management Enable */
325
61a4dcc2
FR
326 /* Config3 register p.25 */
327 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
328 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
329
5d06a99f 330 /* Config5 register p.27 */
61a4dcc2
FR
331 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
332 MWF = (1 << 5), /* Accept Multicast wakeup frame */
333 UWF = (1 << 4), /* Accept Unicast wakeup frame */
334 LanWake = (1 << 1), /* LanWake enable/disable */
5d06a99f
FR
335 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
336
1da177e4
LT
337 /* TBICSR p.28 */
338 TBIReset = 0x80000000,
339 TBILoopback = 0x40000000,
340 TBINwEnable = 0x20000000,
341 TBINwRestart = 0x10000000,
342 TBILinkOk = 0x02000000,
343 TBINwComplete = 0x01000000,
344
345 /* CPlusCmd p.31 */
346 RxVlan = (1 << 6),
347 RxChkSum = (1 << 5),
348 PCIDAC = (1 << 4),
349 PCIMulRW = (1 << 3),
350
351 /* rtl8169_PHYstatus */
352 TBI_Enable = 0x80,
353 TxFlowCtrl = 0x40,
354 RxFlowCtrl = 0x20,
355 _1000bpsF = 0x10,
356 _100bps = 0x08,
357 _10bps = 0x04,
358 LinkStatus = 0x02,
359 FullDup = 0x01,
360
1da177e4
LT
361 /* _MediaType */
362 _10_Half = 0x01,
363 _10_Full = 0x02,
364 _100_Half = 0x04,
365 _100_Full = 0x08,
366 _1000_Full = 0x10,
367
368 /* _TBICSRBit */
369 TBILinkOK = 0x02000000,
d4a3a0fc
SH
370
371 /* DumpCounterCommand */
372 CounterDump = 0x8,
1da177e4
LT
373};
374
375enum _DescStatusBit {
376 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
377 RingEnd = (1 << 30), /* End of descriptor ring */
378 FirstFrag = (1 << 29), /* First segment of a packet */
379 LastFrag = (1 << 28), /* Final segment of a packet */
380
381 /* Tx private */
382 LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
383 MSSShift = 16, /* MSS value position */
384 MSSMask = 0xfff, /* MSS value + LargeSend bit: 12 bits */
385 IPCS = (1 << 18), /* Calculate IP checksum */
386 UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
387 TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
388 TxVlanTag = (1 << 17), /* Add VLAN tag */
389
390 /* Rx private */
391 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
392 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
393
394#define RxProtoUDP (PID1)
395#define RxProtoTCP (PID0)
396#define RxProtoIP (PID1 | PID0)
397#define RxProtoMask RxProtoIP
398
399 IPFail = (1 << 16), /* IP checksum failed */
400 UDPFail = (1 << 15), /* UDP/IP checksum failed */
401 TCPFail = (1 << 14), /* TCP/IP checksum failed */
402 RxVlanTag = (1 << 16), /* VLAN tag available */
403};
404
405#define RsvdMask 0x3fffc000
406
407struct TxDesc {
408 u32 opts1;
409 u32 opts2;
410 u64 addr;
411};
412
413struct RxDesc {
414 u32 opts1;
415 u32 opts2;
416 u64 addr;
417};
418
419struct ring_info {
420 struct sk_buff *skb;
421 u32 len;
422 u8 __pad[sizeof(void *) - sizeof(u32)];
423};
424
/*
 * Per-adapter private state, reachable via netdev_priv(dev).
 * Protected by tp->lock unless noted otherwise.
 */
struct rtl8169_private {
	void __iomem *mmio_addr;	/* memory map physical address */
	struct pci_dev *pci_dev;	/* Index of PCI device */
	struct net_device *dev;
	struct net_device_stats stats;	/* statistics of net device */
	spinlock_t lock;		/* spin lock flag */
	u32 msg_enable;			/* netif_msg_* verbosity bitmask */
	int chipset;			/* index into rtl_chip_info[] */
	int mac_version;		/* RTL_GIGA_MAC_VER_* */
	int phy_version;		/* RTL_GIGA_PHY_VER_* */
	u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
	u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
	u32 dirty_rx;
	u32 dirty_tx;
	struct TxDesc *TxDescArray;	/* 256-aligned Tx descriptor ring */
	struct RxDesc *RxDescArray;	/* 256-aligned Rx descriptor ring */
	dma_addr_t TxPhyAddr;		/* DMA address of TxDescArray */
	dma_addr_t RxPhyAddr;		/* DMA address of RxDescArray */
	struct sk_buff *Rx_skbuff[NUM_RX_DESC];	/* Rx data buffers */
	struct ring_info tx_skb[NUM_TX_DESC];	/* Tx data buffers */
	unsigned align;			/* Rx buffer alignment (rtl_cfg_info) */
	unsigned rx_buf_sz;
	struct timer_list timer;	/* PHY link watchdog timer */
	u16 cp_cmd;			/* cached CPlusCmd register value */
	u16 intr_mask;
	int phy_auto_nego_reg;		/* cached MII_ADVERTISE value */
	int phy_1000_ctrl_reg;		/* cached MII_CTRL1000 value */
#ifdef CONFIG_R8169_VLAN
	struct vlan_group *vlgrp;
#endif
	/* TBI vs. MII (xmii) operations, selected at probe time. */
	int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
	void (*get_settings)(struct net_device *, struct ethtool_cmd *);
	void (*phy_reset_enable)(void __iomem *);
	unsigned int (*phy_reset_pending)(void __iomem *);
	unsigned int (*link_ok)(void __iomem *);
	struct delayed_work task;	/* deferred reset/recovery work */
	unsigned wol_enabled : 1;	/* set by rtl8169_set_wol() */
};
463
979b6c13 464MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
1da177e4
LT
465MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
466module_param_array(media, int, &num_media, 0);
df0a1bf6 467MODULE_PARM_DESC(media, "force phy operation. Deprecated by ethtool (8).");
1da177e4 468module_param(rx_copybreak, int, 0);
1b7efd58 469MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
1da177e4
LT
470module_param(use_dac, int, 0);
471MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
b57b7e5a
SH
472module_param_named(debug, debug.msg_enable, int, 0);
473MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
1da177e4
LT
474MODULE_LICENSE("GPL");
475MODULE_VERSION(RTL8169_VERSION);
476
477static int rtl8169_open(struct net_device *dev);
478static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev);
7d12e780 479static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance);
1da177e4
LT
480static int rtl8169_init_ring(struct net_device *dev);
481static void rtl8169_hw_start(struct net_device *dev);
482static int rtl8169_close(struct net_device *dev);
483static void rtl8169_set_rx_mode(struct net_device *dev);
484static void rtl8169_tx_timeout(struct net_device *dev);
4dcb7d33 485static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
1da177e4
LT
486static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
487 void __iomem *);
4dcb7d33 488static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
1da177e4 489static void rtl8169_down(struct net_device *dev);
99f252b0 490static void rtl8169_rx_clear(struct rtl8169_private *tp);
1da177e4
LT
491
492#ifdef CONFIG_R8169_NAPI
493static int rtl8169_poll(struct net_device *dev, int *budget);
494#endif
495
496static const u16 rtl8169_intr_mask =
497 SYSErr | LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK;
498static const u16 rtl8169_napi_event =
499 RxOK | RxOverflow | RxFIFOOver | TxOK | TxErr;
500static const unsigned int rtl8169_rx_config =
5b0384f4 501 (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
1da177e4
LT
502
/*
 * Write @value to PHY register @RegAddr through the chip's indirect
 * MDIO interface (PHYAR).  Bit 31 set requests a write; it is polled
 * until the chip clears it (up to 20 * 25us) to signal completion.
 */
static void mdio_write(void __iomem *ioaddr, int RegAddr, int value)
{
	int i;

	RTL_W32(PHYAR, 0x80000000 | (RegAddr & 0xFF) << 16 | value);

	for (i = 20; i > 0; i--) {
		/* Check if the RTL8169 has completed writing to the specified MII register */
		if (!(RTL_R32(PHYAR) & 0x80000000))
			break;
		udelay(25);
	}
	/* NOTE(review): on timeout the write silently fails — no error path. */
}
516
/*
 * Read a PHY register through the indirect MDIO interface (PHYAR).
 * Bit 31 clear requests a read; the chip sets it when the data in
 * the low 16 bits is valid.  Returns the register value, or -1 if
 * the chip did not complete within 20 * 25us.
 */
static int mdio_read(void __iomem *ioaddr, int RegAddr)
{
	int i, value = -1;

	RTL_W32(PHYAR, 0x0 | (RegAddr & 0xFF) << 16);

	for (i = 20; i > 0; i--) {
		/* Check if the RTL8169 has completed retrieving data from the specified MII register */
		if (RTL_R32(PHYAR) & 0x80000000) {
			value = (int) (RTL_R32(PHYAR) & 0xFFFF);
			break;
		}
		udelay(25);
	}
	return value;
}
533
/* Disable all interrupt sources, then acknowledge any pending status. */
static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
{
	RTL_W16(IntrMask, 0x0000);

	RTL_W16(IntrStatus, 0xffff);	/* write-1-to-clear all bits */
}
540
/* Stop the Rx/Tx engines and quiesce interrupts. */
static void rtl8169_asic_down(void __iomem *ioaddr)
{
	RTL_W8(ChipCmd, 0x00);		/* clear CmdRxEnb | CmdTxEnb */
	rtl8169_irq_mask_and_ack(ioaddr);
	RTL_R16(CPlusCmd);		/* dummy read flushes the posted writes */
}
547
/* TBI mode: non-zero while the TBI reset bit is still self-clearing. */
static unsigned int rtl8169_tbi_reset_pending(void __iomem *ioaddr)
{
	return RTL_R32(TBICSR) & TBIReset;
}
552
/* MII mode: non-zero while the PHY's self-clearing BMCR reset is active. */
static unsigned int rtl8169_xmii_reset_pending(void __iomem *ioaddr)
{
	return mdio_read(ioaddr, MII_BMCR) & BMCR_RESET;
}
557
/* TBI mode: non-zero when the TBI link is up. */
static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
{
	return RTL_R32(TBICSR) & TBILinkOk;
}
562
/* MII mode: non-zero when PHYstatus reports link established. */
static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
{
	return RTL_R8(PHYstatus) & LinkStatus;
}
567
/* TBI mode: kick off a TBI reset (bit self-clears when done). */
static void rtl8169_tbi_reset_enable(void __iomem *ioaddr)
{
	RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
}
572
/* MII mode: request a PHY soft reset via BMCR (bit self-clears when done). */
static void rtl8169_xmii_reset_enable(void __iomem *ioaddr)
{
	unsigned int val;

	/* Read-modify-write; mask to 16 bits as BMCR is a 16-bit register. */
	val = mdio_read(ioaddr, MII_BMCR) | BMCR_RESET;
	mdio_write(ioaddr, MII_BMCR, val & 0xffff);
}
580
/*
 * Sync the net_device carrier state with the hardware link state
 * (via the mode-specific tp->link_ok hook) and log the transition.
 */
static void rtl8169_check_link_status(struct net_device *dev,
				      struct rtl8169_private *tp, void __iomem *ioaddr)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);
	if (tp->link_ok(ioaddr)) {
		netif_carrier_on(dev);
		if (netif_msg_ifup(tp))
			printk(KERN_INFO PFX "%s: link up\n", dev->name);
	} else {
		if (netif_msg_ifdown(tp))
			printk(KERN_INFO PFX "%s: link down\n", dev->name);
		netif_carrier_off(dev);
	}
	spin_unlock_irqrestore(&tp->lock, flags);
}
598
/*
 * Translate the deprecated "media" module parameter for unit @idx into
 * autoneg/speed/duplex settings.  Unknown or absent options fall through
 * to the table's 0xff sentinel entry: 1000/full with autoneg enabled.
 */
static void rtl8169_link_option(int idx, u8 *autoneg, u16 *speed, u8 *duplex)
{
	struct {
		u16 speed;
		u8 duplex;
		u8 autoneg;
		u8 media;
	} link_settings[] = {
		{ SPEED_10,	DUPLEX_HALF, AUTONEG_DISABLE,	_10_Half },
		{ SPEED_10,	DUPLEX_FULL, AUTONEG_DISABLE,	_10_Full },
		{ SPEED_100,	DUPLEX_HALF, AUTONEG_DISABLE,	_100_Half },
		{ SPEED_100,	DUPLEX_FULL, AUTONEG_DISABLE,	_100_Full },
		{ SPEED_1000,	DUPLEX_FULL, AUTONEG_DISABLE,	_1000_Full },
		/* Make TBI happy */
		{ SPEED_1000,	DUPLEX_FULL, AUTONEG_ENABLE,	0xff }
	}, *p;
	unsigned char option;

	option = ((idx < MAX_UNITS) && (idx >= 0)) ? media[idx] : 0xff;

	/* Warn once (for unit 0 only) that the parameter is deprecated. */
	if ((option != 0xff) && !idx && netif_msg_drv(&debug))
		printk(KERN_WARNING PFX "media option is deprecated.\n");

	for (p = link_settings; p->media != 0xff; p++) {
		if (p->media == option)
			break;
	}
	*autoneg = p->autoneg;
	*speed = p->speed;
	*duplex = p->duplex;
}
630
61a4dcc2
FR
/*
 * ethtool get_wol: report supported and currently enabled Wake-on-LAN
 * modes by decoding the Config1/Config3/Config5 registers.
 */
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u8 options;

	wol->wolopts = 0;

#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
	wol->supported = WAKE_ANY;

	spin_lock_irq(&tp->lock);

	/* No wake modes are active unless power management is enabled. */
	options = RTL_R8(Config1);
	if (!(options & PMEnable))
		goto out_unlock;

	options = RTL_R8(Config3);
	if (options & LinkUp)
		wol->wolopts |= WAKE_PHY;
	if (options & MagicPacket)
		wol->wolopts |= WAKE_MAGIC;

	options = RTL_R8(Config5);
	if (options & UWF)
		wol->wolopts |= WAKE_UCAST;
	if (options & BWF)
		wol->wolopts |= WAKE_BCAST;
	if (options & MWF)
		wol->wolopts |= WAKE_MCAST;

out_unlock:
	spin_unlock_irq(&tp->lock);
}
665
/*
 * ethtool set_wol: program the requested Wake-on-LAN modes into the
 * Config1/3/5 registers.  The config registers are write-protected,
 * so the sequence is bracketed by Cfg9346 unlock/lock.  Always 0.
 */
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int i;
	/* Map of each WAKE_* option to the config register/bit driving it. */
	static struct {
		u32 opt;
		u16 reg;
		u8  mask;
	} cfg[] = {
		{ WAKE_ANY,   Config1, PMEnable },
		{ WAKE_PHY,   Config3, LinkUp },
		{ WAKE_MAGIC, Config3, MagicPacket },
		{ WAKE_UCAST, Config5, UWF },
		{ WAKE_BCAST, Config5, BWF },
		{ WAKE_MCAST, Config5, MWF },
		{ WAKE_ANY,   Config5, LanWake }
	};

	spin_lock_irq(&tp->lock);

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	for (i = 0; i < ARRAY_SIZE(cfg); i++) {
		u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
		if (wol->wolopts & cfg[i].opt)
			options |= cfg[i].mask;
		RTL_W8(cfg[i].reg, options);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Remembered so the suspend path knows whether to arm WoL. */
	tp->wol_enabled = (wol->wolopts) ? 1 : 0;

	spin_unlock_irq(&tp->lock);

	return 0;
}
704
1da177e4
LT
705static void rtl8169_get_drvinfo(struct net_device *dev,
706 struct ethtool_drvinfo *info)
707{
708 struct rtl8169_private *tp = netdev_priv(dev);
709
710 strcpy(info->driver, MODULENAME);
711 strcpy(info->version, RTL8169_VERSION);
712 strcpy(info->bus_info, pci_name(tp->pci_dev));
713}
714
/* ethtool get_regs_len: size of the register dump (whole MMIO window). */
static int rtl8169_get_regs_len(struct net_device *dev)
{
	return R8169_REGS_SIZE;
}
719
/*
 * TBI-mode speed setter.  TBI only supports 1000/full (forced) or
 * autonegotiation; any other request is refused with -EOPNOTSUPP.
 */
static int rtl8169_set_speed_tbi(struct net_device *dev,
				 u8 autoneg, u16 speed, u8 duplex)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int ret = 0;
	u32 reg;

	reg = RTL_R32(TBICSR);
	if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
	    (duplex == DUPLEX_FULL)) {
		RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
	} else if (autoneg == AUTONEG_ENABLE)
		RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
	else {
		if (netif_msg_link(tp)) {
			printk(KERN_WARNING "%s: "
			       "incorrect speed setting refused in TBI mode\n",
			       dev->name);
		}
		ret = -EOPNOTSUPP;
	}

	return ret;
}
745
/*
 * MII-mode speed setter: build the MII_ADVERTISE / MII_CTRL1000
 * advertisement masks for the requested autoneg/speed/duplex, cache
 * them in tp, program the PHY and restart autonegotiation.
 * Note: forced speed/duplex is still implemented by restricting the
 * advertisement and renegotiating, not by clearing BMCR_ANENABLE.
 * Always returns 0.
 */
static int rtl8169_set_speed_xmii(struct net_device *dev,
				  u8 autoneg, u16 speed, u8 duplex)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int auto_nego, giga_ctrl;

	/* Start from the current advertisement with all rate bits cleared. */
	auto_nego = mdio_read(ioaddr, MII_ADVERTISE);
	auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
		       ADVERTISE_100HALF | ADVERTISE_100FULL);
	giga_ctrl = mdio_read(ioaddr, MII_CTRL1000);
	giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);

	if (autoneg == AUTONEG_ENABLE) {
		auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
			      ADVERTISE_100HALF | ADVERTISE_100FULL);
		giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
	} else {
		if (speed == SPEED_10)
			auto_nego |= ADVERTISE_10HALF | ADVERTISE_10FULL;
		else if (speed == SPEED_100)
			auto_nego |= ADVERTISE_100HALF | ADVERTISE_100FULL;
		else if (speed == SPEED_1000)
			giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;

		if (duplex == DUPLEX_HALF)
			auto_nego &= ~(ADVERTISE_10FULL | ADVERTISE_100FULL);

		if (duplex == DUPLEX_FULL)
			auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_100HALF);

		/* This tweak comes straight from Realtek's driver. */
		if ((speed == SPEED_100) && (duplex == DUPLEX_HALF) &&
		    (tp->mac_version == RTL_GIGA_MAC_VER_13)) {
			auto_nego = ADVERTISE_100HALF | ADVERTISE_CSMA;
		}
	}

	/* The 8100e/8101e do Fast Ethernet only. */
	if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_15)) {
		if ((giga_ctrl & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) &&
		    netif_msg_link(tp)) {
			printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n",
			       dev->name);
		}
		giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
	}

	/* Always advertise symmetric + asymmetric pause. */
	auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

	/* Cache for rtl8169_gset_xmii() and the PHY watchdog. */
	tp->phy_auto_nego_reg = auto_nego;
	tp->phy_1000_ctrl_reg = giga_ctrl;

	mdio_write(ioaddr, MII_ADVERTISE, auto_nego);
	mdio_write(ioaddr, MII_CTRL1000, giga_ctrl);
	mdio_write(ioaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
	return 0;
}
806
/*
 * Dispatch to the TBI or MII speed setter.  When 1000Mbps full is
 * advertised on a running interface, arm the PHY watchdog timer to
 * work around chips that fail to establish a gigabit link.
 * Caller must hold tp->lock.
 */
static int rtl8169_set_speed(struct net_device *dev,
			     u8 autoneg, u16 speed, u8 duplex)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret;

	ret = tp->set_speed(dev, autoneg, speed, duplex);

	if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
		mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);

	return ret;
}
820
/* ethtool set_settings: apply link settings under the device lock. */
static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tp->lock, flags);
	ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex);
	spin_unlock_irqrestore(&tp->lock, flags);

	return ret;
}
833
/* ethtool get_rx_csum: non-zero iff Rx checksum offload is enabled. */
static u32 rtl8169_get_rx_csum(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return tp->cp_cmd & RxChkSum;
}
840
/*
 * ethtool set_rx_csum: toggle hardware Rx checksumming via the cached
 * CPlusCmd value, then write it out.  Always returns 0.
 */
static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);

	if (data)
		tp->cp_cmd |= RxChkSum;
	else
		tp->cp_cmd &= ~RxChkSum;

	RTL_W16(CPlusCmd, tp->cp_cmd);
	RTL_R16(CPlusCmd);	/* read back to flush the posted write */

	spin_unlock_irqrestore(&tp->lock, flags);

	return 0;
}
861
862#ifdef CONFIG_R8169_VLAN
863
/*
 * Build the opts2 VLAN insertion word for a Tx descriptor: TxVlanTag
 * plus the byte-swapped tag when VLAN is active, 0 otherwise.
 */
static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
				      struct sk_buff *skb)
{
	return (tp->vlgrp && vlan_tx_tag_present(skb)) ?
		TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
}
870
/*
 * VLAN ops: attach/detach the vlan_group and enable or disable the
 * hardware Rx VLAN tag stripping (RxVlan bit in CPlusCmd) to match.
 */
static void rtl8169_vlan_rx_register(struct net_device *dev,
				     struct vlan_group *grp)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);
	tp->vlgrp = grp;
	if (tp->vlgrp)
		tp->cp_cmd |= RxVlan;
	else
		tp->cp_cmd &= ~RxVlan;
	RTL_W16(CPlusCmd, tp->cp_cmd);
	RTL_R16(CPlusCmd);	/* flush the posted write */
	spin_unlock_irqrestore(&tp->lock, flags);
}
888
/* VLAN ops: drop the per-vid device reference when a VLAN is removed. */
static void rtl8169_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);
	vlan_group_set_device(tp->vlgrp, vid, NULL);
	spin_unlock_irqrestore(&tp->lock, flags);
}
898
/*
 * If the Rx descriptor carries a hardware-stripped VLAN tag and a
 * vlan_group is registered, hand the skb up the VLAN accel path and
 * return 0; otherwise return -1 so the caller uses the normal path.
 * opts2 is cleared either way so the descriptor can be recycled.
 */
static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
			       struct sk_buff *skb)
{
	u32 opts2 = le32_to_cpu(desc->opts2);
	int ret;

	if (tp->vlgrp && (opts2 & RxVlanTag)) {
		rtl8169_rx_hwaccel_skb(skb, tp->vlgrp,
				       swab16(opts2 & 0xffff));
		ret = 0;
	} else
		ret = -1;
	desc->opts2 = 0;
	return ret;
}
914
915#else /* !CONFIG_R8169_VLAN */
916
/* !CONFIG_R8169_VLAN stub: never insert a Tx VLAN tag. */
static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
				      struct sk_buff *skb)
{
	return 0;
}
922
/* !CONFIG_R8169_VLAN stub: always take the non-VLAN Rx path. */
static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
			       struct sk_buff *skb)
{
	return -1;
}
928
929#endif
930
/*
 * TBI-mode half of ethtool get_settings: TBI is fibre, always
 * 1000/full; only the autoneg flag is read back from TBICSR.
 */
static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 status;

	cmd->supported =
		SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
	cmd->port = PORT_FIBRE;
	cmd->transceiver = XCVR_INTERNAL;

	status = RTL_R32(TBICSR);
	cmd->advertising = (status & TBINwEnable) ?  ADVERTISED_Autoneg : 0;
	cmd->autoneg = !!(status & TBINwEnable);

	cmd->speed = SPEED_1000;
	cmd->duplex = DUPLEX_FULL; /* Always set */
}
949
/*
 * MII-mode half of ethtool get_settings: rebuild the advertising mask
 * from the values cached by rtl8169_set_speed_xmii() and read the
 * negotiated speed/duplex/pause state from the PHYstatus register.
 */
static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u8 status;

	cmd->supported = SUPPORTED_10baseT_Half |
			 SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half |
			 SUPPORTED_100baseT_Full |
			 SUPPORTED_1000baseT_Full |
			 SUPPORTED_Autoneg |
			 SUPPORTED_TP;

	/* NOTE(review): autoneg is reported as always on — the driver
	 * forces speeds via restricted advertisement, see set_speed_xmii. */
	cmd->autoneg = 1;
	cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;

	if (tp->phy_auto_nego_reg & ADVERTISE_10HALF)
		cmd->advertising |= ADVERTISED_10baseT_Half;
	if (tp->phy_auto_nego_reg & ADVERTISE_10FULL)
		cmd->advertising |= ADVERTISED_10baseT_Full;
	if (tp->phy_auto_nego_reg & ADVERTISE_100HALF)
		cmd->advertising |= ADVERTISED_100baseT_Half;
	if (tp->phy_auto_nego_reg & ADVERTISE_100FULL)
		cmd->advertising |= ADVERTISED_100baseT_Full;
	if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)
		cmd->advertising |= ADVERTISED_1000baseT_Full;

	status = RTL_R8(PHYstatus);

	if (status & _1000bpsF)
		cmd->speed = SPEED_1000;
	else if (status & _100bps)
		cmd->speed = SPEED_100;
	else if (status & _10bps)
		cmd->speed = SPEED_10;

	if (status & TxFlowCtrl)
		cmd->advertising |= ADVERTISED_Asym_Pause;
	if (status & RxFlowCtrl)
		cmd->advertising |= ADVERTISED_Pause;

	cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ?
		      DUPLEX_FULL : DUPLEX_HALF;
}
995
/* ethtool get_settings: dispatch to the TBI/MII hook under the lock. */
static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);

	tp->get_settings(dev, cmd);

	spin_unlock_irqrestore(&tp->lock, flags);
	return 0;
}
1008
1009static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1010 void *p)
1011{
5b0384f4
FR
1012 struct rtl8169_private *tp = netdev_priv(dev);
1013 unsigned long flags;
1da177e4 1014
5b0384f4
FR
1015 if (regs->len > R8169_REGS_SIZE)
1016 regs->len = R8169_REGS_SIZE;
1da177e4 1017
5b0384f4
FR
1018 spin_lock_irqsave(&tp->lock, flags);
1019 memcpy_fromio(p, tp->mmio_addr, regs->len);
1020 spin_unlock_irqrestore(&tp->lock, flags);
1da177e4
LT
1021}
1022
b57b7e5a
SH
/* ethtool get_msglevel: report the driver's netif message-enable bitmap. */
static u32 rtl8169_get_msglevel(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return tp->msg_enable;
}
1029
/* ethtool set_msglevel: set the driver's netif message-enable bitmap. */
static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->msg_enable = value;
}
1036
d4a3a0fc
SH
/*
 * ethtool statistics names.  The order must match the data[] indices
 * filled in rtl8169_get_ethtool_stats() and, transitively, the field
 * order of struct rtl8169_counters.
 */
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"unicast",
	"broadcast",
	"multicast",
	"tx_aborted",
	"tx_underrun",
};
1052
/*
 * Hardware statistics block DMA-ed by the chip on a CounterDump request
 * (see rtl8169_get_ethtool_stats).  Field order and widths mirror the
 * hardware layout; values arrive little-endian and are converted with
 * le*_to_cpu by the reader.
 * NOTE(review): "tx_underun" spelling is kept as-is to avoid touching
 * every user of the struct.
 */
struct rtl8169_counters {
	u64 tx_packets;
	u64 rx_packets;
	u64 tx_errors;
	u32 rx_errors;
	u16 rx_missed;
	u16 align_errors;
	u32 tx_one_collision;
	u32 tx_multi_collision;
	u64 rx_unicast;
	u64 rx_broadcast;
	u32 rx_multicast;
	u16 tx_aborted;
	u16 tx_underun;
};
1068
/* ethtool get_stats_count: number of statistics strings/values exported. */
static int rtl8169_get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(rtl8169_gstrings);
}
1073
/*
 * ethtool get_ethtool_stats: ask the chip to DMA its hardware counter
 * block into a coherent buffer, then copy the values out.
 *
 * The handshake is order-critical: program CounterAddrHigh/Low with the
 * DMA address, set the CounterDump bit, and poll until the chip clears
 * it.  On timeout (interrupted sleep) the partially filled buffer is
 * still copied — values may then be stale.
 */
static void rtl8169_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct rtl8169_counters *counters;
	dma_addr_t paddr;
	u32 cmd;

	ASSERT_RTNL();

	counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr);
	if (!counters)
		return;

	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
	cmd = (u64)paddr & DMA_32BIT_MASK;
	RTL_W32(CounterAddrLow, cmd);
	/* Setting CounterDump triggers the hardware dump. */
	RTL_W32(CounterAddrLow, cmd | CounterDump);

	/* The chip clears CounterDump when the transfer is complete. */
	while (RTL_R32(CounterAddrLow) & CounterDump) {
		if (msleep_interruptible(1))
			break;
	}

	RTL_W32(CounterAddrLow, 0);
	RTL_W32(CounterAddrHigh, 0);

	/* Index order must match rtl8169_gstrings[]. */
	data[0] = le64_to_cpu(counters->tx_packets);
	data[1] = le64_to_cpu(counters->rx_packets);
	data[2] = le64_to_cpu(counters->tx_errors);
	data[3] = le32_to_cpu(counters->rx_errors);
	data[4] = le16_to_cpu(counters->rx_missed);
	data[5] = le16_to_cpu(counters->align_errors);
	data[6] = le32_to_cpu(counters->tx_one_collision);
	data[7] = le32_to_cpu(counters->tx_multi_collision);
	data[8] = le64_to_cpu(counters->rx_unicast);
	data[9] = le64_to_cpu(counters->rx_broadcast);
	data[10] = le32_to_cpu(counters->rx_multicast);
	data[11] = le16_to_cpu(counters->tx_aborted);
	data[12] = le16_to_cpu(counters->tx_underun);

	pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr);
}
1118
1119static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1120{
1121 switch(stringset) {
1122 case ETH_SS_STATS:
1123 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1124 break;
1125 }
1126}
1127
1128
/* ethtool operations wired up at probe time via SET_ETHTOOL_OPS(). */
static const struct ethtool_ops rtl8169_ethtool_ops = {
	.get_drvinfo		= rtl8169_get_drvinfo,
	.get_regs_len		= rtl8169_get_regs_len,
	.get_link		= ethtool_op_get_link,
	.get_settings		= rtl8169_get_settings,
	.set_settings		= rtl8169_set_settings,
	.get_msglevel		= rtl8169_get_msglevel,
	.set_msglevel		= rtl8169_set_msglevel,
	.get_rx_csum		= rtl8169_get_rx_csum,
	.set_rx_csum		= rtl8169_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= ethtool_op_set_tso,
	.get_regs		= rtl8169_get_regs,
	.get_wol		= rtl8169_get_wol,
	.set_wol		= rtl8169_set_wol,
	.get_strings		= rtl8169_get_strings,
	.get_stats_count	= rtl8169_get_stats_count,
	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
1153
1154static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum,
1155 int bitval)
1156{
1157 int val;
1158
1159 val = mdio_read(ioaddr, reg);
1160 val = (bitval == 1) ?
1161 val | (bitval << bitnum) : val & ~(0x0001 << bitnum);
5b0384f4 1162 mdio_write(ioaddr, reg, val & 0xffff);
1da177e4
LT
1163}
1164
/*
 * Decode tp->mac_version from the identification bits of TxConfig.
 *
 * The table is scanned most-specific mask first; the 0x00000000
 * catch-all guarantees termination with RTL_GIGA_MAC_VER_01.
 */
static void rtl8169_get_mac_version(struct rtl8169_private *tp, void __iomem *ioaddr)
{
	const struct {
		u32 mask;
		int mac_version;
	} mac_info[] = {
		{ 0x38800000,	RTL_GIGA_MAC_VER_15 },
		{ 0x38000000,	RTL_GIGA_MAC_VER_12 },
		{ 0x34000000,	RTL_GIGA_MAC_VER_13 },
		{ 0x30800000,	RTL_GIGA_MAC_VER_14 },
		{ 0x30000000,	RTL_GIGA_MAC_VER_11 },
		{ 0x18000000,	RTL_GIGA_MAC_VER_05 },
		{ 0x10000000,	RTL_GIGA_MAC_VER_04 },
		{ 0x04000000,	RTL_GIGA_MAC_VER_03 },
		{ 0x00800000,	RTL_GIGA_MAC_VER_02 },
		{ 0x00000000,	RTL_GIGA_MAC_VER_01 }	/* Catch-all */
	}, *p = mac_info;
	u32 reg;

	reg = RTL_R32(TxConfig) & 0x7c800000;
	while ((reg & p->mask) != p->mask)
		p++;
	tp->mac_version = p->mac_version;
}
1189
/* Debug helper: dump the detected MAC version (no-op unless dprintk is live). */
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
	dprintk("mac_version = 0x%02x\n", tp->mac_version);
}
1194
/*
 * Decode tp->phy_version from the low bits of the PHY ID2 register.
 * The zero-mask catch-all entry guarantees the scan terminates with
 * RTL_GIGA_PHY_VER_D.
 */
static void rtl8169_get_phy_version(struct rtl8169_private *tp, void __iomem *ioaddr)
{
	const struct {
		u16 mask;
		u16 set;
		int phy_version;
	} phy_info[] = {
		{ 0x000f, 0x0002, RTL_GIGA_PHY_VER_G },
		{ 0x000f, 0x0001, RTL_GIGA_PHY_VER_F },
		{ 0x000f, 0x0000, RTL_GIGA_PHY_VER_E },
		{ 0x0000, 0x0000, RTL_GIGA_PHY_VER_D } /* Catch-all */
	}, *p = phy_info;
	u16 reg;

	reg = mdio_read(ioaddr, MII_PHYSID2) & 0xffff;
	while ((reg & p->mask) != p->set)
		p++;
	tp->phy_version = p->phy_version;
}
1214
1215static void rtl8169_print_phy_version(struct rtl8169_private *tp)
1216{
1217 struct {
1218 int version;
1219 char *msg;
1220 u32 reg;
1221 } phy_print[] = {
1222 { RTL_GIGA_PHY_VER_G, "RTL_GIGA_PHY_VER_G", 0x0002 },
1223 { RTL_GIGA_PHY_VER_F, "RTL_GIGA_PHY_VER_F", 0x0001 },
1224 { RTL_GIGA_PHY_VER_E, "RTL_GIGA_PHY_VER_E", 0x0000 },
1225 { RTL_GIGA_PHY_VER_D, "RTL_GIGA_PHY_VER_D", 0x0000 },
1226 { 0, NULL, 0x0000 }
1227 }, *p;
1228
1229 for (p = phy_print; p->msg; p++) {
1230 if (tp->phy_version == p->version) {
1231 dprintk("phy_version == %s (%04x)\n", p->msg, p->reg);
1232 return;
1233 }
1234 }
1235 dprintk("phy_version == Unknown\n");
1236}
1237
/*
 * Apply Realtek's undocumented PHY initialization ("magic") sequences
 * over MDIO.  Only runs for MAC versions newer than VER_01 whose PHY is
 * older than VER_H; newer PHYs need no fixup.  The write order below is
 * vendor-prescribed — do not reorder.
 */
static void rtl8169_hw_phy_config(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct {
		u16 regs[5]; /* Beware of bit-sign propagation */
	} phy_magic[5] = { {
		{ 0x0000,	//w 4 15 12 0
		  0x00a1,	//w 3 15 0 00a1
		  0x0008,	//w 2 15 0 0008
		  0x1020,	//w 1 15 0 1020
		  0x1000 } },{	//w 0 15 0 1000
		{ 0x7000,	//w 4 15 12 7
		  0xff41,	//w 3 15 0 ff41
		  0xde60,	//w 2 15 0 de60
		  0x0140,	//w 1 15 0 0140
		  0x0077 } },{	//w 0 15 0 0077
		{ 0xa000,	//w 4 15 12 a
		  0xdf01,	//w 3 15 0 df01
		  0xdf20,	//w 2 15 0 df20
		  0xff95,	//w 1 15 0 ff95
		  0xfa00 } },{	//w 0 15 0 fa00
		{ 0xb000,	//w 4 15 12 b
		  0xff41,	//w 3 15 0 ff41
		  0xde20,	//w 2 15 0 de20
		  0x0140,	//w 1 15 0 0140
		  0x00bb } },{	//w 0 15 0 00bb
		{ 0xf000,	//w 4 15 12 f
		  0xdf01,	//w 3 15 0 df01
		  0xdf20,	//w 2 15 0 df20
		  0xff95,	//w 1 15 0 ff95
		  0xbf00 }	//w 0 15 0 bf00
		}
	}, *p = phy_magic;
	int i;

	rtl8169_print_mac_version(tp);
	rtl8169_print_phy_version(tp);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
		return;
	if (tp->phy_version >= RTL_GIGA_PHY_VER_H)
		return;

	dprintk("MAC version != 0 && PHY version == 0 or 1\n");
	dprintk("Do final_reg2.cfg\n");

	/* Shazam ! */

	/* VER_04 needs only a single register poke on MDIO page 2. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_04) {
		mdio_write(ioaddr, 31, 0x0002);
		mdio_write(ioaddr, 1, 0x90d0);
		mdio_write(ioaddr, 31, 0x0000);
		return;
	}

	/* phy config for RTL8169s mac_version C chip */
	mdio_write(ioaddr, 31, 0x0001);			//w 31 2 0 1
	mdio_write(ioaddr, 21, 0x1000);			//w 21 15 0 1000
	mdio_write(ioaddr, 24, 0x65c7);			//w 24 15 0 65c7
	rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0);	//w 4 11 11 0

	for (i = 0; i < ARRAY_SIZE(phy_magic); i++, p++) {
		int val, pos = 4;

		/*
		 * Merge regs[0] into reg 4's upper nibble, then write
		 * regs[1..4] to PHY registers 3..0, latching each group
		 * by toggling bit 11 of register 4.
		 */
		val = (mdio_read(ioaddr, pos) & 0x0fff) | (p->regs[0] & 0xffff);
		mdio_write(ioaddr, pos, val);
		while (--pos >= 0)
			mdio_write(ioaddr, pos, p->regs[4 - pos] & 0xffff);
		rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 1); //w 4 11 11 1
		rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0
	}
	mdio_write(ioaddr, 31, 0x0000); //w 31 2 0 0
}
1312
/*
 * Periodic PHY watchdog (only armed for chips that need it — see
 * rtl8169_request_timer).  While 1000FULL is advertised and the link is
 * down, keep resetting the PHY until the link comes up.  Re-arms itself
 * unless the link is already up.
 */
static void rtl8169_phy_timer(unsigned long __opaque)
{
	struct net_device *dev = (struct net_device *)__opaque;
	struct rtl8169_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long timeout = RTL8169_PHY_TIMEOUT;

	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
	assert(tp->phy_version < RTL_GIGA_PHY_VER_H);

	/* Nothing to watchdog unless gigabit is being advertised. */
	if (!(tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
		return;

	spin_lock_irq(&tp->lock);

	if (tp->phy_reset_pending(ioaddr)) {
		/*
		 * A busy loop could burn quite a few cycles on nowadays CPU.
		 * Let's delay the execution of the timer for a few ticks.
		 */
		timeout = HZ/10;
		goto out_mod_timer;
	}

	if (tp->link_ok(ioaddr))
		goto out_unlock;

	if (netif_msg_link(tp))
		printk(KERN_WARNING "%s: PHY reset until link up\n", dev->name);

	tp->phy_reset_enable(ioaddr);

out_mod_timer:
	mod_timer(timer, jiffies + timeout);
out_unlock:
	spin_unlock_irq(&tp->lock);
}
1351
1352static inline void rtl8169_delete_timer(struct net_device *dev)
1353{
1354 struct rtl8169_private *tp = netdev_priv(dev);
1355 struct timer_list *timer = &tp->timer;
1356
bcf0bf90 1357 if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) ||
1da177e4
LT
1358 (tp->phy_version >= RTL_GIGA_PHY_VER_H))
1359 return;
1360
1361 del_timer_sync(timer);
1362}
1363
1364static inline void rtl8169_request_timer(struct net_device *dev)
1365{
1366 struct rtl8169_private *tp = netdev_priv(dev);
1367 struct timer_list *timer = &tp->timer;
1368
bcf0bf90 1369 if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) ||
1da177e4
LT
1370 (tp->phy_version >= RTL_GIGA_PHY_VER_H))
1371 return;
1372
2efa53f3 1373 mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT);
1da177e4
LT
1374}
1375
1376#ifdef CONFIG_NET_POLL_CONTROLLER
1377/*
1378 * Polling 'interrupt' - used by things like netconsole to send skbs
1379 * without having to re-enable interrupts. It's not called while
1380 * the interrupt routine is executing.
1381 */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	/* Run the ISR by hand with the real interrupt masked off. */
	disable_irq(pdev->irq);
	rtl8169_interrupt(pdev->irq, dev);
	enable_irq(pdev->irq);
}
1391#endif
1392
/*
 * Undo the probe-time board setup: unmap MMIO, release the PCI regions,
 * disable the device and free the netdev — strictly in reverse order of
 * acquisition.
 */
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
				  void __iomem *ioaddr)
{
	iounmap(ioaddr);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
1401
bf793295
FR
1402static void rtl8169_phy_reset(struct net_device *dev,
1403 struct rtl8169_private *tp)
1404{
1405 void __iomem *ioaddr = tp->mmio_addr;
1406 int i;
1407
1408 tp->phy_reset_enable(ioaddr);
1409 for (i = 0; i < 100; i++) {
1410 if (!tp->phy_reset_pending(ioaddr))
1411 return;
1412 msleep(1);
1413 }
1414 if (netif_msg_link(tp))
1415 printk(KERN_ERR "%s: PHY reset failed.\n", dev->name);
1416}
1417
4ff96fa6
FR
/*
 * Probe-time PHY bring-up: apply vendor PHY magic, per-chip PCI/MAC
 * fixups, then reset the PHY and program the user-requested (module
 * parameter) link settings.
 *
 * NOTE(review): the static board_idx assumes probes happen in a fixed
 * order; with hotplug the index may not match the module parameter
 * array as the user expects — confirm against rtl8169_link_option().
 */
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static int board_idx = -1;
	u8 autoneg, duplex;
	u16 speed;

	board_idx++;

	rtl8169_hw_phy_config(dev);

	dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
	RTL_W8(0x82, 0x01);

	if (tp->mac_version < RTL_GIGA_MAC_VER_03) {
		dprintk("Set PCI Latency=0x40\n");
		pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
	}

	if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
		dprintk("Set PHY Reg 0x0bh = 0x00h\n");
		mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0
	}

	rtl8169_link_option(board_idx, &autoneg, &speed, &duplex);

	rtl8169_phy_reset(dev, tp);

	rtl8169_set_speed(dev, autoneg, speed, duplex);

	if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
		printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name);
}
1453
5f787a1a
FR
/*
 * MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG), installed as
 * dev->do_ioctl for non-TBI boards only.  Register writes require
 * CAP_NET_ADMIN.
 */
static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (!netif_running(dev))
		return -ENODEV;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = 32; /* Internal PHY */
		return 0;

	case SIOCGMIIREG:
		data->val_out = mdio_read(tp->mmio_addr, data->reg_num & 0x1f);
		return 0;

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		mdio_write(tp->mmio_addr, data->reg_num & 0x1f, data->val_in);
		return 0;
	}
	return -EOPNOTSUPP;
}
1479
/*
 * PCI probe: allocate and initialize the netdev, enable and map the
 * device, identify the chip, select TBI vs. XMII operations, then
 * register the interface and bring up the PHY.
 *
 * Error handling uses a numbered goto ladder that unwinds acquisitions
 * in reverse order.
 */
static int __devinit
rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const unsigned int region = rtl_cfg_info[ent->driver_data].region;
	struct rtl8169_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	unsigned int pm_cap;
	int i, rc;

	if (netif_msg_drv(&debug)) {
		printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
		       MODULENAME, RTL8169_VERSION);
	}

	dev = alloc_etherdev(sizeof (*tp));
	if (!dev) {
		if (netif_msg_drv(&debug))
			dev_err(&pdev->dev, "unable to alloc new ethernet\n");
		rc = -ENOMEM;
		goto out;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);

	/* enable device (incl. PCI PM wakeup and hotplug setup) */
	rc = pci_enable_device(pdev);
	if (rc < 0) {
		if (netif_msg_probe(tp))
			dev_err(&pdev->dev, "enable failure\n");
		goto err_out_free_dev_1;
	}

	rc = pci_set_mwi(pdev);
	if (rc < 0)
		goto err_out_disable_2;

	/* save power state before pci_enable_device overwrites it */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap) {
		u16 pwr_command, acpi_idle_state;

		pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command);
		acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
	} else {
		if (netif_msg_probe(tp)) {
			dev_err(&pdev->dev,
				"PowerManagement capability not found.\n");
		}
	}

	/* make sure PCI base addr 1 is MMIO */
	if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
		if (netif_msg_probe(tp)) {
			dev_err(&pdev->dev,
				"region #%d not an MMIO resource, aborting\n",
				region);
		}
		rc = -ENODEV;
		goto err_out_mwi_3;
	}

	/* check for weird/broken PCI region reporting */
	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
		if (netif_msg_probe(tp)) {
			dev_err(&pdev->dev,
				"Invalid PCI region size(s), aborting\n");
		}
		rc = -ENODEV;
		goto err_out_mwi_3;
	}

	rc = pci_request_regions(pdev, MODULENAME);
	if (rc < 0) {
		if (netif_msg_probe(tp))
			dev_err(&pdev->dev, "could not request regions.\n");
		goto err_out_mwi_3;
	}

	tp->cp_cmd = PCIMulRW | RxChkSum;

	/* Prefer 64-bit DMA when available and requested via use_dac. */
	if ((sizeof(dma_addr_t) > 4) &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK) && use_dac) {
		tp->cp_cmd |= PCIDAC;
		dev->features |= NETIF_F_HIGHDMA;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc < 0) {
			if (netif_msg_probe(tp)) {
				dev_err(&pdev->dev,
					"DMA configuration failed.\n");
			}
			goto err_out_free_res_4;
		}
	}

	pci_set_master(pdev);

	/* ioremap MMIO region */
	ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
	if (!ioaddr) {
		if (netif_msg_probe(tp))
			dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
		rc = -EIO;
		goto err_out_free_res_4;
	}

	/* Unneeded ? Don't mess with Mrs. Murphy. */
	rtl8169_irq_mask_and_ack(ioaddr);

	/* Soft reset the chip. */
	RTL_W8(ChipCmd, CmdReset);

	/* Check that the chip has finished the reset. */
	for (i = 100; i > 0; i--) {
		if ((RTL_R8(ChipCmd) & CmdReset) == 0)
			break;
		msleep_interruptible(1);
	}

	/* Identify chip attached to board */
	rtl8169_get_mac_version(tp, ioaddr);
	rtl8169_get_phy_version(tp, ioaddr);

	rtl8169_print_mac_version(tp);
	rtl8169_print_phy_version(tp);

	for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) {
		if (tp->mac_version == rtl_chip_info[i].mac_version)
			break;
	}
	if (i < 0) {
		/* Unknown chip: assume array element #0, original RTL-8169 */
		if (netif_msg_probe(tp)) {
			dev_printk(KERN_DEBUG, &pdev->dev,
				   "unknown chip version, assuming %s\n",
				   rtl_chip_info[0].name);
		}
		i++;
	}
	tp->chipset = i;

	/* Enable PME and clear its status under the config write lock. */
	RTL_W8(Cfg9346, Cfg9346_Unlock);
	RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
	RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Select TBI (fiber) or internal XMII PHY operations. */
	if (RTL_R8(PHYstatus) & TBI_Enable) {
		tp->set_speed = rtl8169_set_speed_tbi;
		tp->get_settings = rtl8169_gset_tbi;
		tp->phy_reset_enable = rtl8169_tbi_reset_enable;
		tp->phy_reset_pending = rtl8169_tbi_reset_pending;
		tp->link_ok = rtl8169_tbi_link_ok;

		tp->phy_1000_ctrl_reg = ADVERTISE_1000FULL; /* Implied by TBI */
	} else {
		tp->set_speed = rtl8169_set_speed_xmii;
		tp->get_settings = rtl8169_gset_xmii;
		tp->phy_reset_enable = rtl8169_xmii_reset_enable;
		tp->phy_reset_pending = rtl8169_xmii_reset_pending;
		tp->link_ok = rtl8169_xmii_link_ok;

		dev->do_ioctl = rtl8169_ioctl;
	}

	/* Get MAC address.  FIXME: read EEPROM */
	for (i = 0; i < MAC_ADDR_LEN; i++)
		dev->dev_addr[i] = RTL_R8(MAC0 + i);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->open = rtl8169_open;
	dev->hard_start_xmit = rtl8169_start_xmit;
	dev->get_stats = rtl8169_get_stats;
	SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
	dev->stop = rtl8169_close;
	dev->tx_timeout = rtl8169_tx_timeout;
	dev->set_multicast_list = rtl8169_set_rx_mode;
	dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) ioaddr;
	dev->change_mtu = rtl8169_change_mtu;

#ifdef CONFIG_R8169_NAPI
	dev->poll = rtl8169_poll;
	dev->weight = R8169_NAPI_WEIGHT;
#endif

#ifdef CONFIG_R8169_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = rtl8169_vlan_rx_register;
	dev->vlan_rx_kill_vid = rtl8169_vlan_rx_kill_vid;
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = rtl8169_netpoll;
#endif

	tp->intr_mask = 0xffff;
	tp->pci_dev = pdev;
	tp->mmio_addr = ioaddr;
	tp->align = rtl_cfg_info[ent->driver_data].align;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) dev;
	tp->timer.function = rtl8169_phy_timer;

	spin_lock_init(&tp->lock);

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_out_unmap_5;

	pci_set_drvdata(pdev, dev);

	if (netif_msg_probe(tp)) {
		printk(KERN_INFO "%s: %s at 0x%lx, "
		       "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
		       "IRQ %d\n",
		       dev->name,
		       rtl_chip_info[tp->chipset].name,
		       dev->base_addr,
		       dev->dev_addr[0], dev->dev_addr[1],
		       dev->dev_addr[2], dev->dev_addr[3],
		       dev->dev_addr[4], dev->dev_addr[5], dev->irq);
	}

	rtl8169_init_phy(dev, tp);

out:
	return rc;

err_out_unmap_5:
	iounmap(ioaddr);
err_out_free_res_4:
	pci_release_regions(pdev);
err_out_mwi_3:
	pci_clear_mwi(pdev);
err_out_disable_2:
	pci_disable_device(pdev);
err_out_free_dev_1:
	free_netdev(dev);
	goto out;
}
1727
/*
 * PCI remove: flush any deferred work, unregister the netdev and undo
 * all probe-time board setup.
 */
static void __devexit
rtl8169_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	assert(dev != NULL);
	assert(tp != NULL);

	/* No deferred task may outlive the device. */
	flush_scheduled_work();

	unregister_netdev(dev);
	rtl8169_release_board(pdev, dev, tp->mmio_addr);
	pci_set_drvdata(pdev, NULL);
}
1743
1da177e4
LT
1744static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
1745 struct net_device *dev)
1746{
1747 unsigned int mtu = dev->mtu;
1748
1749 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1750}
1751
/*
 * dev->open: allocate the DMA descriptor rings, populate the Rx ring,
 * then request the (shared) IRQ and start the hardware.
 *
 * request_irq() is deliberately the LAST setup step: the handler may
 * fire immediately on a shared line, so all private data it touches
 * must be fully initialized first (smp_mb() orders the writes).
 */
static int rtl8169_open(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int retval = -ENOMEM;


	rtl8169_set_rxbufsize(tp, dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent provides more.
	 */
	tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
					       &tp->TxPhyAddr);
	if (!tp->TxDescArray)
		goto out;

	tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
					       &tp->RxPhyAddr);
	if (!tp->RxDescArray)
		goto err_free_tx_0;

	retval = rtl8169_init_ring(dev);
	if (retval < 0)
		goto err_free_rx_1;

	INIT_DELAYED_WORK(&tp->task, NULL);

	/* Publish all of the above before the IRQ handler can run. */
	smp_mb();

	retval = request_irq(dev->irq, rtl8169_interrupt, IRQF_SHARED,
			     dev->name, dev);
	if (retval < 0)
		goto err_release_ring_2;

	rtl8169_hw_start(dev);

	rtl8169_request_timer(dev);

	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
out:
	return retval;

err_release_ring_2:
	rtl8169_rx_clear(tp);
err_free_rx_1:
	pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
			    tp->RxPhyAddr);
err_free_tx_0:
	pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
			    tp->TxPhyAddr);
	goto out;
}
1806
/* Mask interrupts and issue a soft reset; the final read flushes the
 * posted write over PCI. */
static void rtl8169_hw_reset(void __iomem *ioaddr)
{
	/* Disable interrupts */
	rtl8169_irq_mask_and_ack(ioaddr);

	/* Reset the chipset */
	RTL_W8(ChipCmd, CmdReset);

	/* PCI commit */
	RTL_R8(ChipCmd);
}
1818
9cb427b6
FR
1819static void rtl8169_set_rx_tx_config_registers(struct rtl8169_private *tp)
1820{
1821 void __iomem *ioaddr = tp->mmio_addr;
1822 u32 cfg = rtl8169_rx_config;
1823
1824 cfg |= (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
1825 RTL_W32(RxConfig, cfg);
1826
1827 /* Set DMA burst size and Interframe Gap Time */
1828 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
1829 (InterFrameGap << TxInterFrameGapShift));
1830}
1831
/*
 * Bring the MAC out of reset and program it for operation: per-chip
 * quirks, C+ command register, descriptor ring base addresses, Rx mode
 * and the interrupt mask.  The register write order is significant
 * (note the TxDescAddrHigh-before-Low requirement below) — do not
 * reorder.
 */
static void rtl8169_hw_start(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	u16 cmd;
	u32 i;

	/* Soft reset the chip. */
	RTL_W8(ChipCmd, CmdReset);

	/* Check that the chip has finished the reset. */
	for (i = 100; i > 0; i--) {
		if ((RTL_R8(ChipCmd) & CmdReset) == 0)
			break;
		msleep_interruptible(1);
	}

	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
	}

	if (tp->mac_version == RTL_GIGA_MAC_VER_13) {
		pci_write_config_word(pdev, 0x68, 0x00);
		pci_write_config_word(pdev, 0x69, 0x08);
	}

	/* Undocumented stuff. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		/* Realtek's r1000_n.c driver uses '&& 0x01' here. Well... */
		if ((RTL_R8(Config2) & 0x07) & 0x01)
			RTL_W32(0x7c, 0x0007ffff);

		RTL_W32(0x7c, 0x0007ff00);

		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
		cmd = cmd & 0xef;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	/* Old chips (VER_01..04) enable Tx/Rx before ring setup... */
	if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_04))
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	RTL_W8(EarlyTxThres, EarlyTxThld);

	/* Low hurts. Let's disable the filtering. */
	RTL_W16(RxMaxSize, 16383);

	if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_04))
		rtl8169_set_rx_tx_config_registers(tp);

	cmd = RTL_R16(CPlusCmd);
	RTL_W16(CPlusCmd, cmd);

	tp->cp_cmd |= cmd | PCIMulRW;

	if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
		dprintk(KERN_INFO PFX "Set MAC Reg C+CR Offset 0xE0. "
			"Bit-3 and bit-14 MUST be 1\n");
		tp->cp_cmd |= (1 << 14);
	}

	RTL_W16(CPlusCmd, tp->cp_cmd);

	/*
	 * Undocumented corner. Supposedly:
	 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
	 */
	RTL_W16(IntrMitigate, 0x0000);

	/*
	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr >> 32));
	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr & DMA_32BIT_MASK));
	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr >> 32));
	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr & DMA_32BIT_MASK));

	/* ...while newer chips enable Tx/Rx only after the rings are set. */
	if ((tp->mac_version != RTL_GIGA_MAC_VER_01) &&
	    (tp->mac_version != RTL_GIGA_MAC_VER_02) &&
	    (tp->mac_version != RTL_GIGA_MAC_VER_03) &&
	    (tp->mac_version != RTL_GIGA_MAC_VER_04)) {
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
		rtl8169_set_rx_tx_config_registers(tp);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl8169_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);

	/* Enable all known interrupts by setting the interrupt mask. */
	RTL_W16(IntrMask, rtl8169_intr_mask);

	netif_start_queue(dev);
}
1946
1947static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
1948{
1949 struct rtl8169_private *tp = netdev_priv(dev);
1950 int ret = 0;
1951
1952 if (new_mtu < ETH_ZLEN || new_mtu > SafeMtu)
1953 return -EINVAL;
1954
1955 dev->mtu = new_mtu;
1956
1957 if (!netif_running(dev))
1958 goto out;
1959
1960 rtl8169_down(dev);
1961
1962 rtl8169_set_rxbufsize(tp, dev);
1963
1964 ret = rtl8169_init_ring(dev);
1965 if (ret < 0)
1966 goto out;
1967
1968 netif_poll_enable(dev);
1969
1970 rtl8169_hw_start(dev);
1971
1972 rtl8169_request_timer(dev);
1973
1974out:
1975 return ret;
1976}
1977
/* Poison an Rx descriptor so the NIC can never DMA into it again:
 * clear ownership and point the address at a recognizable junk value. */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = 0x0badbadbadbadbadull;
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
1983
/* Unmap and free one Rx skb, then retire its descriptor from the ASIC. */
static void rtl8169_free_rx_skb(struct rtl8169_private *tp,
				struct sk_buff **sk_buff, struct RxDesc *desc)
{
	struct pci_dev *pdev = tp->pci_dev;

	pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb(*sk_buff);
	*sk_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}
1995
/* Hand a descriptor (back) to the NIC: set DescOwn and the buffer size
 * while preserving the RingEnd marker. */
static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;

	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}
2002
/*
 * Install a DMA address in a descriptor and give it to the NIC.
 * The wmb() guarantees the address is visible before DescOwn is set,
 * so the chip never sees ownership with a stale address.
 */
static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	wmb();
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}
2010
/*
 * Allocate, align, map and install one Rx skb for a descriptor.
 * Returns 0 on success, -ENOMEM if the skb could not be allocated
 * (in which case the descriptor is poisoned).
 *
 * NOTE(review): the pci_map_single() return value is not checked for
 * mapping failure — confirm whether that is acceptable on all
 * platforms this driver targets.
 */
static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
				struct RxDesc *desc, int rx_buf_sz,
				unsigned int align)
{
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret = 0;

	skb = dev_alloc_skb(rx_buf_sz + align);
	if (!skb)
		goto err_out;

	/* Advance skb->data so the buffer meets the chip's alignment. */
	skb_reserve(skb, (align - 1) & (unsigned long)skb->data);
	*sk_buff = skb;

	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
				 PCI_DMA_FROMDEVICE);

	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);

out:
	return ret;

err_out:
	ret = -ENOMEM;
	rtl8169_make_unusable_by_asic(desc);
	goto out;
}
2039
2040static void rtl8169_rx_clear(struct rtl8169_private *tp)
2041{
2042 int i;
2043
2044 for (i = 0; i < NUM_RX_DESC; i++) {
2045 if (tp->Rx_skbuff[i]) {
2046 rtl8169_free_rx_skb(tp, tp->Rx_skbuff + i,
2047 tp->RxDescArray + i);
2048 }
2049 }
2050}
2051
2052static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
2053 u32 start, u32 end)
2054{
2055 u32 cur;
5b0384f4 2056
1da177e4
LT
2057 for (cur = start; end - cur > 0; cur++) {
2058 int ret, i = cur % NUM_RX_DESC;
2059
2060 if (tp->Rx_skbuff[i])
2061 continue;
bcf0bf90 2062
1da177e4 2063 ret = rtl8169_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
bcf0bf90 2064 tp->RxDescArray + i, tp->rx_buf_sz, tp->align);
1da177e4
LT
2065 if (ret < 0)
2066 break;
2067 }
2068 return cur - start;
2069}
2070
/* Flag a descriptor as the last of the ring (the NIC wraps after it). */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
2075
2076static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
2077{
2078 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
2079}
2080
2081static int rtl8169_init_ring(struct net_device *dev)
2082{
2083 struct rtl8169_private *tp = netdev_priv(dev);
2084
2085 rtl8169_init_ring_indexes(tp);
2086
2087 memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
2088 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
2089
2090 if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
2091 goto err_out;
2092
2093 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
2094
2095 return 0;
2096
2097err_out:
2098 rtl8169_rx_clear(tp);
2099 return -ENOMEM;
2100}
2101
2102static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb,
2103 struct TxDesc *desc)
2104{
2105 unsigned int len = tx_skb->len;
2106
2107 pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
2108 desc->opts1 = 0x00;
2109 desc->opts2 = 0x00;
2110 desc->addr = 0x00;
2111 tx_skb->len = 0;
2112}
2113
/*
 * Drop every in-flight Tx buffer (error/teardown path): unmap each
 * mapped slot, free the skb attached to its last-fragment slot, and
 * account each as dropped.  Resets the Tx ring indexes.  Caller must
 * have quiesced the chip first.
 */
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = tp->dirty_tx; i < tp->dirty_tx + NUM_TX_DESC; i++) {
		unsigned int entry = i % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		unsigned int len = tx_skb->len;

		if (len) {
			struct sk_buff *skb = tx_skb->skb;

			rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb,
					     tp->TxDescArray + entry);
			/* skb is only attached to the slot of its last frag */
			if (skb) {
				dev_kfree_skb(skb);
				tx_skb->skb = NULL;
			}
			tp->stats.tx_dropped++;
		}
	}
	tp->cur_tx = tp->dirty_tx = 0;
}
2137
/*
 * (Re)arm the driver's single delayed work item with a new handler and
 * a small delay (4 jiffies) to let any pending IRQ activity settle.
 */
static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	PREPARE_DELAYED_WORK(&tp->task, task);
	schedule_delayed_work(&tp->task, 4);
}
2145
/*
 * Wait until neither the IRQ handler nor the NAPI poll can be running,
 * then ack and mask the chip's interrupts.  NAPI polling is re-enabled
 * before returning: this is a synchronization point, not a shutdown.
 */
static void rtl8169_wait_for_quiescence(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	synchronize_irq(dev->irq);

	/* Wait for any pending NAPI task to complete */
	netif_poll_disable(dev);

	rtl8169_irq_mask_and_ack(ioaddr);

	netif_poll_enable(dev);
}
2160
c4028958 2161static void rtl8169_reinit_task(struct work_struct *work)
1da177e4 2162{
c4028958
DH
2163 struct rtl8169_private *tp =
2164 container_of(work, struct rtl8169_private, task.work);
2165 struct net_device *dev = tp->dev;
1da177e4
LT
2166 int ret;
2167
eb2a021c
FR
2168 rtnl_lock();
2169
2170 if (!netif_running(dev))
2171 goto out_unlock;
2172
2173 rtl8169_wait_for_quiescence(dev);
2174 rtl8169_close(dev);
1da177e4
LT
2175
2176 ret = rtl8169_open(dev);
2177 if (unlikely(ret < 0)) {
2178 if (net_ratelimit()) {
b57b7e5a
SH
2179 struct rtl8169_private *tp = netdev_priv(dev);
2180
2181 if (netif_msg_drv(tp)) {
2182 printk(PFX KERN_ERR
2183 "%s: reinit failure (status = %d)."
2184 " Rescheduling.\n", dev->name, ret);
2185 }
1da177e4
LT
2186 }
2187 rtl8169_schedule_work(dev, rtl8169_reinit_task);
2188 }
eb2a021c
FR
2189
2190out_unlock:
2191 rtnl_unlock();
1da177e4
LT
2192}
2193
c4028958 2194static void rtl8169_reset_task(struct work_struct *work)
1da177e4 2195{
c4028958
DH
2196 struct rtl8169_private *tp =
2197 container_of(work, struct rtl8169_private, task.work);
2198 struct net_device *dev = tp->dev;
1da177e4 2199
eb2a021c
FR
2200 rtnl_lock();
2201
1da177e4 2202 if (!netif_running(dev))
eb2a021c 2203 goto out_unlock;
1da177e4
LT
2204
2205 rtl8169_wait_for_quiescence(dev);
2206
2207 rtl8169_rx_interrupt(dev, tp, tp->mmio_addr);
2208 rtl8169_tx_clear(tp);
2209
2210 if (tp->dirty_rx == tp->cur_rx) {
2211 rtl8169_init_ring_indexes(tp);
2212 rtl8169_hw_start(dev);
2213 netif_wake_queue(dev);
2214 } else {
2215 if (net_ratelimit()) {
b57b7e5a
SH
2216 struct rtl8169_private *tp = netdev_priv(dev);
2217
2218 if (netif_msg_intr(tp)) {
2219 printk(PFX KERN_EMERG
2220 "%s: Rx buffers shortage\n", dev->name);
2221 }
1da177e4
LT
2222 }
2223 rtl8169_schedule_work(dev, rtl8169_reset_task);
2224 }
eb2a021c
FR
2225
2226out_unlock:
2227 rtnl_unlock();
1da177e4
LT
2228}
2229
/*
 * net_device watchdog hook: reset the chip immediately, then schedule
 * rtl8169_reset_task to rebuild the rings outside interrupt context.
 */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_hw_reset(tp->mmio_addr);

	/* Let's wait a bit while any (async) irq lands on */
	rtl8169_schedule_work(dev, rtl8169_reset_task);
}
2239
/*
 * Map every paged fragment of @skb into consecutive Tx descriptors,
 * stamping each with @opts1 (DescOwn etc.) plus its length.  The last
 * fragment's descriptor gets LastFrag and owns the skb pointer so the
 * completion path frees the skb exactly once.  Returns the number of
 * descriptors consumed for fragments (0 for a linear skb).
 *
 * NOTE(review): pci_map_single() results are not checked — mapping
 * error handling did not exist in this API generation.
 */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 opts1)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc *txd;

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 status, len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = frag->size;
		addr = ((void *) page_address(frag->page)) + frag->page_offset;
		mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE);

		/* anti gcc 2.95.3 bugware (sic) */
		status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;
}
2277
/*
 * Build the TSO/checksum bits of a Tx descriptor's opts1 for @skb:
 * LargeSend plus the MSS when GSO is in play, else the per-protocol
 * hardware checksum flags for a CHECKSUM_PARTIAL skb, else 0.
 */
static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
{
	if (dev->features & NETIF_F_TSO) {
		u32 mss = skb_shinfo(skb)->gso_size;

		if (mss)
			return LargeSend | ((mss & MSSMask) << MSSShift);
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = skb->nh.iph;

		if (ip->protocol == IPPROTO_TCP)
			return IPCS | TCPCS;
		else if (ip->protocol == IPPROTO_UDP)
			return IPCS | UDPCS;
		/* IPv4 only: any other protocol reaching here is a bug */
		WARN_ON(1);	/* we need a WARN() */
	}
	return 0;
}
2297
/*
 * Queue one skb for transmission.
 *
 * Paged fragments are mapped first by rtl8169_xmit_frags(); the linear
 * head then goes into the current descriptor with FirstFrag (plus
 * LastFrag when the frame is linear).  DescOwn is handed to the chip
 * last, after a wmb(), so the NIC never sees a half-written descriptor.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY with the queue stopped.
 */
static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int frags, entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	dma_addr_t mapping;
	u32 status, len;
	u32 opts1;
	int ret = NETDEV_TX_OK;

	if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
		if (netif_msg_drv(tp)) {
			printk(KERN_ERR
			       "%s: BUG! Tx Ring full when queue awake!\n",
			       dev->name);
		}
		goto err_stop;
	}

	/* entry still owned by the NIC: no room, back off */
	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop;

	opts1 = DescOwn | rtl8169_tso_csum(skb, dev);

	frags = rtl8169_xmit_frags(tp, skb, opts1);
	if (frags) {
		len = skb_headlen(skb);
		opts1 |= FirstFrag;
	} else {
		len = skb->len;

		/* pad runt frames; skb_padto() frees the skb on failure */
		if (unlikely(len < ETH_ZLEN)) {
			if (skb_padto(skb, ETH_ZLEN))
				goto err_update_stats;
			len = ETH_ZLEN;
		}

		opts1 |= FirstFrag | LastFrag;
		tp->tx_skb[entry].skb = skb;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);
	txd->opts2 = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));

	/* commit addr/opts2 before DescOwn becomes visible to the NIC */
	wmb();

	/* anti gcc 2.95.3 bugware (sic) */
	status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
	txd->opts1 = cpu_to_le32(status);

	dev->trans_start = jiffies;

	tp->cur_tx += frags + 1;

	/* order cur_tx update against the completion path's smp_rmb() */
	smp_wmb();

	RTL_W8(TxPoll, 0x40);	/* set polling bit */

	if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		/* re-check: a completion may have raced with the stop */
		smp_rmb();
		if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
			netif_wake_queue(dev);
	}

out:
	return ret;

err_stop:
	netif_stop_queue(dev);
	ret = NETDEV_TX_BUSY;
err_update_stats:
	tp->stats.tx_dropped++;
	goto out;
}
2377
/*
 * SYSErr handling: log the PCI command/status registers, rewrite
 * PCI_COMMAND (dropping parity checking on chipsets flagged as having
 * broken parity), clear the sticky error bits in PCI_STATUS, disable
 * 64-bit DMA if the error hit before any packet was received, then
 * reset the chip and schedule a full reinit.
 */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	void __iomem *ioaddr = tp->mmio_addr;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	if (netif_msg_intr(tp)) {
		printk(KERN_ERR
		       "%s: PCI error (cmd = 0x%04x, status = 0x%04x).\n",
		       dev->name, pci_cmd, pci_status);
	}

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	/* write-one-to-clear the latched error bits */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	/* The infamous DAC f*ckup only happens at boot time */
	if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
		if (netif_msg_intr(tp))
			printk(KERN_INFO "%s: disabling PCI DAC.\n", dev->name);
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
	}

	rtl8169_hw_reset(ioaddr);

	rtl8169_schedule_work(dev, rtl8169_reinit_task);
}
2427
/*
 * Reclaim completed Tx descriptors: walk from dirty_tx while the chip
 * has cleared DescOwn, update byte/packet counters, unmap each buffer
 * and free the skb on its LastFrag slot.  Wakes the queue once enough
 * descriptors are free again.
 */
static void
rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
		     void __iomem *ioaddr)
{
	unsigned int dirty_tx, tx_left;

	assert(dev != NULL);
	assert(tp != NULL);
	assert(ioaddr != NULL);

	dirty_tx = tp->dirty_tx;
	smp_rmb();	/* pairs with smp_wmb() in rtl8169_start_xmit() */
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 len = tx_skb->len;
		u32 status;

		rmb();	/* read DescOwn before trusting the descriptor */
		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		if (status & DescOwn)
			break;

		tp->stats.tx_bytes += len;
		tp->stats.tx_packets++;

		rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry);

		/* the skb is attached only to its last fragment's slot */
		if (status & LastFrag) {
			dev_kfree_skb_irq(tx_skb->skb);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		smp_wmb();	/* publish dirty_tx before testing the queue */
		if (netif_queue_stopped(dev) &&
		    (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
	}
}
2475
126fa4b9
FR
2476static inline int rtl8169_fragmented_frame(u32 status)
2477{
2478 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
2479}
2480
1da177e4
LT
2481static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
2482{
2483 u32 opts1 = le32_to_cpu(desc->opts1);
2484 u32 status = opts1 & RxProtoMask;
2485
2486 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
2487 ((status == RxProtoUDP) && !(opts1 & UDPFail)) ||
2488 ((status == RxProtoIP) && !(opts1 & IPFail)))
2489 skb->ip_summed = CHECKSUM_UNNECESSARY;
2490 else
2491 skb->ip_summed = CHECKSUM_NONE;
2492}
2493
/*
 * For small frames (< rx_copybreak) copy the data into a freshly
 * allocated, aligned skb so the original ring buffer can be recycled
 * in place.  On success *sk_buff points at the copy, the descriptor is
 * handed back to the NIC (opts1 is rewritten!) and 0 is returned;
 * otherwise -1 and the caller unmaps and delivers the original skb.
 *
 * NOTE(review): the copy does not inherit ip_summed from the source
 * skb — the caller must propagate the hardware checksum status.
 */
static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
				      struct RxDesc *desc, int rx_buf_sz,
				      unsigned int align)
{
	int ret = -1;

	if (pkt_size < rx_copybreak) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(pkt_size + align);
		if (skb) {
			skb_reserve(skb, (align - 1) & (unsigned long)skb->data);
			eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
			*sk_buff = skb;
			rtl8169_mark_to_asic(desc, rx_buf_sz);
			ret = 0;
		}
	}
	return ret;
}
2514
2515static int
2516rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
2517 void __iomem *ioaddr)
2518{
2519 unsigned int cur_rx, rx_left;
2520 unsigned int delta, count;
2521
2522 assert(dev != NULL);
2523 assert(tp != NULL);
2524 assert(ioaddr != NULL);
2525
2526 cur_rx = tp->cur_rx;
2527 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
2528 rx_left = rtl8169_rx_quota(rx_left, (u32) dev->quota);
2529
4dcb7d33 2530 for (; rx_left > 0; rx_left--, cur_rx++) {
1da177e4 2531 unsigned int entry = cur_rx % NUM_RX_DESC;
126fa4b9 2532 struct RxDesc *desc = tp->RxDescArray + entry;
1da177e4
LT
2533 u32 status;
2534
2535 rmb();
126fa4b9 2536 status = le32_to_cpu(desc->opts1);
1da177e4
LT
2537
2538 if (status & DescOwn)
2539 break;
4dcb7d33 2540 if (unlikely(status & RxRES)) {
b57b7e5a
SH
2541 if (netif_msg_rx_err(tp)) {
2542 printk(KERN_INFO
2543 "%s: Rx ERROR. status = %08x\n",
2544 dev->name, status);
2545 }
1da177e4
LT
2546 tp->stats.rx_errors++;
2547 if (status & (RxRWT | RxRUNT))
2548 tp->stats.rx_length_errors++;
2549 if (status & RxCRC)
2550 tp->stats.rx_crc_errors++;
9dccf611
FR
2551 if (status & RxFOVF) {
2552 rtl8169_schedule_work(dev, rtl8169_reset_task);
2553 tp->stats.rx_fifo_errors++;
2554 }
126fa4b9 2555 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
1da177e4 2556 } else {
1da177e4
LT
2557 struct sk_buff *skb = tp->Rx_skbuff[entry];
2558 int pkt_size = (status & 0x00001FFF) - 4;
2559 void (*pci_action)(struct pci_dev *, dma_addr_t,
2560 size_t, int) = pci_dma_sync_single_for_device;
2561
126fa4b9
FR
2562 /*
2563 * The driver does not support incoming fragmented
2564 * frames. They are seen as a symptom of over-mtu
2565 * sized frames.
2566 */
2567 if (unlikely(rtl8169_fragmented_frame(status))) {
2568 tp->stats.rx_dropped++;
2569 tp->stats.rx_length_errors++;
2570 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
4dcb7d33 2571 continue;
126fa4b9
FR
2572 }
2573
1da177e4 2574 rtl8169_rx_csum(skb, desc);
bcf0bf90 2575
1da177e4
LT
2576 pci_dma_sync_single_for_cpu(tp->pci_dev,
2577 le64_to_cpu(desc->addr), tp->rx_buf_sz,
2578 PCI_DMA_FROMDEVICE);
2579
2580 if (rtl8169_try_rx_copy(&skb, pkt_size, desc,
bcf0bf90 2581 tp->rx_buf_sz, tp->align)) {
1da177e4
LT
2582 pci_action = pci_unmap_single;
2583 tp->Rx_skbuff[entry] = NULL;
2584 }
2585
2586 pci_action(tp->pci_dev, le64_to_cpu(desc->addr),
2587 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
2588
2589 skb->dev = dev;
2590 skb_put(skb, pkt_size);
2591 skb->protocol = eth_type_trans(skb, dev);
2592
2593 if (rtl8169_rx_vlan_skb(tp, desc, skb) < 0)
2594 rtl8169_rx_skb(skb);
2595
2596 dev->last_rx = jiffies;
2597 tp->stats.rx_bytes += pkt_size;
2598 tp->stats.rx_packets++;
2599 }
1da177e4
LT
2600 }
2601
2602 count = cur_rx - tp->cur_rx;
2603 tp->cur_rx = cur_rx;
2604
2605 delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
b57b7e5a 2606 if (!delta && count && netif_msg_intr(tp))
1da177e4
LT
2607 printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
2608 tp->dirty_rx += delta;
2609
2610 /*
2611 * FIXME: until there is periodic timer to try and refill the ring,
2612 * a temporary shortage may definitely kill the Rx process.
2613 * - disable the asic to try and avoid an overflow and kick it again
2614 * after refill ?
2615 * - how do others driver handle this condition (Uh oh...).
2616 */
b57b7e5a 2617 if ((tp->dirty_rx + NUM_RX_DESC == tp->cur_rx) && netif_msg_intr(tp))
1da177e4
LT
2618 printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name);
2619
2620 return count;
2621}
2622
/* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */
static irqreturn_t
rtl8169_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct rtl8169_private *tp = netdev_priv(dev);
	int boguscnt = max_interrupt_work;	/* cap work done per IRQ */
	void __iomem *ioaddr = tp->mmio_addr;
	int status;
	int handled = 0;

	do {
		status = RTL_R16(IntrStatus);

		/* hotplug/major error/no more work/shared irq */
		if ((status == 0xFFFF) || !status)
			break;

		handled = 1;

		if (unlikely(!netif_running(dev))) {
			rtl8169_asic_down(ioaddr);
			goto out;
		}

		status &= tp->intr_mask;
		/* FIFO overflow is acked together with the overflow bit */
		RTL_W16(IntrStatus,
			(status & RxFIFOOver) ? (status | RxOverflow) : status);

		if (!(status & rtl8169_intr_mask))
			break;

		if (unlikely(status & SYSErr)) {
			rtl8169_pcierr_interrupt(dev);
			break;
		}

		if (status & LinkChg)
			rtl8169_check_link_status(dev, tp, ioaddr);

#ifdef CONFIG_R8169_NAPI
		/* mask NAPI events and defer Rx/Tx work to the softirq */
		RTL_W16(IntrMask, rtl8169_intr_mask & ~rtl8169_napi_event);
		tp->intr_mask = ~rtl8169_napi_event;

		if (likely(netif_rx_schedule_prep(dev)))
			__netif_rx_schedule(dev);
		else if (netif_msg_intr(tp)) {
			printk(KERN_INFO "%s: interrupt %04x taken in poll\n",
			       dev->name, status);
		}
		break;
#else
		/* Rx interrupt */
		if (status & (RxOK | RxOverflow | RxFIFOOver)) {
			rtl8169_rx_interrupt(dev, tp, ioaddr);
		}
		/* Tx interrupt */
		if (status & (TxOK | TxErr))
			rtl8169_tx_interrupt(dev, tp, ioaddr);
#endif

		boguscnt--;
	} while (boguscnt > 0);

	if (boguscnt <= 0) {
		if (netif_msg_intr(tp) && net_ratelimit() ) {
			printk(KERN_WARNING
			       "%s: Too much work at interrupt!\n", dev->name);
		}
		/* Clear all interrupt sources. */
		RTL_W16(IntrStatus, 0xffff);
	}
out:
	return IRQ_RETVAL(handled);
}
2698
#ifdef CONFIG_R8169_NAPI
/*
 * NAPI poll: run Rx then reclaim Tx, charge the budget, and once all
 * work is done re-enable chip interrupts (after an smp_wmb() to keep
 * the IRQ handler's view of intr_mask consistent).  Returns nonzero
 * while more work remains.
 */
static int rtl8169_poll(struct net_device *dev, int *budget)
{
	unsigned int work_done, work_to_do = min(*budget, dev->quota);
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	work_done = rtl8169_rx_interrupt(dev, tp, ioaddr);
	rtl8169_tx_interrupt(dev, tp, ioaddr);

	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done < work_to_do) {
		netif_rx_complete(dev);
		tp->intr_mask = 0xffff;
		/*
		 * 20040426: the barrier is not strictly required but the
		 * behavior of the irq handler could be less predictable
		 * without it. Btw, the lack of flush for the posted pci
		 * write is safe - FR
		 */
		smp_wmb();
		RTL_W16(IntrMask, rtl8169_intr_mask);
	}

	return (work_done >= work_to_do);
}
#endif
2728
/*
 * Quiesce the chip and drain both rings.  The asic-down sequence may be
 * issued more than once: if interrupts are found re-enabled afterwards
 * (a racing rtl8169_poll on the change_mtu path re-armed them), the
 * whole sequence loops back to core_down.
 */
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;
	unsigned int intrmask;

	rtl8169_delete_timer(dev);

	netif_stop_queue(dev);

core_down:
	spin_lock_irq(&tp->lock);

	rtl8169_asic_down(ioaddr);

	/* Update the error counts. */
	tp->stats.rx_missed_errors += RTL_R32(RxMissed);
	RTL_W32(RxMissed, 0);

	spin_unlock_irq(&tp->lock);

	synchronize_irq(dev->irq);

	/* disable NAPI polling only once across the core_down loop */
	if (!poll_locked) {
		netif_poll_disable(dev);
		poll_locked++;
	}

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_sched();	/* FIXME: should this be synchronize_irq()? */

	/*
	 * And now for the 50k$ question: are IRQ disabled or not ?
	 *
	 * Two paths lead here:
	 * 1) dev->close
	 *    -> netif_running() is available to sync the current code and the
	 *       IRQ handler. See rtl8169_interrupt for details.
	 * 2) dev->change_mtu
	 *    -> rtl8169_poll can not be issued again and re-enable the
	 *       interruptions. Let's simply issue the IRQ down sequence again.
	 *
	 * No loop if hotpluged or major error (0xffff).
	 */
	intrmask = RTL_R16(IntrMask);
	if (intrmask && (intrmask != 0xffff))
		goto core_down;

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);
}
2782
/*
 * net_device ->stop hook: quiesce the hardware, release the IRQ, re-arm
 * NAPI for a later open, and free both descriptor rings.
 */
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	rtl8169_down(dev);

	free_irq(dev->irq, dev);

	/* undo the netif_poll_disable() done inside rtl8169_down() */
	netif_poll_enable(dev);

	pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
			    tp->RxPhyAddr);
	pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
			    tp->TxPhyAddr);
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	return 0;
}
2803
/*
 * net_device multicast-list hook: program RxConfig and the MAR0/MAR4
 * multicast hash according to promiscuous/allmulti flags or the
 * device's multicast list.  Register writes run under tp->lock.
 */
static void
rtl8169_set_rx_mode(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	int i, rx_mode;
	u32 tmp = 0;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		if (netif_msg_link(tp)) {
			printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
			       dev->name);
		}
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		/* one CRC-derived bit per multicast address */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	tmp = rtl8169_rx_config | rx_mode |
	      (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);

	/*
	 * NOTE(review): these MAC versions get an all-ones filter instead
	 * of the computed hash — presumably a hardware filter quirk on
	 * those chips; confirm against Realtek errata.
	 */
	if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_13) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_15)) {
		mc_filter[0] = 0xffffffff;
		mc_filter[1] = 0xffffffff;
	}

	RTL_W32(RxConfig, tmp);
	RTL_W32(MAR0 + 0, mc_filter[0]);
	RTL_W32(MAR0 + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}
2861
2862/**
2863 * rtl8169_get_stats - Get rtl8169 read/write statistics
2864 * @dev: The Ethernet Device to get statistics for
2865 *
2866 * Get TX/RX statistics for rtl8169
2867 */
2868static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
2869{
2870 struct rtl8169_private *tp = netdev_priv(dev);
2871 void __iomem *ioaddr = tp->mmio_addr;
2872 unsigned long flags;
2873
2874 if (netif_running(dev)) {
2875 spin_lock_irqsave(&tp->lock, flags);
2876 tp->stats.rx_missed_errors += RTL_R32(RxMissed);
2877 RTL_W32(RxMissed, 0);
2878 spin_unlock_irqrestore(&tp->lock, flags);
2879 }
5b0384f4 2880
1da177e4
LT
2881 return &tp->stats;
2882}
2883
#ifdef CONFIG_PM

/*
 * PM suspend hook: detach the interface, stop the chip and fold the
 * Rx-missed counter into the stats, then arm wake-on-LAN (if enabled)
 * and enter the requested low-power state.  No-op when the interface
 * is down.
 */
static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	if (!netif_running(dev))
		goto out;

	netif_device_detach(dev);
	netif_stop_queue(dev);

	spin_lock_irq(&tp->lock);

	rtl8169_asic_down(ioaddr);

	tp->stats.rx_missed_errors += RTL_R32(RxMissed);
	RTL_W32(RxMissed, 0);

	spin_unlock_irq(&tp->lock);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
out:
	return 0;
}
2913
/*
 * PM resume hook: restore PCI state, disarm wake, and schedule
 * rtl8169_reset_task to rebuild the rings and restart the chip outside
 * the resume path.  No-op when the interface is down.
 */
static int rtl8169_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (!netif_running(dev))
		goto out;

	netif_device_attach(dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	rtl8169_schedule_work(dev, rtl8169_reset_task);
out:
	return 0;
}

#endif /* CONFIG_PM */
2933
/* PCI driver descriptor: probe/remove hooks plus optional PM entry points. */
static struct pci_driver rtl8169_pci_driver = {
	.name		= MODULENAME,
	.id_table	= rtl8169_pci_tbl,
	.probe		= rtl8169_init_one,
	.remove		= __devexit_p(rtl8169_remove_one),
#ifdef CONFIG_PM
	.suspend	= rtl8169_suspend,
	.resume		= rtl8169_resume,
#endif
};
2944
/* Module entry point: register the PCI driver. */
static int __init
rtl8169_init_module(void)
{
	return pci_register_driver(&rtl8169_pci_driver);
}
2950
/* Module exit point: unregister the PCI driver. */
static void __exit
rtl8169_cleanup_module(void)
{
	pci_unregister_driver(&rtl8169_pci_driver);
}

module_init(rtl8169_init_module);
module_exit(rtl8169_cleanup_module);