]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/sky2.c
sky2: dynamic size transmit ring
[net-next-2.6.git] / drivers / net / sky2.c
CommitLineData
cd28ab6a
SH
1/*
2 * New driver for Marvell Yukon 2 chipset.
3 * Based on earlier sk98lin, and skge driver.
4 *
5 * This driver intentionally does not support all the features
6 * of the original driver such as link fail-over and link management because
7 * those should be done at higher levels.
8 *
9 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
798b6b19 13 * the Free Software Foundation; either version 2 of the License.
cd28ab6a
SH
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
793b883e 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
cd28ab6a
SH
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
793b883e 25#include <linux/crc32.h>
cd28ab6a 26#include <linux/kernel.h>
cd28ab6a
SH
27#include <linux/module.h>
28#include <linux/netdevice.h>
d0bbccfa 29#include <linux/dma-mapping.h>
cd28ab6a
SH
30#include <linux/etherdevice.h>
31#include <linux/ethtool.h>
32#include <linux/pci.h>
33#include <linux/ip.h>
c9bdd4b5 34#include <net/ip.h>
cd28ab6a
SH
35#include <linux/tcp.h>
36#include <linux/in.h>
37#include <linux/delay.h>
91c86df5 38#include <linux/workqueue.h>
d1f13708 39#include <linux/if_vlan.h>
d70cd51a 40#include <linux/prefetch.h>
3cf26753 41#include <linux/debugfs.h>
ef743d33 42#include <linux/mii.h>
cd28ab6a
SH
43
44#include <asm/irq.h>
45
d1f13708
SH
46#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
47#define SKY2_VLAN_TAG_USED 1
48#endif
49
cd28ab6a
SH
50#include "sky2.h"
51
52#define DRV_NAME "sky2"
deeb16dc 53#define DRV_VERSION "1.24"
cd28ab6a
SH
54#define PFX DRV_NAME " "
55
56/*
57 * The Yukon II chipset takes 64 bit command blocks (called list elements)
58 * that are organized into three (receive, transmit, status) different rings
14d0263f 59 * similar to Tigon3.
cd28ab6a
SH
60 */
61
14d0263f 62#define RX_LE_SIZE 1024
cd28ab6a 63#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
14d0263f 64#define RX_MAX_PENDING (RX_LE_SIZE/6 - 2)
13210ce5 65#define RX_DEF_PENDING RX_MAX_PENDING
793b883e 66
ee5f68fe
SH
67/* This is the worst case number of transmit list elements for a single skb:
68 VLAN + TSO + CKSUM + Data + skb_frags * DMA */
69#define MAX_SKB_TX_LE (4 + (sizeof(dma_addr_t)/sizeof(u32))*MAX_SKB_FRAGS)
e9c1be80 70#define TX_MIN_PENDING (MAX_SKB_TX_LE+1)
ee5f68fe
SH
71#define TX_MAX_PENDING 4096
72#define TX_DEF_PENDING 127
cd28ab6a 73
793b883e 74#define STATUS_RING_SIZE 2048 /* 2 ports * (TX + 2*RX) */
cd28ab6a 75#define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le))
cd28ab6a
SH
76#define TX_WATCHDOG (5 * HZ)
77#define NAPI_WEIGHT 64
78#define PHY_RETRIES 1000
79
f4331a6d
SH
80#define SKY2_EEPROM_MAGIC 0x9955aabb
81
82
cb5d9547
SH
83#define RING_NEXT(x,s) (((x)+1) & ((s)-1))
84
cd28ab6a 85static const u32 default_msg =
793b883e
SH
86 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
87 | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
3be92a70 88 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
cd28ab6a 89
793b883e 90static int debug = -1; /* defaults above */
cd28ab6a
SH
91module_param(debug, int, 0);
92MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
93
14d0263f 94static int copybreak __read_mostly = 128;
bdb5c58e
SH
95module_param(copybreak, int, 0);
96MODULE_PARM_DESC(copybreak, "Receive copy threshold");
97
fb2690a9
SH
98static int disable_msi = 0;
99module_param(disable_msi, int, 0);
100MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
101
e6cac9ba 102static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
e5b74c7d
SH
103 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
104 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
2d2a3871 105 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */
2f4a66ad 106 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */
508f89e7 107 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */
f1a0b6f5 108 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B03) }, /* DGE-550T */
e5b74c7d
SH
109 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */
110 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */
111 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */
112 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) }, /* 88E8062 */
113 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) }, /* 88E8021 */
114 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) }, /* 88E8022 */
115 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) }, /* 88E8061 */
116 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) }, /* 88E8062 */
117 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) }, /* 88E8035 */
118 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */
119 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */
120 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */
05745c4a 121 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */
a3b4fced 122 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4355) }, /* 88E8040T */
e5b74c7d 123 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */
5a37a68d 124 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4357) }, /* 88E8042 */
05745c4a 125 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */
e5b74c7d
SH
126 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */
127 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
128 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
129 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
130 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
05745c4a 131 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) }, /* 88E8070 */
e5b74c7d
SH
132 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
133 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
134 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
f1a0b6f5
SH
135 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */
136 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */
69161611 137 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
5a37a68d 138 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
ed4d4161
SH
139 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
140 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
0ce8b98d 141 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
cd28ab6a
SH
142 { 0 }
143};
793b883e 144
cd28ab6a
SH
145MODULE_DEVICE_TABLE(pci, sky2_id_table);
146
147/* Avoid conditionals by using array */
148static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
149static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
f4ea431b 150static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
cd28ab6a 151
d1b139c0
SH
152static void sky2_set_multicast(struct net_device *dev);
153
af043aa5 154/* Access to PHY via serial interconnect */
ef743d33 155static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
cd28ab6a
SH
156{
157 int i;
158
159 gma_write16(hw, port, GM_SMI_DATA, val);
160 gma_write16(hw, port, GM_SMI_CTRL,
161 GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));
162
163 for (i = 0; i < PHY_RETRIES; i++) {
af043aa5
SH
164 u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
165 if (ctrl == 0xffff)
166 goto io_error;
167
168 if (!(ctrl & GM_SMI_CT_BUSY))
ef743d33 169 return 0;
af043aa5
SH
170
171 udelay(10);
cd28ab6a 172 }
ef743d33 173
af043aa5 174 dev_warn(&hw->pdev->dev,"%s: phy write timeout\n", hw->dev[port]->name);
ef743d33 175 return -ETIMEDOUT;
af043aa5
SH
176
177io_error:
178 dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
179 return -EIO;
cd28ab6a
SH
180}
181
ef743d33 182static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val)
cd28ab6a
SH
183{
184 int i;
185
793b883e 186 gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
cd28ab6a
SH
187 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
188
189 for (i = 0; i < PHY_RETRIES; i++) {
af043aa5
SH
190 u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
191 if (ctrl == 0xffff)
192 goto io_error;
193
194 if (ctrl & GM_SMI_CT_RD_VAL) {
ef743d33
SH
195 *val = gma_read16(hw, port, GM_SMI_DATA);
196 return 0;
197 }
198
af043aa5 199 udelay(10);
cd28ab6a
SH
200 }
201
af043aa5 202 dev_warn(&hw->pdev->dev, "%s: phy read timeout\n", hw->dev[port]->name);
ef743d33 203 return -ETIMEDOUT;
af043aa5
SH
204io_error:
205 dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
206 return -EIO;
ef743d33
SH
207}
208
af043aa5 209static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
ef743d33
SH
210{
211 u16 v;
af043aa5 212 __gm_phy_read(hw, port, reg, &v);
ef743d33 213 return v;
cd28ab6a
SH
214}
215
5afa0a9c 216
ae306cca
SH
217static void sky2_power_on(struct sky2_hw *hw)
218{
219 /* switch power to VCC (WA for VAUX problem) */
220 sky2_write8(hw, B0_POWER_CTRL,
221 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
5afa0a9c 222
ae306cca
SH
223 /* disable Core Clock Division, */
224 sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
d3bcfbeb 225
ae306cca
SH
226 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
227 /* enable bits are inverted */
228 sky2_write8(hw, B2_Y2_CLK_GATE,
229 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
230 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
231 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
232 else
233 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
977bdf06 234
ea76e635 235 if (hw->flags & SKY2_HW_ADV_POWER_CTL) {
fc99fe06 236 u32 reg;
5afa0a9c 237
b32f40c4 238 sky2_pci_write32(hw, PCI_DEV_REG3, 0);
b2345773 239
b32f40c4 240 reg = sky2_pci_read32(hw, PCI_DEV_REG4);
fc99fe06
SH
241 /* set all bits to 0 except bits 15..12 and 8 */
242 reg &= P_ASPM_CONTROL_MSK;
b32f40c4 243 sky2_pci_write32(hw, PCI_DEV_REG4, reg);
fc99fe06 244
b32f40c4 245 reg = sky2_pci_read32(hw, PCI_DEV_REG5);
fc99fe06
SH
246 /* set all bits to 0 except bits 28 & 27 */
247 reg &= P_CTL_TIM_VMAIN_AV_MSK;
b32f40c4 248 sky2_pci_write32(hw, PCI_DEV_REG5, reg);
fc99fe06 249
b32f40c4 250 sky2_pci_write32(hw, PCI_CFG_REG_1, 0);
8f70920f
SH
251
252 /* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */
253 reg = sky2_read32(hw, B2_GP_IO);
254 reg |= GLB_GPIO_STAT_RACE_DIS;
255 sky2_write32(hw, B2_GP_IO, reg);
b2345773
SH
256
257 sky2_read32(hw, B2_GP_IO);
5afa0a9c 258 }
ae306cca 259}
5afa0a9c 260
ae306cca
SH
261static void sky2_power_aux(struct sky2_hw *hw)
262{
263 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
264 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
265 else
266 /* enable bits are inverted */
267 sky2_write8(hw, B2_Y2_CLK_GATE,
268 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
269 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
270 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
271
272 /* switch power to VAUX */
273 if (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL)
274 sky2_write8(hw, B0_POWER_CTRL,
275 (PC_VAUX_ENA | PC_VCC_ENA |
276 PC_VAUX_ON | PC_VCC_OFF));
5afa0a9c
SH
277}
278
d3bcfbeb 279static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
cd28ab6a
SH
280{
281 u16 reg;
282
283 /* disable all GMAC IRQ's */
284 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
793b883e 285
cd28ab6a
SH
286 gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
287 gma_write16(hw, port, GM_MC_ADDR_H2, 0);
288 gma_write16(hw, port, GM_MC_ADDR_H3, 0);
289 gma_write16(hw, port, GM_MC_ADDR_H4, 0);
290
291 reg = gma_read16(hw, port, GM_RX_CTRL);
292 reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
293 gma_write16(hw, port, GM_RX_CTRL, reg);
294}
295
16ad91e1
SH
296/* flow control to advertise bits */
297static const u16 copper_fc_adv[] = {
298 [FC_NONE] = 0,
299 [FC_TX] = PHY_M_AN_ASP,
300 [FC_RX] = PHY_M_AN_PC,
301 [FC_BOTH] = PHY_M_AN_PC | PHY_M_AN_ASP,
302};
303
304/* flow control to advertise bits when using 1000BaseX */
305static const u16 fiber_fc_adv[] = {
df3fe1f3 306 [FC_NONE] = PHY_M_P_NO_PAUSE_X,
16ad91e1
SH
307 [FC_TX] = PHY_M_P_ASYM_MD_X,
308 [FC_RX] = PHY_M_P_SYM_MD_X,
df3fe1f3 309 [FC_BOTH] = PHY_M_P_BOTH_MD_X,
16ad91e1
SH
310};
311
312/* flow control to GMA disable bits */
313static const u16 gm_fc_disable[] = {
314 [FC_NONE] = GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS,
315 [FC_TX] = GM_GPCR_FC_RX_DIS,
316 [FC_RX] = GM_GPCR_FC_TX_DIS,
317 [FC_BOTH] = 0,
318};
319
320
cd28ab6a
SH
321static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
322{
323 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
2eaba1a2 324 u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg;
cd28ab6a 325
0ea065e5 326 if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) &&
ea76e635 327 !(hw->flags & SKY2_HW_NEWER_PHY)) {
cd28ab6a
SH
328 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
329
330 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
793b883e 331 PHY_M_EC_MAC_S_MSK);
cd28ab6a
SH
332 ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
333
53419c68 334 /* on PHY 88E1040 Rev.D0 (and newer) downshift control changed */
cd28ab6a 335 if (hw->chip_id == CHIP_ID_YUKON_EC)
53419c68 336 /* set downshift counter to 3x and enable downshift */
cd28ab6a
SH
337 ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
338 else
53419c68
SH
339 /* set master & slave downshift counter to 1x */
340 ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
cd28ab6a
SH
341
342 gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
343 }
344
345 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
b89165f2 346 if (sky2_is_copper(hw)) {
05745c4a 347 if (!(hw->flags & SKY2_HW_GIGABIT)) {
cd28ab6a
SH
348 /* enable automatic crossover */
349 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
6d3105d5
SH
350
351 if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
352 hw->chip_rev == CHIP_REV_YU_FE2_A0) {
353 u16 spec;
354
355 /* Enable Class A driver for FE+ A0 */
356 spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2);
357 spec |= PHY_M_FESC_SEL_CL_A;
358 gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
359 }
cd28ab6a
SH
360 } else {
361 /* disable energy detect */
362 ctrl &= ~PHY_M_PC_EN_DET_MSK;
363
364 /* enable automatic crossover */
365 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
366
53419c68 367 /* downshift on PHY 88E1112 and 88E1149 is changed */
0ea065e5 368 if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED)
ea76e635 369 && (hw->flags & SKY2_HW_NEWER_PHY)) {
53419c68 370 /* set downshift counter to 3x and enable downshift */
cd28ab6a
SH
371 ctrl &= ~PHY_M_PC_DSC_MSK;
372 ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
373 }
374 }
cd28ab6a
SH
375 } else {
376 /* workaround for deviation #4.88 (CRC errors) */
377 /* disable Automatic Crossover */
378
379 ctrl &= ~PHY_M_PC_MDIX_MSK;
b89165f2 380 }
cd28ab6a 381
b89165f2
SH
382 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
383
384 /* special setup for PHY 88E1112 Fiber */
ea76e635 385 if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) {
b89165f2 386 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
cd28ab6a 387
b89165f2
SH
388 /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
389 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
390 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
391 ctrl &= ~PHY_M_MAC_MD_MSK;
392 ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
393 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
394
395 if (hw->pmd_type == 'P') {
cd28ab6a
SH
396 /* select page 1 to access Fiber registers */
397 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);
b89165f2
SH
398
399 /* for SFP-module set SIGDET polarity to low */
400 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
401 ctrl |= PHY_M_FIB_SIGD_POL;
34dd962b 402 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
cd28ab6a 403 }
b89165f2
SH
404
405 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
cd28ab6a
SH
406 }
407
7800fddc 408 ctrl = PHY_CT_RESET;
cd28ab6a
SH
409 ct1000 = 0;
410 adv = PHY_AN_CSMA;
2eaba1a2 411 reg = 0;
cd28ab6a 412
0ea065e5 413 if (sky2->flags & SKY2_FLAG_AUTO_SPEED) {
b89165f2 414 if (sky2_is_copper(hw)) {
cd28ab6a
SH
415 if (sky2->advertising & ADVERTISED_1000baseT_Full)
416 ct1000 |= PHY_M_1000C_AFD;
417 if (sky2->advertising & ADVERTISED_1000baseT_Half)
418 ct1000 |= PHY_M_1000C_AHD;
419 if (sky2->advertising & ADVERTISED_100baseT_Full)
420 adv |= PHY_M_AN_100_FD;
421 if (sky2->advertising & ADVERTISED_100baseT_Half)
422 adv |= PHY_M_AN_100_HD;
423 if (sky2->advertising & ADVERTISED_10baseT_Full)
424 adv |= PHY_M_AN_10_FD;
425 if (sky2->advertising & ADVERTISED_10baseT_Half)
426 adv |= PHY_M_AN_10_HD;
709c6e7b 427
b89165f2
SH
428 } else { /* special defines for FIBER (88E1040S only) */
429 if (sky2->advertising & ADVERTISED_1000baseT_Full)
430 adv |= PHY_M_AN_1000X_AFD;
431 if (sky2->advertising & ADVERTISED_1000baseT_Half)
432 adv |= PHY_M_AN_1000X_AHD;
709c6e7b 433 }
cd28ab6a
SH
434
435 /* Restart Auto-negotiation */
436 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
437 } else {
438 /* forced speed/duplex settings */
439 ct1000 = PHY_M_1000C_MSE;
440
0ea065e5
SH
441 /* Disable auto update for duplex flow control and duplex */
442 reg |= GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_SPD_DIS;
cd28ab6a
SH
443
444 switch (sky2->speed) {
445 case SPEED_1000:
446 ctrl |= PHY_CT_SP1000;
2eaba1a2 447 reg |= GM_GPCR_SPEED_1000;
cd28ab6a
SH
448 break;
449 case SPEED_100:
450 ctrl |= PHY_CT_SP100;
2eaba1a2 451 reg |= GM_GPCR_SPEED_100;
cd28ab6a
SH
452 break;
453 }
454
2eaba1a2
SH
455 if (sky2->duplex == DUPLEX_FULL) {
456 reg |= GM_GPCR_DUP_FULL;
457 ctrl |= PHY_CT_DUP_MD;
16ad91e1
SH
458 } else if (sky2->speed < SPEED_1000)
459 sky2->flow_mode = FC_NONE;
0ea065e5 460 }
2eaba1a2 461
0ea065e5
SH
462 if (sky2->flags & SKY2_FLAG_AUTO_PAUSE) {
463 if (sky2_is_copper(hw))
464 adv |= copper_fc_adv[sky2->flow_mode];
465 else
466 adv |= fiber_fc_adv[sky2->flow_mode];
467 } else {
468 reg |= GM_GPCR_AU_FCT_DIS;
16ad91e1 469 reg |= gm_fc_disable[sky2->flow_mode];
2eaba1a2
SH
470
471 /* Forward pause packets to GMAC? */
16ad91e1 472 if (sky2->flow_mode & FC_RX)
2eaba1a2
SH
473 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
474 else
475 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
cd28ab6a
SH
476 }
477
2eaba1a2
SH
478 gma_write16(hw, port, GM_GP_CTRL, reg);
479
05745c4a 480 if (hw->flags & SKY2_HW_GIGABIT)
cd28ab6a
SH
481 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
482
483 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
484 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
485
486 /* Setup Phy LED's */
487 ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
488 ledover = 0;
489
490 switch (hw->chip_id) {
491 case CHIP_ID_YUKON_FE:
492 /* on 88E3082 these bits are at 11..9 (shifted left) */
493 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;
494
495 ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);
496
497 /* delete ACT LED control bits */
498 ctrl &= ~PHY_M_FELP_LED1_MSK;
499 /* change ACT LED control to blink mode */
500 ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
501 gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
502 break;
503
05745c4a
SH
504 case CHIP_ID_YUKON_FE_P:
505 /* Enable Link Partner Next Page */
506 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
507 ctrl |= PHY_M_PC_ENA_LIP_NP;
508
509 /* disable Energy Detect and enable scrambler */
510 ctrl &= ~(PHY_M_PC_ENA_ENE_DT | PHY_M_PC_DIS_SCRAMB);
511 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
512
513 /* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */
514 ctrl = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_ACT_BL) |
515 PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_LINK) |
516 PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED);
517
518 gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
519 break;
520
cd28ab6a 521 case CHIP_ID_YUKON_XL:
793b883e 522 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
cd28ab6a
SH
523
524 /* select page 3 to access LED control register */
525 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
526
527 /* set LED Function Control register */
ed6d32c7
SH
528 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
529 (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
530 PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */
531 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
532 PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */
cd28ab6a
SH
533
534 /* set Polarity Control register */
535 gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
793b883e
SH
536 (PHY_M_POLC_LS1_P_MIX(4) |
537 PHY_M_POLC_IS0_P_MIX(4) |
538 PHY_M_POLC_LOS_CTRL(2) |
539 PHY_M_POLC_INIT_CTRL(2) |
540 PHY_M_POLC_STA1_CTRL(2) |
541 PHY_M_POLC_STA0_CTRL(2)));
cd28ab6a
SH
542
543 /* restore page register */
793b883e 544 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
cd28ab6a 545 break;
93745494 546
ed6d32c7 547 case CHIP_ID_YUKON_EC_U:
93745494 548 case CHIP_ID_YUKON_EX:
ed4d4161 549 case CHIP_ID_YUKON_SUPR:
ed6d32c7
SH
550 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
551
552 /* select page 3 to access LED control register */
553 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
554
555 /* set LED Function Control register */
556 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
557 (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
558 PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */
559 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
560 PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */
561
562 /* set Blink Rate in LED Timer Control Register */
563 gm_phy_write(hw, port, PHY_MARV_INT_MASK,
564 ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS));
565 /* restore page register */
566 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
567 break;
cd28ab6a
SH
568
569 default:
570 /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
571 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
a84d0a3d 572
cd28ab6a 573 /* turn off the Rx LED (LED_RX) */
a84d0a3d 574 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
cd28ab6a
SH
575 }
576
0ce8b98d 577 if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_UL_2) {
977bdf06 578 /* apply fixes in PHY AFE */
ed6d32c7
SH
579 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);
580
977bdf06 581 /* increase differential signal amplitude in 10BASE-T */
ed6d32c7
SH
582 gm_phy_write(hw, port, 0x18, 0xaa99);
583 gm_phy_write(hw, port, 0x17, 0x2011);
cd28ab6a 584
0ce8b98d
SH
585 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
586 /* fix for IEEE A/B Symmetry failure in 1000BASE-T */
587 gm_phy_write(hw, port, 0x18, 0xa204);
588 gm_phy_write(hw, port, 0x17, 0x2002);
589 }
977bdf06
SH
590
591 /* set page register to 0 */
9467a8fc 592 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
05745c4a
SH
593 } else if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
594 hw->chip_rev == CHIP_REV_YU_FE2_A0) {
595 /* apply workaround for integrated resistors calibration */
596 gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17);
597 gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60);
e1a74b37
SH
598 } else if (hw->chip_id != CHIP_ID_YUKON_EX &&
599 hw->chip_id < CHIP_ID_YUKON_SUPR) {
05745c4a 600 /* no effect on Yukon-XL */
977bdf06 601 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
cd28ab6a 602
0ea065e5
SH
603 if ( !(sky2->flags & SKY2_FLAG_AUTO_SPEED)
604 || sky2->speed == SPEED_100) {
977bdf06 605 /* turn on 100 Mbps LED (LED_LINK100) */
a84d0a3d 606 ledover |= PHY_M_LED_MO_100(MO_LED_ON);
977bdf06 607 }
cd28ab6a 608
977bdf06
SH
609 if (ledover)
610 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
611
612 }
2eaba1a2 613
d571b694 614 /* Enable phy interrupt on auto-negotiation complete (or link up) */
0ea065e5 615 if (sky2->flags & SKY2_FLAG_AUTO_SPEED)
cd28ab6a
SH
616 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
617 else
618 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
619}
620
b96936da
SH
621static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
622static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };
623
624static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
d3bcfbeb
SH
625{
626 u32 reg1;
d3bcfbeb 627
82637e80 628 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
b32f40c4 629 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
b96936da 630 reg1 &= ~phy_power[port];
d3bcfbeb 631
b96936da 632 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
ff35164e
SH
633 reg1 |= coma_mode[port];
634
b32f40c4 635 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
82637e80
SH
636 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
637 sky2_pci_read32(hw, PCI_DEV_REG1);
f71eb1a2
SH
638
639 if (hw->chip_id == CHIP_ID_YUKON_FE)
640 gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE);
641 else if (hw->flags & SKY2_HW_ADV_POWER_CTL)
642 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
b96936da 643}
167f53d0 644
b96936da
SH
645static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
646{
647 u32 reg1;
db99b988
SH
648 u16 ctrl;
649
650 /* release GPHY Control reset */
651 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
652
653 /* release GMAC reset */
654 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
655
656 if (hw->flags & SKY2_HW_NEWER_PHY) {
657 /* select page 2 to access MAC control register */
658 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
659
660 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
661 /* allow GMII Power Down */
662 ctrl &= ~PHY_M_MAC_GMIF_PUP;
663 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
664
665 /* set page register back to 0 */
666 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
667 }
668
669 /* setup General Purpose Control Register */
670 gma_write16(hw, port, GM_GP_CTRL,
0ea065e5
SH
671 GM_GPCR_FL_PASS | GM_GPCR_SPEED_100 |
672 GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS |
673 GM_GPCR_AU_SPD_DIS);
db99b988
SH
674
675 if (hw->chip_id != CHIP_ID_YUKON_EC) {
676 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
e484d5f5
RW
677 /* select page 2 to access MAC control register */
678 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
db99b988 679
e484d5f5 680 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
db99b988
SH
681 /* enable Power Down */
682 ctrl |= PHY_M_PC_POW_D_ENA;
683 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
e484d5f5
RW
684
685 /* set page register back to 0 */
686 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
db99b988
SH
687 }
688
689 /* set IEEE compatible Power Down Mode (dev. #4.99) */
690 gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
691 }
b96936da
SH
692
693 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
694 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
db99b988 695 reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */
b96936da
SH
696 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
697 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
d3bcfbeb
SH
698}
699
1b537565
SH
700/* Force a renegotiation */
701static void sky2_phy_reinit(struct sky2_port *sky2)
702{
e07b1aa8 703 spin_lock_bh(&sky2->phy_lock);
1b537565 704 sky2_phy_init(sky2->hw, sky2->port);
e07b1aa8 705 spin_unlock_bh(&sky2->phy_lock);
1b537565
SH
706}
707
e3173832
SH
708/* Put device in state to listen for Wake On Lan */
709static void sky2_wol_init(struct sky2_port *sky2)
710{
711 struct sky2_hw *hw = sky2->hw;
712 unsigned port = sky2->port;
713 enum flow_control save_mode;
714 u16 ctrl;
715 u32 reg1;
716
717 /* Bring hardware out of reset */
718 sky2_write16(hw, B0_CTST, CS_RST_CLR);
719 sky2_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
720
721 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
722 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
723
724 /* Force to 10/100
725 * sky2_reset will re-enable on resume
726 */
727 save_mode = sky2->flow_mode;
728 ctrl = sky2->advertising;
729
730 sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
731 sky2->flow_mode = FC_NONE;
b96936da
SH
732
733 spin_lock_bh(&sky2->phy_lock);
734 sky2_phy_power_up(hw, port);
735 sky2_phy_init(hw, port);
736 spin_unlock_bh(&sky2->phy_lock);
e3173832
SH
737
738 sky2->flow_mode = save_mode;
739 sky2->advertising = ctrl;
740
741 /* Set GMAC to no flow control and auto update for speed/duplex */
742 gma_write16(hw, port, GM_GP_CTRL,
743 GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
744 GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);
745
746 /* Set WOL address */
747 memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
748 sky2->netdev->dev_addr, ETH_ALEN);
749
750 /* Turn on appropriate WOL control bits */
751 sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
752 ctrl = 0;
753 if (sky2->wol & WAKE_PHY)
754 ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
755 else
756 ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
757
758 if (sky2->wol & WAKE_MAGIC)
759 ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
760 else
761 ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;;
762
763 ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
764 sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
765
766 /* Turn on legacy PCI-Express PME mode */
b32f40c4 767 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
e3173832 768 reg1 |= PCI_Y2_PME_LEGACY;
b32f40c4 769 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
e3173832
SH
770
771 /* block receiver */
772 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
773
774}
775
69161611
SH
776static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
777{
05745c4a
SH
778 struct net_device *dev = hw->dev[port];
779
ed4d4161
SH
780 if ( (hw->chip_id == CHIP_ID_YUKON_EX &&
781 hw->chip_rev != CHIP_REV_YU_EX_A0) ||
782 hw->chip_id == CHIP_ID_YUKON_FE_P ||
783 hw->chip_id == CHIP_ID_YUKON_SUPR) {
784 /* Yukon-Extreme B0 and further Extreme devices */
785 /* enable Store & Forward mode for TX */
05745c4a 786
ed4d4161
SH
787 if (dev->mtu <= ETH_DATA_LEN)
788 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
789 TX_JUMBO_DIS | TX_STFW_ENA);
69161611 790
ed4d4161
SH
791 else
792 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
793 TX_JUMBO_ENA| TX_STFW_ENA);
794 } else {
795 if (dev->mtu <= ETH_DATA_LEN)
796 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
797 else {
798 /* set Tx GMAC FIFO Almost Empty Threshold */
799 sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
800 (ECU_JUMBO_WM << 16) | ECU_AE_THR);
69161611 801
ed4d4161
SH
802 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
803
804 /* Can't do offload because of lack of store/forward */
805 dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_ALL_CSUM);
806 }
69161611
SH
807 }
808}
809
cd28ab6a
SH
810static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
811{
812 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
813 u16 reg;
25cccecc 814 u32 rx_reg;
cd28ab6a
SH
815 int i;
816 const u8 *addr = hw->dev[port]->dev_addr;
817
f350339c
SH
818 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
819 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
cd28ab6a
SH
820
821 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
822
793b883e 823 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 1) {
cd28ab6a
SH
824 /* WA DEV_472 -- looks like crossed wires on port 2 */
825 /* clear GMAC 1 Control reset */
826 sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
827 do {
828 sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
829 sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
830 } while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
831 gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
832 gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
833 }
834
793b883e 835 sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
cd28ab6a 836
2eaba1a2
SH
837 /* Enable Transmit FIFO Underrun */
838 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
839
e07b1aa8 840 spin_lock_bh(&sky2->phy_lock);
b96936da 841 sky2_phy_power_up(hw, port);
cd28ab6a 842 sky2_phy_init(hw, port);
e07b1aa8 843 spin_unlock_bh(&sky2->phy_lock);
cd28ab6a
SH
844
845 /* MIB clear */
846 reg = gma_read16(hw, port, GM_PHY_ADDR);
847 gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
848
43f2f104
SH
849 for (i = GM_MIB_CNT_BASE; i <= GM_MIB_CNT_END; i += 4)
850 gma_read16(hw, port, i);
cd28ab6a
SH
851 gma_write16(hw, port, GM_PHY_ADDR, reg);
852
853 /* transmit control */
854 gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
855
856 /* receive control reg: unicast + multicast + no FCS */
857 gma_write16(hw, port, GM_RX_CTRL,
793b883e 858 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
cd28ab6a
SH
859
860 /* transmit flow control */
861 gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
862
863 /* transmit parameter */
864 gma_write16(hw, port, GM_TX_PARAM,
865 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
866 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
867 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
868 TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
869
870 /* serial mode register */
871 reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
6b1a3aef 872 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
cd28ab6a 873
6b1a3aef 874 if (hw->dev[port]->mtu > ETH_DATA_LEN)
cd28ab6a
SH
875 reg |= GM_SMOD_JUMBO_ENA;
876
877 gma_write16(hw, port, GM_SERIAL_MODE, reg);
878
cd28ab6a
SH
879 /* virtual address for data */
880 gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
881
793b883e
SH
882 /* physical address: used for pause frames */
883 gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
884
885 /* ignore counter overflows */
cd28ab6a
SH
886 gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
887 gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
888 gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
889
890 /* Configure Rx MAC FIFO */
891 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
25cccecc 892 rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
05745c4a
SH
893 if (hw->chip_id == CHIP_ID_YUKON_EX ||
894 hw->chip_id == CHIP_ID_YUKON_FE_P)
25cccecc 895 rx_reg |= GMF_RX_OVER_ON;
69161611 896
25cccecc 897 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg);
cd28ab6a 898
798fdd07
SH
899 if (hw->chip_id == CHIP_ID_YUKON_XL) {
900 /* Hardware errata - clear flush mask */
901 sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), 0);
902 } else {
903 /* Flush Rx MAC FIFO on any flow control or error */
904 sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
905 }
cd28ab6a 906
8df9a876 907 /* Set threshold to 0xa (64 bytes) + 1 to workaround pause bug */
05745c4a
SH
908 reg = RX_GMF_FL_THR_DEF + 1;
909 /* Another magic mystery workaround from sk98lin */
910 if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
911 hw->chip_rev == CHIP_REV_YU_FE2_A0)
912 reg = 0x178;
913 sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), reg);
cd28ab6a
SH
914
915 /* Configure Tx MAC FIFO */
916 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
917 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
5a5b1ea0 918
e0c28116 919 /* On chips without ram buffer, pause is controled by MAC level */
39dbd958 920 if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
8df9a876 921 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
5a5b1ea0 922 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
b628ed98 923
69161611 924 sky2_set_tx_stfwd(hw, port);
5a5b1ea0
SH
925 }
926
e970d1f8
SH
927 if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
928 hw->chip_rev == CHIP_REV_YU_FE2_A0) {
929 /* disable dynamic watermark */
930 reg = sky2_read16(hw, SK_REG(port, TX_GMF_EA));
931 reg &= ~TX_DYN_WM_ENA;
932 sky2_write16(hw, SK_REG(port, TX_GMF_EA), reg);
933 }
cd28ab6a
SH
934}
935
67712901
SH
936/* Assign Ram Buffer allocation to queue */
937static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
cd28ab6a 938{
67712901
SH
939 u32 end;
940
941 /* convert from K bytes to qwords used for hw register */
942 start *= 1024/8;
943 space *= 1024/8;
944 end = start + space - 1;
793b883e 945
cd28ab6a
SH
946 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
947 sky2_write32(hw, RB_ADDR(q, RB_START), start);
948 sky2_write32(hw, RB_ADDR(q, RB_END), end);
949 sky2_write32(hw, RB_ADDR(q, RB_WP), start);
950 sky2_write32(hw, RB_ADDR(q, RB_RP), start);
951
952 if (q == Q_R1 || q == Q_R2) {
1c28f6ba 953 u32 tp = space - space/4;
793b883e 954
1c28f6ba
SH
955 /* On receive queue's set the thresholds
956 * give receiver priority when > 3/4 full
957 * send pause when down to 2K
958 */
959 sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
960 sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
793b883e 961
1c28f6ba
SH
962 tp = space - 2048/8;
963 sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
964 sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
cd28ab6a
SH
965 } else {
966 /* Enable store & forward on Tx queue's because
967 * Tx FIFO is only 1K on Yukon
968 */
969 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
970 }
971
972 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
793b883e 973 sky2_read8(hw, RB_ADDR(q, RB_CTRL));
cd28ab6a
SH
974}
975
cd28ab6a 976/* Setup Bus Memory Interface */
af4ed7e6 977static void sky2_qset(struct sky2_hw *hw, u16 q)
cd28ab6a
SH
978{
979 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
980 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
981 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
af4ed7e6 982 sky2_write32(hw, Q_ADDR(q, Q_WM), BMU_WM_DEFAULT);
cd28ab6a
SH
983}
984
cd28ab6a
SH
985/* Setup prefetch unit registers. This is the interface between
986 * hardware and driver list elements
987 */
8cc048e3 988static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
d6e74b6b 989 dma_addr_t addr, u32 last)
cd28ab6a 990{
cd28ab6a
SH
991 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
992 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
d6e74b6b
SH
993 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), upper_32_bits(addr));
994 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), lower_32_bits(addr));
cd28ab6a
SH
995 sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
996 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);
793b883e
SH
997
998 sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL));
cd28ab6a
SH
999}
1000
9b289c33 1001static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot)
793b883e 1002{
9b289c33 1003 struct sky2_tx_le *le = sky2->tx_le + *slot;
793b883e 1004
ee5f68fe 1005 *slot = RING_NEXT(*slot, sky2->tx_ring_size);
291ea614 1006 le->ctrl = 0;
793b883e
SH
1007 return le;
1008}
cd28ab6a 1009
88f5f0ca
SH
1010static void tx_init(struct sky2_port *sky2)
1011{
1012 struct sky2_tx_le *le;
1013
1014 sky2->tx_prod = sky2->tx_cons = 0;
1015 sky2->tx_tcpsum = 0;
1016 sky2->tx_last_mss = 0;
1017
9b289c33 1018 le = get_tx_le(sky2, &sky2->tx_prod);
88f5f0ca
SH
1019 le->addr = 0;
1020 le->opcode = OP_ADDR64 | HW_OWNER;
5dce95e5 1021 sky2->tx_last_upper = 0;
88f5f0ca
SH
1022}
1023
291ea614
SH
1024static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2,
1025 struct sky2_tx_le *le)
1026{
1027 return sky2->tx_ring + (le - sky2->tx_le);
1028}
1029
290d4de5
SH
1030/* Update chip's next pointer */
1031static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
cd28ab6a 1032{
50432cb5 1033 /* Make sure write' to descriptors are complete before we tell hardware */
762c2de2 1034 wmb();
50432cb5
SH
1035 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
1036
1037 /* Synchronize I/O on since next processor may write to tail */
1038 mmiowb();
cd28ab6a
SH
1039}
1040
793b883e 1041
cd28ab6a
SH
1042static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
1043{
1044 struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
cb5d9547 1045 sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
291ea614 1046 le->ctrl = 0;
cd28ab6a
SH
1047 return le;
1048}
1049
14d0263f
SH
1050/* Build description to hardware for one receive segment */
1051static void sky2_rx_add(struct sky2_port *sky2, u8 op,
1052 dma_addr_t map, unsigned len)
cd28ab6a
SH
1053{
1054 struct sky2_rx_le *le;
1055
86c6887e 1056 if (sizeof(dma_addr_t) > sizeof(u32)) {
cd28ab6a 1057 le = sky2_next_rx(sky2);
86c6887e 1058 le->addr = cpu_to_le32(upper_32_bits(map));
cd28ab6a
SH
1059 le->opcode = OP_ADDR64 | HW_OWNER;
1060 }
793b883e 1061
cd28ab6a 1062 le = sky2_next_rx(sky2);
d6e74b6b 1063 le->addr = cpu_to_le32(lower_32_bits(map));
734d1868 1064 le->length = cpu_to_le16(len);
14d0263f 1065 le->opcode = op | HW_OWNER;
cd28ab6a
SH
1066}
1067
14d0263f
SH
1068/* Build description to hardware for one possibly fragmented skb */
1069static void sky2_rx_submit(struct sky2_port *sky2,
1070 const struct rx_ring_info *re)
1071{
1072 int i;
1073
1074 sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size);
1075
1076 for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++)
1077 sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE);
1078}
1079
1080
454e6cb6 1081static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
14d0263f
SH
1082 unsigned size)
1083{
1084 struct sk_buff *skb = re->skb;
1085 int i;
1086
1087 re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
454e6cb6
SH
1088 if (unlikely(pci_dma_mapping_error(pdev, re->data_addr)))
1089 return -EIO;
1090
14d0263f
SH
1091 pci_unmap_len_set(re, data_size, size);
1092
1093 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1094 re->frag_addr[i] = pci_map_page(pdev,
1095 skb_shinfo(skb)->frags[i].page,
1096 skb_shinfo(skb)->frags[i].page_offset,
1097 skb_shinfo(skb)->frags[i].size,
1098 PCI_DMA_FROMDEVICE);
454e6cb6 1099 return 0;
14d0263f
SH
1100}
1101
1102static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
1103{
1104 struct sk_buff *skb = re->skb;
1105 int i;
1106
1107 pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size),
1108 PCI_DMA_FROMDEVICE);
1109
1110 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1111 pci_unmap_page(pdev, re->frag_addr[i],
1112 skb_shinfo(skb)->frags[i].size,
1113 PCI_DMA_FROMDEVICE);
1114}
793b883e 1115
cd28ab6a
SH
1116/* Tell chip where to start receive checksum.
1117 * Actually has two checksums, but set both same to avoid possible byte
1118 * order problems.
1119 */
793b883e 1120static void rx_set_checksum(struct sky2_port *sky2)
cd28ab6a 1121{
ea76e635 1122 struct sky2_rx_le *le = sky2_next_rx(sky2);
793b883e 1123
ea76e635
SH
1124 le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN);
1125 le->ctrl = 0;
1126 le->opcode = OP_TCPSTART | HW_OWNER;
cd28ab6a 1127
ea76e635
SH
1128 sky2_write32(sky2->hw,
1129 Q_ADDR(rxqaddr[sky2->port], Q_CSR),
0ea065e5
SH
1130 (sky2->flags & SKY2_FLAG_RX_CHECKSUM)
1131 ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
cd28ab6a
SH
1132}
1133
6b1a3aef
SH
1134/*
1135 * The RX Stop command will not work for Yukon-2 if the BMU does not
1136 * reach the end of packet and since we can't make sure that we have
1137 * incoming data, we must reset the BMU while it is not doing a DMA
1138 * transfer. Since it is possible that the RX path is still active,
1139 * the RX RAM buffer will be stopped first, so any possible incoming
1140 * data will not trigger a DMA. After the RAM buffer is stopped, the
1141 * BMU is polled until any DMA in progress is ended and only then it
1142 * will be reset.
1143 */
1144static void sky2_rx_stop(struct sky2_port *sky2)
1145{
1146 struct sky2_hw *hw = sky2->hw;
1147 unsigned rxq = rxqaddr[sky2->port];
1148 int i;
1149
1150 /* disable the RAM Buffer receive queue */
1151 sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
1152
1153 for (i = 0; i < 0xffff; i++)
1154 if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
1155 == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
1156 goto stopped;
1157
1158 printk(KERN_WARNING PFX "%s: receiver stop failed\n",
1159 sky2->netdev->name);
1160stopped:
1161 sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
1162
1163 /* reset the Rx prefetch unit */
1164 sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
3d1454dd 1165 mmiowb();
6b1a3aef 1166}
793b883e 1167
d571b694 1168/* Clean out receive buffer area, assumes receiver hardware stopped */
cd28ab6a
SH
1169static void sky2_rx_clean(struct sky2_port *sky2)
1170{
1171 unsigned i;
1172
1173 memset(sky2->rx_le, 0, RX_LE_BYTES);
793b883e 1174 for (i = 0; i < sky2->rx_pending; i++) {
291ea614 1175 struct rx_ring_info *re = sky2->rx_ring + i;
cd28ab6a
SH
1176
1177 if (re->skb) {
14d0263f 1178 sky2_rx_unmap_skb(sky2->hw->pdev, re);
cd28ab6a
SH
1179 kfree_skb(re->skb);
1180 re->skb = NULL;
1181 }
1182 }
bd1c6869 1183 skb_queue_purge(&sky2->rx_recycle);
cd28ab6a
SH
1184}
1185
ef743d33
SH
1186/* Basic MII support */
1187static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1188{
1189 struct mii_ioctl_data *data = if_mii(ifr);
1190 struct sky2_port *sky2 = netdev_priv(dev);
1191 struct sky2_hw *hw = sky2->hw;
1192 int err = -EOPNOTSUPP;
1193
1194 if (!netif_running(dev))
1195 return -ENODEV; /* Phy still in reset */
1196
d89e1343 1197 switch (cmd) {
ef743d33
SH
1198 case SIOCGMIIPHY:
1199 data->phy_id = PHY_ADDR_MARV;
1200
1201 /* fallthru */
1202 case SIOCGMIIREG: {
1203 u16 val = 0;
91c86df5 1204
e07b1aa8 1205 spin_lock_bh(&sky2->phy_lock);
ef743d33 1206 err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
e07b1aa8 1207 spin_unlock_bh(&sky2->phy_lock);
91c86df5 1208
ef743d33
SH
1209 data->val_out = val;
1210 break;
1211 }
1212
1213 case SIOCSMIIREG:
1214 if (!capable(CAP_NET_ADMIN))
1215 return -EPERM;
1216
e07b1aa8 1217 spin_lock_bh(&sky2->phy_lock);
ef743d33
SH
1218 err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
1219 data->val_in);
e07b1aa8 1220 spin_unlock_bh(&sky2->phy_lock);
ef743d33
SH
1221 break;
1222 }
1223 return err;
1224}
1225
d1f13708 1226#ifdef SKY2_VLAN_TAG_USED
d494eacd 1227static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff)
d1f13708 1228{
d494eacd 1229 if (onoff) {
3d4e66f5
SH
1230 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1231 RX_VLAN_STRIP_ON);
1232 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1233 TX_VLAN_TAG_ON);
1234 } else {
1235 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1236 RX_VLAN_STRIP_OFF);
1237 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1238 TX_VLAN_TAG_OFF);
1239 }
d494eacd
SH
1240}
1241
1242static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
1243{
1244 struct sky2_port *sky2 = netdev_priv(dev);
1245 struct sky2_hw *hw = sky2->hw;
1246 u16 port = sky2->port;
1247
1248 netif_tx_lock_bh(dev);
1249 napi_disable(&hw->napi);
1250
1251 sky2->vlgrp = grp;
1252 sky2_set_vlan_mode(hw, port, grp != NULL);
d1f13708 1253
d1d08d12 1254 sky2_read32(hw, B0_Y2_SP_LISR);
bea3348e 1255 napi_enable(&hw->napi);
2bb8c262 1256 netif_tx_unlock_bh(dev);
d1f13708
SH
1257}
1258#endif
1259
bd1c6869
SH
1260/* Amount of required worst case padding in rx buffer */
1261static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
1262{
1263 return (hw->flags & SKY2_HW_RAM_BUFFER) ? 8 : 2;
1264}
1265
82788c7a 1266/*
14d0263f
SH
1267 * Allocate an skb for receiving. If the MTU is large enough
1268 * make the skb non-linear with a fragment list of pages.
82788c7a 1269 */
14d0263f 1270static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2)
82788c7a
SH
1271{
1272 struct sk_buff *skb;
14d0263f 1273 int i;
82788c7a 1274
bd1c6869
SH
1275 skb = __skb_dequeue(&sky2->rx_recycle);
1276 if (!skb)
1277 skb = netdev_alloc_skb(sky2->netdev, sky2->rx_data_size
1278 + sky2_rx_pad(sky2->hw));
1279 if (!skb)
1280 goto nomem;
1281
39dbd958 1282 if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) {
f03b8654
SH
1283 unsigned char *start;
1284 /*
1285 * Workaround for a bug in FIFO that cause hang
1286 * if the FIFO if the receive buffer is not 64 byte aligned.
1287 * The buffer returned from netdev_alloc_skb is
1288 * aligned except if slab debugging is enabled.
1289 */
f03b8654
SH
1290 start = PTR_ALIGN(skb->data, 8);
1291 skb_reserve(skb, start - skb->data);
bd1c6869 1292 } else
f03b8654 1293 skb_reserve(skb, NET_IP_ALIGN);
14d0263f
SH
1294
1295 for (i = 0; i < sky2->rx_nfrags; i++) {
1296 struct page *page = alloc_page(GFP_ATOMIC);
1297
1298 if (!page)
1299 goto free_partial;
1300 skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
82788c7a
SH
1301 }
1302
1303 return skb;
14d0263f
SH
1304free_partial:
1305 kfree_skb(skb);
1306nomem:
1307 return NULL;
82788c7a
SH
1308}
1309
55c9dd35
SH
1310static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
1311{
1312 sky2_put_idx(sky2->hw, rxq, sky2->rx_put);
1313}
1314
cd28ab6a
SH
1315/*
1316 * Allocate and setup receiver buffer pool.
14d0263f
SH
1317 * Normal case this ends up creating one list element for skb
1318 * in the receive ring. Worst case if using large MTU and each
1319 * allocation falls on a different 64 bit region, that results
1320 * in 6 list elements per ring entry.
1321 * One element is used for checksum enable/disable, and one
1322 * extra to avoid wrap.
cd28ab6a 1323 */
6b1a3aef 1324static int sky2_rx_start(struct sky2_port *sky2)
cd28ab6a 1325{
6b1a3aef 1326 struct sky2_hw *hw = sky2->hw;
14d0263f 1327 struct rx_ring_info *re;
6b1a3aef 1328 unsigned rxq = rxqaddr[sky2->port];
5f06eba4 1329 unsigned i, size, thresh;
cd28ab6a 1330
6b1a3aef 1331 sky2->rx_put = sky2->rx_next = 0;
af4ed7e6 1332 sky2_qset(hw, rxq);
977bdf06 1333
c3905bc4
SH
1334 /* On PCI express lowering the watermark gives better performance */
1335 if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
1336 sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX);
1337
1338 /* These chips have no ram buffer?
1339 * MAC Rx RAM Read is controlled by hardware */
8df9a876 1340 if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
c3905bc4
SH
1341 (hw->chip_rev == CHIP_REV_YU_EC_U_A1
1342 || hw->chip_rev == CHIP_REV_YU_EC_U_B0))
f449c7c1 1343 sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
977bdf06 1344
6b1a3aef
SH
1345 sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
1346
ea76e635
SH
1347 if (!(hw->flags & SKY2_HW_NEW_LE))
1348 rx_set_checksum(sky2);
14d0263f
SH
1349
1350 /* Space needed for frame data + headers rounded up */
f957da2a 1351 size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
14d0263f
SH
1352
1353 /* Stopping point for hardware truncation */
1354 thresh = (size - 8) / sizeof(u32);
1355
5f06eba4 1356 sky2->rx_nfrags = size >> PAGE_SHIFT;
14d0263f
SH
1357 BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));
1358
5f06eba4
SH
1359 /* Compute residue after pages */
1360 size -= sky2->rx_nfrags << PAGE_SHIFT;
14d0263f 1361
5f06eba4
SH
1362 /* Optimize to handle small packets and headers */
1363 if (size < copybreak)
1364 size = copybreak;
1365 if (size < ETH_HLEN)
1366 size = ETH_HLEN;
14d0263f 1367
14d0263f
SH
1368 sky2->rx_data_size = size;
1369
bd1c6869
SH
1370 skb_queue_head_init(&sky2->rx_recycle);
1371
14d0263f 1372 /* Fill Rx ring */
793b883e 1373 for (i = 0; i < sky2->rx_pending; i++) {
14d0263f 1374 re = sky2->rx_ring + i;
cd28ab6a 1375
14d0263f 1376 re->skb = sky2_rx_alloc(sky2);
cd28ab6a
SH
1377 if (!re->skb)
1378 goto nomem;
1379
454e6cb6
SH
1380 if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) {
1381 dev_kfree_skb(re->skb);
1382 re->skb = NULL;
1383 goto nomem;
1384 }
1385
14d0263f 1386 sky2_rx_submit(sky2, re);
cd28ab6a
SH
1387 }
1388
a1433ac4
SH
1389 /*
1390 * The receiver hangs if it receives frames larger than the
1391 * packet buffer. As a workaround, truncate oversize frames, but
1392 * the register is limited to 9 bits, so if you do frames > 2052
1393 * you better get the MTU right!
1394 */
a1433ac4
SH
1395 if (thresh > 0x1ff)
1396 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
1397 else {
1398 sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh);
1399 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
1400 }
1401
6b1a3aef 1402 /* Tell chip about available buffers */
55c9dd35 1403 sky2_rx_update(sky2, rxq);
cd28ab6a
SH
1404 return 0;
1405nomem:
1406 sky2_rx_clean(sky2);
1407 return -ENOMEM;
1408}
1409
1410/* Bring up network interface. */
1411static int sky2_up(struct net_device *dev)
1412{
1413 struct sky2_port *sky2 = netdev_priv(dev);
1414 struct sky2_hw *hw = sky2->hw;
1415 unsigned port = sky2->port;
e0c28116 1416 u32 imask, ramsize;
ee7abb04 1417 int cap, err = -ENOMEM;
843a46f4 1418 struct net_device *otherdev = hw->dev[sky2->port^1];
cd28ab6a 1419
ee7abb04
SH
1420 /*
1421 * On dual port PCI-X card, there is an problem where status
1422 * can be received out of order due to split transactions
843a46f4 1423 */
ee7abb04
SH
1424 if (otherdev && netif_running(otherdev) &&
1425 (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
ee7abb04
SH
1426 u16 cmd;
1427
b32f40c4 1428 cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
ee7abb04 1429 cmd &= ~PCI_X_CMD_MAX_SPLIT;
b32f40c4
SH
1430 sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
1431
ee7abb04 1432 }
843a46f4 1433
55d7b4e6
SH
1434 netif_carrier_off(dev);
1435
cd28ab6a
SH
1436 /* must be power of 2 */
1437 sky2->tx_le = pci_alloc_consistent(hw->pdev,
ee5f68fe 1438 sky2->tx_ring_size *
793b883e 1439 sizeof(struct sky2_tx_le),
cd28ab6a
SH
1440 &sky2->tx_le_map);
1441 if (!sky2->tx_le)
1442 goto err_out;
1443
ee5f68fe 1444 sky2->tx_ring = kcalloc(sky2->tx_ring_size, sizeof(struct tx_ring_info),
cd28ab6a
SH
1445 GFP_KERNEL);
1446 if (!sky2->tx_ring)
1447 goto err_out;
88f5f0ca
SH
1448
1449 tx_init(sky2);
cd28ab6a
SH
1450
1451 sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
1452 &sky2->rx_le_map);
1453 if (!sky2->rx_le)
1454 goto err_out;
1455 memset(sky2->rx_le, 0, RX_LE_BYTES);
1456
291ea614 1457 sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info),
cd28ab6a
SH
1458 GFP_KERNEL);
1459 if (!sky2->rx_ring)
1460 goto err_out;
1461
1462 sky2_mac_init(hw, port);
1463
e0c28116
SH
1464 /* Register is number of 4K blocks on internal RAM buffer. */
1465 ramsize = sky2_read8(hw, B2_E_0) * 4;
1466 if (ramsize > 0) {
67712901 1467 u32 rxspace;
cd28ab6a 1468
39dbd958 1469 hw->flags |= SKY2_HW_RAM_BUFFER;
e0c28116 1470 pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
67712901
SH
1471 if (ramsize < 16)
1472 rxspace = ramsize / 2;
1473 else
1474 rxspace = 8 + (2*(ramsize - 16))/3;
cd28ab6a 1475
67712901
SH
1476 sky2_ramset(hw, rxqaddr[port], 0, rxspace);
1477 sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);
1478
1479 /* Make sure SyncQ is disabled */
1480 sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
1481 RB_RST_SET);
1482 }
793b883e 1483
af4ed7e6 1484 sky2_qset(hw, txqaddr[port]);
5a5b1ea0 1485
69161611
SH
1486 /* This is copied from sk98lin 10.0.5.3; no one tells me about erratta's */
1487 if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev == CHIP_REV_YU_EX_B0)
1488 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_TEST), F_TX_CHK_AUTO_OFF);
1489
977bdf06 1490 /* Set almost empty threshold */
c2716fb4
SH
1491 if (hw->chip_id == CHIP_ID_YUKON_EC_U
1492 && hw->chip_rev == CHIP_REV_YU_EC_U_A0)
b628ed98 1493 sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV);
5a5b1ea0 1494
6b1a3aef 1495 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
ee5f68fe 1496 sky2->tx_ring_size - 1);
cd28ab6a 1497
d494eacd
SH
1498#ifdef SKY2_VLAN_TAG_USED
1499 sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
1500#endif
1501
6b1a3aef 1502 err = sky2_rx_start(sky2);
6de16237 1503 if (err)
cd28ab6a
SH
1504 goto err_out;
1505
cd28ab6a 1506 /* Enable interrupts from phy/mac for port */
e07b1aa8 1507 imask = sky2_read32(hw, B0_IMSK);
f4ea431b 1508 imask |= portirq_msk[port];
e07b1aa8 1509 sky2_write32(hw, B0_IMSK, imask);
1fd82f3c 1510 sky2_read32(hw, B0_IMSK);
e07b1aa8 1511
a11da890
AD
1512 if (netif_msg_ifup(sky2))
1513 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
af18d8b8 1514
cd28ab6a
SH
1515 return 0;
1516
1517err_out:
1b537565 1518 if (sky2->rx_le) {
cd28ab6a
SH
1519 pci_free_consistent(hw->pdev, RX_LE_BYTES,
1520 sky2->rx_le, sky2->rx_le_map);
1b537565
SH
1521 sky2->rx_le = NULL;
1522 }
1523 if (sky2->tx_le) {
cd28ab6a 1524 pci_free_consistent(hw->pdev,
ee5f68fe 1525 sky2->tx_ring_size * sizeof(struct sky2_tx_le),
cd28ab6a 1526 sky2->tx_le, sky2->tx_le_map);
1b537565
SH
1527 sky2->tx_le = NULL;
1528 }
1529 kfree(sky2->tx_ring);
1530 kfree(sky2->rx_ring);
cd28ab6a 1531
1b537565
SH
1532 sky2->tx_ring = NULL;
1533 sky2->rx_ring = NULL;
cd28ab6a
SH
1534 return err;
1535}
1536
793b883e 1537/* Modular subtraction in ring */
ee5f68fe 1538static inline int tx_inuse(const struct sky2_port *sky2)
793b883e 1539{
ee5f68fe 1540 return (sky2->tx_prod - sky2->tx_cons) & (sky2->tx_ring_size - 1);
793b883e 1541}
cd28ab6a 1542
793b883e
SH
1543/* Number of list elements available for next tx */
1544static inline int tx_avail(const struct sky2_port *sky2)
cd28ab6a 1545{
ee5f68fe 1546 return sky2->tx_pending - tx_inuse(sky2);
cd28ab6a
SH
1547}
1548
793b883e 1549/* Estimate of number of transmit list elements required */
28bd181a 1550static unsigned tx_le_req(const struct sk_buff *skb)
cd28ab6a 1551{
793b883e
SH
1552 unsigned count;
1553
1554 count = sizeof(dma_addr_t) / sizeof(u32);
1555 count += skb_shinfo(skb)->nr_frags * count;
1556
89114afd 1557 if (skb_is_gso(skb))
793b883e
SH
1558 ++count;
1559
84fa7933 1560 if (skb->ip_summed == CHECKSUM_PARTIAL)
793b883e
SH
1561 ++count;
1562
1563 return count;
cd28ab6a
SH
1564}
1565
793b883e
SH
1566/*
1567 * Put one packet in ring for transmit.
1568 * A single packet can generate multiple list elements, and
1569 * the number of ring elements will probably be less than the number
1570 * of list elements used.
1571 */
cd28ab6a
SH
1572static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1573{
1574 struct sky2_port *sky2 = netdev_priv(dev);
1575 struct sky2_hw *hw = sky2->hw;
d1f13708 1576 struct sky2_tx_le *le = NULL;
6cdbbdf3 1577 struct tx_ring_info *re;
9b289c33 1578 unsigned i, len;
cd28ab6a 1579 dma_addr_t mapping;
5dce95e5
SH
1580 u32 upper;
1581 u16 slot;
cd28ab6a
SH
1582 u16 mss;
1583 u8 ctrl;
1584
2bb8c262
SH
1585 if (unlikely(tx_avail(sky2) < tx_le_req(skb)))
1586 return NETDEV_TX_BUSY;
cd28ab6a 1587
cd28ab6a
SH
1588 len = skb_headlen(skb);
1589 mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
793b883e 1590
454e6cb6
SH
1591 if (pci_dma_mapping_error(hw->pdev, mapping))
1592 goto mapping_error;
1593
9b289c33 1594 slot = sky2->tx_prod;
454e6cb6
SH
1595 if (unlikely(netif_msg_tx_queued(sky2)))
1596 printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n",
9b289c33 1597 dev->name, slot, skb->len);
454e6cb6 1598
86c6887e 1599 /* Send high bits if needed */
5dce95e5
SH
1600 upper = upper_32_bits(mapping);
1601 if (upper != sky2->tx_last_upper) {
9b289c33 1602 le = get_tx_le(sky2, &slot);
5dce95e5
SH
1603 le->addr = cpu_to_le32(upper);
1604 sky2->tx_last_upper = upper;
793b883e 1605 le->opcode = OP_ADDR64 | HW_OWNER;
793b883e 1606 }
cd28ab6a
SH
1607
1608 /* Check for TCP Segmentation Offload */
7967168c 1609 mss = skb_shinfo(skb)->gso_size;
793b883e 1610 if (mss != 0) {
ea76e635
SH
1611
1612 if (!(hw->flags & SKY2_HW_NEW_LE))
69161611
SH
1613 mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
1614
1615 if (mss != sky2->tx_last_mss) {
9b289c33 1616 le = get_tx_le(sky2, &slot);
69161611 1617 le->addr = cpu_to_le32(mss);
ea76e635
SH
1618
1619 if (hw->flags & SKY2_HW_NEW_LE)
69161611
SH
1620 le->opcode = OP_MSS | HW_OWNER;
1621 else
1622 le->opcode = OP_LRGLEN | HW_OWNER;
e07560cd
SH
1623 sky2->tx_last_mss = mss;
1624 }
cd28ab6a
SH
1625 }
1626
cd28ab6a 1627 ctrl = 0;
d1f13708
SH
1628#ifdef SKY2_VLAN_TAG_USED
1629 /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
1630 if (sky2->vlgrp && vlan_tx_tag_present(skb)) {
1631 if (!le) {
9b289c33 1632 le = get_tx_le(sky2, &slot);
f65b138c 1633 le->addr = 0;
d1f13708 1634 le->opcode = OP_VLAN|HW_OWNER;
d1f13708
SH
1635 } else
1636 le->opcode |= OP_VLAN;
1637 le->length = cpu_to_be16(vlan_tx_tag_get(skb));
1638 ctrl |= INS_VLAN;
1639 }
1640#endif
1641
1642 /* Handle TCP checksum offload */
84fa7933 1643 if (skb->ip_summed == CHECKSUM_PARTIAL) {
69161611 1644		/* On Yukon EX (some versions) the encoding changed. */
ea76e635 1645 if (hw->flags & SKY2_HW_AUTO_TX_SUM)
69161611
SH
1646 ctrl |= CALSUM; /* auto checksum */
1647 else {
1648 const unsigned offset = skb_transport_offset(skb);
1649 u32 tcpsum;
1650
1651 tcpsum = offset << 16; /* sum start */
1652 tcpsum |= offset + skb->csum_offset; /* sum write */
1653
1654 ctrl |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
1655 if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1656 ctrl |= UDPTCP;
1657
1658 if (tcpsum != sky2->tx_tcpsum) {
1659 sky2->tx_tcpsum = tcpsum;
1660
9b289c33 1661 le = get_tx_le(sky2, &slot);
69161611
SH
1662 le->addr = cpu_to_le32(tcpsum);
1663 le->length = 0; /* initial checksum value */
1664 le->ctrl = 1; /* one packet */
1665 le->opcode = OP_TCPLISW | HW_OWNER;
1666 }
1d179332 1667 }
cd28ab6a
SH
1668 }
1669
9b289c33 1670 le = get_tx_le(sky2, &slot);
d6e74b6b 1671 le->addr = cpu_to_le32(lower_32_bits(mapping));
cd28ab6a
SH
1672 le->length = cpu_to_le16(len);
1673 le->ctrl = ctrl;
793b883e 1674 le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);
cd28ab6a 1675
291ea614 1676 re = tx_le_re(sky2, le);
cd28ab6a 1677 re->skb = skb;
6cdbbdf3 1678 pci_unmap_addr_set(re, mapaddr, mapping);
291ea614 1679 pci_unmap_len_set(re, maplen, len);
cd28ab6a
SH
1680
1681 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
291ea614 1682 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
cd28ab6a
SH
1683
1684 mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
1685 frag->size, PCI_DMA_TODEVICE);
86c6887e 1686
454e6cb6
SH
1687 if (pci_dma_mapping_error(hw->pdev, mapping))
1688 goto mapping_unwind;
1689
5dce95e5
SH
1690 upper = upper_32_bits(mapping);
1691 if (upper != sky2->tx_last_upper) {
9b289c33 1692 le = get_tx_le(sky2, &slot);
5dce95e5
SH
1693 le->addr = cpu_to_le32(upper);
1694 sky2->tx_last_upper = upper;
793b883e 1695 le->opcode = OP_ADDR64 | HW_OWNER;
cd28ab6a
SH
1696 }
1697
9b289c33 1698 le = get_tx_le(sky2, &slot);
d6e74b6b 1699 le->addr = cpu_to_le32(lower_32_bits(mapping));
cd28ab6a
SH
1700 le->length = cpu_to_le16(frag->size);
1701 le->ctrl = ctrl;
793b883e 1702 le->opcode = OP_BUFFER | HW_OWNER;
cd28ab6a 1703
291ea614
SH
1704 re = tx_le_re(sky2, le);
1705 re->skb = skb;
1706 pci_unmap_addr_set(re, mapaddr, mapping);
1707 pci_unmap_len_set(re, maplen, frag->size);
cd28ab6a 1708 }
6cdbbdf3 1709
cd28ab6a
SH
1710 le->ctrl |= EOP;
1711
9b289c33
MM
1712 sky2->tx_prod = slot;
1713
97bda706
SH
1714 if (tx_avail(sky2) <= MAX_SKB_TX_LE)
1715 netif_stop_queue(dev);
b19666d9 1716
290d4de5 1717 sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
cd28ab6a 1718
cd28ab6a 1719 return NETDEV_TX_OK;
454e6cb6
SH
1720
1721mapping_unwind:
ee5f68fe 1722 for (i = sky2->tx_prod; i != slot; i = RING_NEXT(i, sky2->tx_ring_size)) {
454e6cb6
SH
1723 le = sky2->tx_le + i;
1724 re = sky2->tx_ring + i;
1725
1726 switch(le->opcode & ~HW_OWNER) {
1727 case OP_LARGESEND:
1728 case OP_PACKET:
1729 pci_unmap_single(hw->pdev,
1730 pci_unmap_addr(re, mapaddr),
1731 pci_unmap_len(re, maplen),
1732 PCI_DMA_TODEVICE);
1733 break;
1734 case OP_BUFFER:
1735 pci_unmap_page(hw->pdev, pci_unmap_addr(re, mapaddr),
1736 pci_unmap_len(re, maplen),
1737 PCI_DMA_TODEVICE);
1738 break;
1739 }
1740 }
1741
454e6cb6
SH
1742mapping_error:
1743 if (net_ratelimit())
1744 dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
1745 dev_kfree_skb(skb);
1746 return NETDEV_TX_OK;
cd28ab6a
SH
1747}
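/* Editor's note -- a sketch of the list element stream one packet can
 * produce in sky2_xmit_frame() above; which optional elements appear
 * depends on what changed since the previous packet:
 *
 *   OP_ADDR64		only when the upper 32 bits of the DMA address
 *			differ from tx_last_upper
 *   OP_MSS/OP_LRGLEN	only when gso_size differs from tx_last_mss
 *   OP_VLAN		tag insertion, may piggyback on the previous element
 *   OP_TCPLISW		checksum start/write offsets (chips without
 *			SKY2_HW_AUTO_TX_SUM)
 *   OP_LARGESEND or OP_PACKET	the skb head
 *   OP_BUFFER		one per page fragment
 *
 * The last element gets EOP set, tx_prod advances to 'slot', and the
 * queue is stopped once no more than MAX_SKB_TX_LE slots remain.
 */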
1748
cd28ab6a 1749/*
793b883e
SH
 1750 * Free ring elements starting at tx_cons until "done"
1751 *
481cea4a
SH
1752 * NB:
1753 * 1. The hardware will tell us about partial completion of multi-part
291ea614 1754 *    buffers, so make sure not to free the skb too early.
481cea4a
SH
 1755 * 2. This may run in parallel with start_xmit because it only
 1756 *    looks at the tail of the FIFO queue (tx_cons), not
1757 * the head (tx_prod)
cd28ab6a 1758 */
d11c13e7 1759static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
cd28ab6a 1760{
d11c13e7 1761 struct net_device *dev = sky2->netdev;
af2a58ac 1762 struct pci_dev *pdev = sky2->hw->pdev;
291ea614 1763 unsigned idx;
cd28ab6a 1764
ee5f68fe 1765 BUG_ON(done >= sky2->tx_ring_size);
2224795d 1766
291ea614 1767 for (idx = sky2->tx_cons; idx != done;
ee5f68fe 1768 idx = RING_NEXT(idx, sky2->tx_ring_size)) {
291ea614
SH
1769 struct sky2_tx_le *le = sky2->tx_le + idx;
1770 struct tx_ring_info *re = sky2->tx_ring + idx;
1771
1772 switch(le->opcode & ~HW_OWNER) {
1773 case OP_LARGESEND:
1774 case OP_PACKET:
1775 pci_unmap_single(pdev,
1776 pci_unmap_addr(re, mapaddr),
1777 pci_unmap_len(re, maplen),
1778 PCI_DMA_TODEVICE);
af2a58ac 1779 break;
291ea614
SH
1780 case OP_BUFFER:
1781 pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
1782 pci_unmap_len(re, maplen),
734d1868 1783 PCI_DMA_TODEVICE);
291ea614
SH
1784 break;
1785 }
1786
1787 if (le->ctrl & EOP) {
bd1c6869
SH
1788 struct sk_buff *skb = re->skb;
1789
291ea614
SH
1790 if (unlikely(netif_msg_tx_done(sky2)))
1791 printk(KERN_DEBUG "%s: tx done %u\n",
1792 dev->name, idx);
3cf26753 1793
7138a0f5 1794 dev->stats.tx_packets++;
bd1c6869
SH
1795 dev->stats.tx_bytes += skb->len;
1796
1797 if (skb_queue_len(&sky2->rx_recycle) < sky2->rx_pending
1798 && skb_recycle_check(skb, sky2->rx_data_size
1799 + sky2_rx_pad(sky2->hw)))
1800 __skb_queue_head(&sky2->rx_recycle, skb);
1801 else
1802 dev_kfree_skb_any(skb);
2bf56fe2 1803
ee5f68fe 1804 sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
cd28ab6a 1805 }
793b883e 1806 }
793b883e 1807
291ea614 1808 sky2->tx_cons = idx;
50432cb5
SH
1809 smp_mb();
1810
22e11703 1811 if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
cd28ab6a 1812 netif_wake_queue(dev);
cd28ab6a
SH
1813}
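/* Editor's note: in sky2_tx_complete() above only the element carrying
 * EOP frees (or recycles) the skb; the preceding elements of the same
 * packet just release their DMA mappings, which is why partial
 * completions reported by the hardware are safe.  The smp_mb() orders
 * the tx_cons update ahead of the free-space recheck that may wake the
 * transmit queue stopped by sky2_xmit_frame().
 */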
1814
264bb4fa 1815static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
a510996b 1816{
a510996b
MM
1817 /* Disable Force Sync bit and Enable Alloc bit */
1818 sky2_write8(hw, SK_REG(port, TXA_CTRL),
1819 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
1820
1821 /* Stop Interval Timer and Limit Counter of Tx Arbiter */
1822 sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
1823 sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
1824
1825 /* Reset the PCI FIFO of the async Tx queue */
1826 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR),
1827 BMU_RST_SET | BMU_FIFO_RST);
1828
1829 /* Reset the Tx prefetch units */
1830 sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
1831 PREF_UNIT_RST_SET);
1832
1833 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
1834 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
1835}
1836
cd28ab6a
SH
1837/* Network shutdown */
1838static int sky2_down(struct net_device *dev)
1839{
1840 struct sky2_port *sky2 = netdev_priv(dev);
1841 struct sky2_hw *hw = sky2->hw;
1842 unsigned port = sky2->port;
1843 u16 ctrl;
e07b1aa8 1844 u32 imask;
cd28ab6a 1845
1b537565
SH
1846 /* Never really got started! */
1847 if (!sky2->tx_le)
1848 return 0;
1849
cd28ab6a
SH
1850 if (netif_msg_ifdown(sky2))
1851 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
1852
d104acaf
SH
1853 /* Force flow control off */
1854 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
793b883e 1855
cd28ab6a
SH
1856 /* Stop transmitter */
1857 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
1858 sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));
1859
1860 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
793b883e 1861 RB_RST_SET | RB_DIS_OP_MD);
cd28ab6a
SH
1862
1863 ctrl = gma_read16(hw, port, GM_GP_CTRL);
793b883e 1864 ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
cd28ab6a
SH
1865 gma_write16(hw, port, GM_GP_CTRL, ctrl);
1866
1867 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1868
1869 /* Workaround shared GMAC reset */
793b883e
SH
1870 if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0
1871 && port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
cd28ab6a
SH
1872 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
1873
cd28ab6a 1874 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
cd28ab6a 1875
6c83504f
SH
 1876	/* Force any delayed status interrupt and NAPI */
1877 sky2_write32(hw, STAT_LEV_TIMER_CNT, 0);
1878 sky2_write32(hw, STAT_TX_TIMER_CNT, 0);
1879 sky2_write32(hw, STAT_ISR_TIMER_CNT, 0);
1880 sky2_read8(hw, STAT_ISR_TIMER_CTRL);
1881
a947a39d
MM
1882 sky2_rx_stop(sky2);
1883
1884 /* Disable port IRQ */
1885 imask = sky2_read32(hw, B0_IMSK);
1886 imask &= ~portirq_msk[port];
1887 sky2_write32(hw, B0_IMSK, imask);
1888 sky2_read32(hw, B0_IMSK);
1889
6c83504f
SH
1890 synchronize_irq(hw->pdev->irq);
1891 napi_synchronize(&hw->napi);
1892
0da6d7b3 1893 spin_lock_bh(&sky2->phy_lock);
b96936da 1894 sky2_phy_power_down(hw, port);
0da6d7b3 1895 spin_unlock_bh(&sky2->phy_lock);
d3bcfbeb 1896
d571b694 1897 /* turn off LED's */
cd28ab6a
SH
1898 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
1899
264bb4fa
MM
1900 sky2_tx_reset(hw, port);
1901
481cea4a
SH
1902 /* Free any pending frames stuck in HW queue */
1903 sky2_tx_complete(sky2, sky2->tx_prod);
1904
cd28ab6a
SH
1905 sky2_rx_clean(sky2);
1906
1907 pci_free_consistent(hw->pdev, RX_LE_BYTES,
1908 sky2->rx_le, sky2->rx_le_map);
1909 kfree(sky2->rx_ring);
1910
1911 pci_free_consistent(hw->pdev,
ee5f68fe 1912 sky2->tx_ring_size * sizeof(struct sky2_tx_le),
cd28ab6a
SH
1913 sky2->tx_le, sky2->tx_le_map);
1914 kfree(sky2->tx_ring);
1915
1b537565
SH
1916 sky2->tx_le = NULL;
1917 sky2->rx_le = NULL;
1918
1919 sky2->rx_ring = NULL;
1920 sky2->tx_ring = NULL;
1921
cd28ab6a
SH
1922 return 0;
1923}
1924
1925static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
1926{
ea76e635 1927 if (hw->flags & SKY2_HW_FIBRE_PHY)
793b883e
SH
1928 return SPEED_1000;
1929
05745c4a
SH
1930 if (!(hw->flags & SKY2_HW_GIGABIT)) {
1931 if (aux & PHY_M_PS_SPEED_100)
1932 return SPEED_100;
1933 else
1934 return SPEED_10;
1935 }
cd28ab6a
SH
1936
1937 switch (aux & PHY_M_PS_SPEED_MSK) {
1938 case PHY_M_PS_SPEED_1000:
1939 return SPEED_1000;
1940 case PHY_M_PS_SPEED_100:
1941 return SPEED_100;
1942 default:
1943 return SPEED_10;
1944 }
1945}
1946
1947static void sky2_link_up(struct sky2_port *sky2)
1948{
1949 struct sky2_hw *hw = sky2->hw;
1950 unsigned port = sky2->port;
1951 u16 reg;
16ad91e1
SH
1952 static const char *fc_name[] = {
1953 [FC_NONE] = "none",
1954 [FC_TX] = "tx",
1955 [FC_RX] = "rx",
1956 [FC_BOTH] = "both",
1957 };
cd28ab6a 1958
cd28ab6a 1959 /* enable Rx/Tx */
2eaba1a2 1960 reg = gma_read16(hw, port, GM_GP_CTRL);
cd28ab6a
SH
1961 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
1962 gma_write16(hw, port, GM_GP_CTRL, reg);
cd28ab6a
SH
1963
1964 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
1965
1966 netif_carrier_on(sky2->netdev);
cd28ab6a 1967
75e80683 1968 mod_timer(&hw->watchdog_timer, jiffies + 1);
32c2c300 1969
cd28ab6a 1970 /* Turn on link LED */
793b883e 1971 sky2_write8(hw, SK_REG(port, LNK_LED_REG),
cd28ab6a
SH
1972 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
1973
1974 if (netif_msg_link(sky2))
1975 printk(KERN_INFO PFX
d571b694 1976 "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
cd28ab6a
SH
1977 sky2->netdev->name, sky2->speed,
1978 sky2->duplex == DUPLEX_FULL ? "full" : "half",
16ad91e1 1979 fc_name[sky2->flow_status]);
cd28ab6a
SH
1980}
1981
1982static void sky2_link_down(struct sky2_port *sky2)
1983{
1984 struct sky2_hw *hw = sky2->hw;
1985 unsigned port = sky2->port;
1986 u16 reg;
1987
1988 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
1989
1990 reg = gma_read16(hw, port, GM_GP_CTRL);
1991 reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
1992 gma_write16(hw, port, GM_GP_CTRL, reg);
cd28ab6a 1993
cd28ab6a 1994 netif_carrier_off(sky2->netdev);
cd28ab6a
SH
1995
 1996	/* Turn off link LED */
1997 sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
1998
1999 if (netif_msg_link(sky2))
2000 printk(KERN_INFO PFX "%s: Link is down.\n", sky2->netdev->name);
2eaba1a2 2001
cd28ab6a
SH
2002 sky2_phy_init(hw, port);
2003}
2004
16ad91e1
SH
2005static enum flow_control sky2_flow(int rx, int tx)
2006{
2007 if (rx)
2008 return tx ? FC_BOTH : FC_RX;
2009 else
2010 return tx ? FC_TX : FC_NONE;
2011}
2012
793b883e
SH
2013static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
2014{
2015 struct sky2_hw *hw = sky2->hw;
2016 unsigned port = sky2->port;
da4c1ff4 2017 u16 advert, lpa;
793b883e 2018
da4c1ff4 2019 advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
793b883e 2020 lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
793b883e
SH
2021 if (lpa & PHY_M_AN_RF) {
2022 printk(KERN_ERR PFX "%s: remote fault", sky2->netdev->name);
2023 return -1;
2024 }
2025
793b883e
SH
2026 if (!(aux & PHY_M_PS_SPDUP_RES)) {
2027 printk(KERN_ERR PFX "%s: speed/duplex mismatch",
2028 sky2->netdev->name);
2029 return -1;
2030 }
2031
793b883e 2032 sky2->speed = sky2_phy_speed(hw, aux);
7c74ac1c 2033 sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
793b883e 2034
da4c1ff4
SH
 2035	/* Since the pause result bits seem to be in different positions on
 2036	 * different chips, look at the registers.
2037 */
ea76e635 2038 if (hw->flags & SKY2_HW_FIBRE_PHY) {
da4c1ff4
SH
2039 /* Shift for bits in fiber PHY */
2040 advert &= ~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM);
2041 lpa &= ~(LPA_PAUSE_CAP|LPA_PAUSE_ASYM);
2042
2043 if (advert & ADVERTISE_1000XPAUSE)
2044 advert |= ADVERTISE_PAUSE_CAP;
2045 if (advert & ADVERTISE_1000XPSE_ASYM)
2046 advert |= ADVERTISE_PAUSE_ASYM;
2047 if (lpa & LPA_1000XPAUSE)
2048 lpa |= LPA_PAUSE_CAP;
2049 if (lpa & LPA_1000XPAUSE_ASYM)
2050 lpa |= LPA_PAUSE_ASYM;
2051 }
793b883e 2052
da4c1ff4
SH
2053 sky2->flow_status = FC_NONE;
2054 if (advert & ADVERTISE_PAUSE_CAP) {
2055 if (lpa & LPA_PAUSE_CAP)
2056 sky2->flow_status = FC_BOTH;
2057 else if (advert & ADVERTISE_PAUSE_ASYM)
2058 sky2->flow_status = FC_RX;
2059 } else if (advert & ADVERTISE_PAUSE_ASYM) {
2060 if ((lpa & LPA_PAUSE_CAP) && (lpa & LPA_PAUSE_ASYM))
2061 sky2->flow_status = FC_TX;
2062 }
793b883e 2063
16ad91e1 2064 if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000
93745494 2065 && !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX))
16ad91e1 2066 sky2->flow_status = FC_NONE;
2eaba1a2 2067
da4c1ff4 2068 if (sky2->flow_status & FC_TX)
793b883e
SH
2069 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
2070 else
2071 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
2072
2073 return 0;
2074}
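/* Editor's note: the pause resolution above, written out as a table
 * (fiber advertisement bits are first remapped onto the copper layout):
 *
 *   local advert              link partner             flow_status
 *   PAUSE_CAP                 PAUSE_CAP                FC_BOTH
 *   PAUSE_CAP + PAUSE_ASYM    no PAUSE_CAP             FC_RX
 *   PAUSE_ASYM only           PAUSE_CAP + PAUSE_ASYM   FC_TX
 *   anything else             --                       FC_NONE
 *
 * Half duplex links below 1000 Mbit then force FC_NONE except on
 * EC-U and EX chips.
 */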
cd28ab6a 2075
e07b1aa8
SH
2076/* Interrupt from PHY */
2077static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
cd28ab6a 2078{
e07b1aa8
SH
2079 struct net_device *dev = hw->dev[port];
2080 struct sky2_port *sky2 = netdev_priv(dev);
cd28ab6a
SH
2081 u16 istatus, phystat;
2082
ebc646f6
SH
2083 if (!netif_running(dev))
2084 return;
2085
e07b1aa8
SH
2086 spin_lock(&sky2->phy_lock);
2087 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
2088 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
2089
cd28ab6a
SH
2090 if (netif_msg_intr(sky2))
2091 printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
2092 sky2->netdev->name, istatus, phystat);
2093
0ea065e5 2094 if (istatus & PHY_M_IS_AN_COMPL) {
793b883e
SH
2095 if (sky2_autoneg_done(sky2, phystat) == 0)
2096 sky2_link_up(sky2);
2097 goto out;
2098 }
cd28ab6a 2099
793b883e
SH
2100 if (istatus & PHY_M_IS_LSP_CHANGE)
2101 sky2->speed = sky2_phy_speed(hw, phystat);
cd28ab6a 2102
793b883e
SH
2103 if (istatus & PHY_M_IS_DUP_CHANGE)
2104 sky2->duplex =
2105 (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
cd28ab6a 2106
793b883e
SH
2107 if (istatus & PHY_M_IS_LST_CHANGE) {
2108 if (phystat & PHY_M_PS_LINK_UP)
cd28ab6a 2109 sky2_link_up(sky2);
793b883e
SH
2110 else
2111 sky2_link_down(sky2);
cd28ab6a 2112 }
793b883e 2113out:
e07b1aa8 2114 spin_unlock(&sky2->phy_lock);
cd28ab6a
SH
2115}
2116
62335ab0 2117/* Transmit timeout is only called if we are running, carrier is up
302d1252
SH
2118 * and tx queue is full (stopped).
2119 */
cd28ab6a
SH
2120static void sky2_tx_timeout(struct net_device *dev)
2121{
2122 struct sky2_port *sky2 = netdev_priv(dev);
8cc048e3 2123 struct sky2_hw *hw = sky2->hw;
cd28ab6a
SH
2124
2125 if (netif_msg_timer(sky2))
2126 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
2127
8f24664d 2128 printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n",
62335ab0
SH
2129 dev->name, sky2->tx_cons, sky2->tx_prod,
2130 sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
2131 sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE)));
8f24664d 2132
81906791
SH
2133 /* can't restart safely under softirq */
2134 schedule_work(&hw->restart_work);
cd28ab6a
SH
2135}
2136
2137static int sky2_change_mtu(struct net_device *dev, int new_mtu)
2138{
6b1a3aef
SH
2139 struct sky2_port *sky2 = netdev_priv(dev);
2140 struct sky2_hw *hw = sky2->hw;
b628ed98 2141 unsigned port = sky2->port;
6b1a3aef
SH
2142 int err;
2143 u16 ctl, mode;
e07b1aa8 2144 u32 imask;
cd28ab6a
SH
2145
2146 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
2147 return -EINVAL;
2148
05745c4a
SH
2149 if (new_mtu > ETH_DATA_LEN &&
2150 (hw->chip_id == CHIP_ID_YUKON_FE ||
2151 hw->chip_id == CHIP_ID_YUKON_FE_P))
d2adf4f6
SH
2152 return -EINVAL;
2153
6b1a3aef
SH
2154 if (!netif_running(dev)) {
2155 dev->mtu = new_mtu;
2156 return 0;
2157 }
2158
e07b1aa8 2159 imask = sky2_read32(hw, B0_IMSK);
6b1a3aef
SH
2160 sky2_write32(hw, B0_IMSK, 0);
2161
018d1c66
SH
2162 dev->trans_start = jiffies; /* prevent tx timeout */
2163 netif_stop_queue(dev);
bea3348e 2164 napi_disable(&hw->napi);
018d1c66 2165
e07b1aa8
SH
2166 synchronize_irq(hw->pdev->irq);
2167
39dbd958 2168 if (!(hw->flags & SKY2_HW_RAM_BUFFER))
69161611 2169 sky2_set_tx_stfwd(hw, port);
b628ed98
SH
2170
2171 ctl = gma_read16(hw, port, GM_GP_CTRL);
2172 gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
6b1a3aef
SH
2173 sky2_rx_stop(sky2);
2174 sky2_rx_clean(sky2);
cd28ab6a
SH
2175
2176 dev->mtu = new_mtu;
14d0263f 2177
6b1a3aef
SH
2178 mode = DATA_BLIND_VAL(DATA_BLIND_DEF) |
2179 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
2180
2181 if (dev->mtu > ETH_DATA_LEN)
2182 mode |= GM_SMOD_JUMBO_ENA;
2183
b628ed98 2184 gma_write16(hw, port, GM_SERIAL_MODE, mode);
cd28ab6a 2185
b628ed98 2186 sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD);
cd28ab6a 2187
6b1a3aef 2188 err = sky2_rx_start(sky2);
e07b1aa8 2189 sky2_write32(hw, B0_IMSK, imask);
018d1c66 2190
d1d08d12 2191 sky2_read32(hw, B0_Y2_SP_LISR);
bea3348e
SH
2192 napi_enable(&hw->napi);
2193
1b537565
SH
2194 if (err)
2195 dev_close(dev);
2196 else {
b628ed98 2197 gma_write16(hw, port, GM_GP_CTRL, ctl);
1b537565 2198
1b537565
SH
2199 netif_wake_queue(dev);
2200 }
2201
cd28ab6a
SH
2202 return err;
2203}
2204
14d0263f
SH
2205/* For small just reuse existing skb for next receive */
2206static struct sk_buff *receive_copy(struct sky2_port *sky2,
2207 const struct rx_ring_info *re,
2208 unsigned length)
2209{
2210 struct sk_buff *skb;
2211
2212 skb = netdev_alloc_skb(sky2->netdev, length + 2);
2213 if (likely(skb)) {
2214 skb_reserve(skb, 2);
2215 pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
2216 length, PCI_DMA_FROMDEVICE);
d626f62b 2217 skb_copy_from_linear_data(re->skb, skb->data, length);
14d0263f
SH
2218 skb->ip_summed = re->skb->ip_summed;
2219 skb->csum = re->skb->csum;
2220 pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
2221 length, PCI_DMA_FROMDEVICE);
2222 re->skb->ip_summed = CHECKSUM_NONE;
489b10c1 2223 skb_put(skb, length);
14d0263f
SH
2224 }
2225 return skb;
2226}
2227
2228/* Adjust length of skb with fragments to match received data */
2229static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
2230 unsigned int length)
2231{
2232 int i, num_frags;
2233 unsigned int size;
2234
2235 /* put header into skb */
2236 size = min(length, hdr_space);
2237 skb->tail += size;
2238 skb->len += size;
2239 length -= size;
2240
2241 num_frags = skb_shinfo(skb)->nr_frags;
2242 for (i = 0; i < num_frags; i++) {
2243 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2244
2245 if (length == 0) {
2246 /* don't need this page */
2247 __free_page(frag->page);
2248 --skb_shinfo(skb)->nr_frags;
2249 } else {
2250 size = min(length, (unsigned) PAGE_SIZE);
2251
2252 frag->size = size;
2253 skb->data_len += size;
2254 skb->truesize += size;
2255 skb->len += size;
2256 length -= size;
2257 }
2258 }
2259}
2260
2261/* Normal packet - take skb from ring element and put in a new one */
2262static struct sk_buff *receive_new(struct sky2_port *sky2,
2263 struct rx_ring_info *re,
2264 unsigned int length)
2265{
2266 struct sk_buff *skb, *nskb;
2267 unsigned hdr_space = sky2->rx_data_size;
2268
14d0263f
SH
2269 /* Don't be tricky about reusing pages (yet) */
2270 nskb = sky2_rx_alloc(sky2);
2271 if (unlikely(!nskb))
2272 return NULL;
2273
2274 skb = re->skb;
2275 sky2_rx_unmap_skb(sky2->hw->pdev, re);
2276
2277 prefetch(skb->data);
2278 re->skb = nskb;
454e6cb6
SH
2279 if (sky2_rx_map_skb(sky2->hw->pdev, re, hdr_space)) {
2280 dev_kfree_skb(nskb);
2281 re->skb = skb;
2282 return NULL;
2283 }
14d0263f
SH
2284
2285 if (skb_shinfo(skb)->nr_frags)
2286 skb_put_frags(skb, hdr_space, length);
2287 else
489b10c1 2288 skb_put(skb, length);
14d0263f
SH
2289 return skb;
2290}
2291
cd28ab6a
SH
2292/*
2293 * Receive one packet.
d571b694 2294 * For larger packets, get new buffer.
cd28ab6a 2295 */
497d7c86 2296static struct sk_buff *sky2_receive(struct net_device *dev,
cd28ab6a
SH
2297 u16 length, u32 status)
2298{
497d7c86 2299 struct sky2_port *sky2 = netdev_priv(dev);
291ea614 2300 struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next;
79e57d32 2301 struct sk_buff *skb = NULL;
d6532232
SH
2302 u16 count = (status & GMR_FS_LEN) >> 16;
2303
2304#ifdef SKY2_VLAN_TAG_USED
2305 /* Account for vlan tag */
2306 if (sky2->vlgrp && (status & GMR_FS_VLAN))
2307 count -= VLAN_HLEN;
2308#endif
cd28ab6a
SH
2309
2310 if (unlikely(netif_msg_rx_status(sky2)))
2311 printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
497d7c86 2312 dev->name, sky2->rx_next, status, length);
cd28ab6a 2313
793b883e 2314 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
d70cd51a 2315 prefetch(sky2->rx_ring + sky2->rx_next);
cd28ab6a 2316
3b12e014
SH
 2317	/* This chip has hardware problems that generate bogus status.
2318 * So do only marginal checking and expect higher level protocols
2319 * to handle crap frames.
2320 */
2321 if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
2322 sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 &&
2323 length != count)
2324 goto okay;
2325
42eeea01 2326 if (status & GMR_FS_ANY_ERR)
cd28ab6a
SH
2327 goto error;
2328
42eeea01
SH
2329 if (!(status & GMR_FS_RX_OK))
2330 goto resubmit;
2331
d6532232
SH
2332 /* if length reported by DMA does not match PHY, packet was truncated */
2333 if (length != count)
3b12e014 2334 goto len_error;
71749531 2335
3b12e014 2336okay:
14d0263f
SH
2337 if (length < copybreak)
2338 skb = receive_copy(sky2, re, length);
2339 else
2340 skb = receive_new(sky2, re, length);
793b883e 2341resubmit:
14d0263f 2342 sky2_rx_submit(sky2, re);
79e57d32 2343
cd28ab6a
SH
2344 return skb;
2345
3b12e014 2346len_error:
71749531
SH
2347 /* Truncation of overlength packets
2348 causes PHY length to not match MAC length */
7138a0f5 2349 ++dev->stats.rx_length_errors;
d6532232 2350 if (netif_msg_rx_err(sky2) && net_ratelimit())
3b12e014
SH
2351 pr_info(PFX "%s: rx length error: status %#x length %d\n",
2352 dev->name, status, length);
d6532232 2353 goto resubmit;
71749531 2354
cd28ab6a 2355error:
7138a0f5 2356 ++dev->stats.rx_errors;
b6d77734 2357 if (status & GMR_FS_RX_FF_OV) {
7138a0f5 2358 dev->stats.rx_over_errors++;
b6d77734
SH
2359 goto resubmit;
2360 }
6e15b712 2361
3be92a70 2362 if (netif_msg_rx_err(sky2) && net_ratelimit())
cd28ab6a 2363 printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
497d7c86 2364 dev->name, status, length);
793b883e
SH
2365
2366 if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
7138a0f5 2367 dev->stats.rx_length_errors++;
cd28ab6a 2368 if (status & GMR_FS_FRAGMENT)
7138a0f5 2369 dev->stats.rx_frame_errors++;
cd28ab6a 2370 if (status & GMR_FS_CRC_ERR)
7138a0f5 2371 dev->stats.rx_crc_errors++;
79e57d32 2372
793b883e 2373 goto resubmit;
cd28ab6a
SH
2374}
2375
e07b1aa8
SH
2376/* Transmit complete */
2377static inline void sky2_tx_done(struct net_device *dev, u16 last)
13b97b74 2378{
e07b1aa8 2379 struct sky2_port *sky2 = netdev_priv(dev);
302d1252 2380
49d4b8ba 2381 if (netif_running(dev))
e07b1aa8 2382 sky2_tx_complete(sky2, last);
cd28ab6a
SH
2383}
2384
37e5a243
SH
2385static inline void sky2_skb_rx(const struct sky2_port *sky2,
2386 u32 status, struct sk_buff *skb)
2387{
2388#ifdef SKY2_VLAN_TAG_USED
2389 u16 vlan_tag = be16_to_cpu(sky2->rx_tag);
2390 if (sky2->vlgrp && (status & GMR_FS_VLAN)) {
2391 if (skb->ip_summed == CHECKSUM_NONE)
2392 vlan_hwaccel_receive_skb(skb, sky2->vlgrp, vlan_tag);
2393 else
2394 vlan_gro_receive(&sky2->hw->napi, sky2->vlgrp,
2395 vlan_tag, skb);
2396 return;
2397 }
2398#endif
2399 if (skb->ip_summed == CHECKSUM_NONE)
2400 netif_receive_skb(skb);
2401 else
2402 napi_gro_receive(&sky2->hw->napi, skb);
2403}
2404
bf15fe99
SH
2405static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
2406 unsigned packets, unsigned bytes)
2407{
2408 if (packets) {
2409 struct net_device *dev = hw->dev[port];
2410
2411 dev->stats.rx_packets += packets;
2412 dev->stats.rx_bytes += bytes;
2413 dev->last_rx = jiffies;
2414 sky2_rx_update(netdev_priv(dev), rxqaddr[port]);
2415 }
2416}
2417
e07b1aa8 2418/* Process status response ring */
26691830 2419static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
cd28ab6a 2420{
e07b1aa8 2421 int work_done = 0;
bf15fe99
SH
2422 unsigned int total_bytes[2] = { 0 };
2423 unsigned int total_packets[2] = { 0 };
a8fd6266 2424
af2a58ac 2425 rmb();
26691830 2426 do {
55c9dd35 2427 struct sky2_port *sky2;
13210ce5 2428 struct sky2_status_le *le = hw->st_le + hw->st_idx;
ab5adecb 2429 unsigned port;
13210ce5 2430 struct net_device *dev;
cd28ab6a 2431 struct sk_buff *skb;
cd28ab6a
SH
2432 u32 status;
2433 u16 length;
ab5adecb
SH
2434 u8 opcode = le->opcode;
2435
2436 if (!(opcode & HW_OWNER))
2437 break;
cd28ab6a 2438
cb5d9547 2439 hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE);
bea86103 2440
ab5adecb 2441 port = le->css & CSS_LINK_BIT;
69161611 2442 dev = hw->dev[port];
13210ce5 2443 sky2 = netdev_priv(dev);
f65b138c
SH
2444 length = le16_to_cpu(le->length);
2445 status = le32_to_cpu(le->status);
cd28ab6a 2446
ab5adecb
SH
2447 le->opcode = 0;
2448 switch (opcode & ~HW_OWNER) {
cd28ab6a 2449 case OP_RXSTAT:
bf15fe99
SH
2450 total_packets[port]++;
2451 total_bytes[port] += length;
497d7c86 2452 skb = sky2_receive(dev, length, status);
3225b919 2453 if (unlikely(!skb)) {
7138a0f5 2454 dev->stats.rx_dropped++;
55c9dd35 2455 break;
3225b919 2456 }
13210ce5 2457
69161611 2458 /* This chip reports checksum status differently */
05745c4a 2459 if (hw->flags & SKY2_HW_NEW_LE) {
0ea065e5 2460 if ((sky2->flags & SKY2_FLAG_RX_CHECKSUM) &&
69161611
SH
2461 (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) &&
2462 (le->css & CSS_TCPUDPCSOK))
2463 skb->ip_summed = CHECKSUM_UNNECESSARY;
2464 else
2465 skb->ip_summed = CHECKSUM_NONE;
2466 }
2467
13210ce5 2468 skb->protocol = eth_type_trans(skb, dev);
13210ce5 2469
37e5a243 2470 sky2_skb_rx(sky2, status, skb);
13210ce5 2471
22e11703 2472 /* Stop after net poll weight */
13210ce5
SH
2473 if (++work_done >= to_do)
2474 goto exit_loop;
cd28ab6a
SH
2475 break;
2476
d1f13708
SH
2477#ifdef SKY2_VLAN_TAG_USED
2478 case OP_RXVLAN:
2479 sky2->rx_tag = length;
2480 break;
2481
2482 case OP_RXCHKSVLAN:
2483 sky2->rx_tag = length;
2484 /* fall through */
2485#endif
cd28ab6a 2486 case OP_RXCHKS:
0ea065e5 2487 if (!(sky2->flags & SKY2_FLAG_RX_CHECKSUM))
87418307
SH
2488 break;
2489
05745c4a
SH
 2490			/* If this happens then the driver is assuming the wrong format */
2491 if (unlikely(hw->flags & SKY2_HW_NEW_LE)) {
2492 if (net_ratelimit())
2493 printk(KERN_NOTICE "%s: unexpected"
2494 " checksum status\n",
2495 dev->name);
69161611 2496 break;
05745c4a 2497 }
69161611 2498
87418307
SH
2499 /* Both checksum counters are programmed to start at
2500 * the same offset, so unless there is a problem they
2501 * should match. This failure is an early indication that
2502 * hardware receive checksumming won't work.
2503 */
2504 if (likely(status >> 16 == (status & 0xffff))) {
2505 skb = sky2->rx_ring[sky2->rx_next].skb;
2506 skb->ip_summed = CHECKSUM_COMPLETE;
b9389796 2507 skb->csum = le16_to_cpu(status);
87418307
SH
2508 } else {
2509 printk(KERN_NOTICE PFX "%s: hardware receive "
2510 "checksum problem (status = %#x)\n",
2511 dev->name, status);
0ea065e5
SH
2512 sky2->flags &= ~SKY2_FLAG_RX_CHECKSUM;
2513
87418307 2514 sky2_write32(sky2->hw,
69161611 2515 Q_ADDR(rxqaddr[port], Q_CSR),
87418307
SH
2516 BMU_DIS_RX_CHKSUM);
2517 }
cd28ab6a
SH
2518 break;
2519
2520 case OP_TXINDEXLE:
13b97b74 2521 /* TX index reports status for both ports */
f55925d7 2522 sky2_tx_done(hw->dev[0], status & 0xfff);
e07b1aa8
SH
2523 if (hw->dev[1])
2524 sky2_tx_done(hw->dev[1],
2525 ((status >> 24) & 0xff)
2526 | (u16)(length & 0xf) << 8);
cd28ab6a
SH
2527 break;
2528
cd28ab6a
SH
2529 default:
2530 if (net_ratelimit())
793b883e 2531 printk(KERN_WARNING PFX
ab5adecb 2532 "unknown status opcode 0x%x\n", opcode);
cd28ab6a 2533 }
26691830 2534 } while (hw->st_idx != idx);
cd28ab6a 2535
fe2a24df
SH
2536 /* Fully processed status ring so clear irq */
2537 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
2538
13210ce5 2539exit_loop:
bf15fe99
SH
2540 sky2_rx_done(hw, 0, total_packets[0], total_bytes[0]);
2541 sky2_rx_done(hw, 1, total_packets[1], total_bytes[1]);
22e11703 2542
e07b1aa8 2543 return work_done;
cd28ab6a
SH
2544}
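/* Editor's note: a single OP_TXINDEXLE status element reports the
 * transmit consumer index for both ports at once -- the low 12 bits of
 * 'status' are port 0's index, while port 1's index is rebuilt from
 * status bits 24..31 plus the low nibble of 'length' shifted into bits
 * 8..11, exactly as decoded in the OP_TXINDEXLE case above.
 */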
2545
2546static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
2547{
2548 struct net_device *dev = hw->dev[port];
2549
3be92a70
SH
2550 if (net_ratelimit())
2551 printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
2552 dev->name, status);
cd28ab6a
SH
2553
2554 if (status & Y2_IS_PAR_RD1) {
3be92a70
SH
2555 if (net_ratelimit())
2556 printk(KERN_ERR PFX "%s: ram data read parity error\n",
2557 dev->name);
cd28ab6a
SH
2558 /* Clear IRQ */
2559 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
2560 }
2561
2562 if (status & Y2_IS_PAR_WR1) {
3be92a70
SH
2563 if (net_ratelimit())
2564 printk(KERN_ERR PFX "%s: ram data write parity error\n",
2565 dev->name);
cd28ab6a
SH
2566
2567 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
2568 }
2569
2570 if (status & Y2_IS_PAR_MAC1) {
3be92a70
SH
2571 if (net_ratelimit())
2572 printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
cd28ab6a
SH
2573 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
2574 }
2575
2576 if (status & Y2_IS_PAR_RX1) {
3be92a70
SH
2577 if (net_ratelimit())
2578 printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
cd28ab6a
SH
2579 sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
2580 }
2581
2582 if (status & Y2_IS_TCP_TXA1) {
3be92a70
SH
2583 if (net_ratelimit())
2584 printk(KERN_ERR PFX "%s: TCP segmentation error\n",
2585 dev->name);
cd28ab6a
SH
2586 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
2587 }
2588}
2589
2590static void sky2_hw_intr(struct sky2_hw *hw)
2591{
555382cb 2592 struct pci_dev *pdev = hw->pdev;
cd28ab6a 2593 u32 status = sky2_read32(hw, B0_HWE_ISRC);
555382cb
SH
2594 u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
2595
2596 status &= hwmsk;
cd28ab6a 2597
793b883e 2598 if (status & Y2_IS_TIST_OV)
cd28ab6a 2599 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
cd28ab6a
SH
2600
2601 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
793b883e
SH
2602 u16 pci_err;
2603
82637e80 2604 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
b32f40c4 2605 pci_err = sky2_pci_read16(hw, PCI_STATUS);
3be92a70 2606 if (net_ratelimit())
555382cb 2607 dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
b02a9258 2608 pci_err);
cd28ab6a 2609
b32f40c4 2610 sky2_pci_write16(hw, PCI_STATUS,
167f53d0 2611 pci_err | PCI_STATUS_ERROR_BITS);
82637e80 2612 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
cd28ab6a
SH
2613 }
2614
2615 if (status & Y2_IS_PCI_EXP) {
d571b694 2616 /* PCI-Express uncorrectable Error occurred */
555382cb 2617 u32 err;
cd28ab6a 2618
82637e80 2619 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
7782c8c4
SH
2620 err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
2621 sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
2622 0xfffffffful);
3be92a70 2623 if (net_ratelimit())
555382cb 2624 dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);
cf06ffb4 2625
7782c8c4 2626 sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
82637e80 2627 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
cd28ab6a
SH
2628 }
2629
2630 if (status & Y2_HWE_L1_MASK)
2631 sky2_hw_error(hw, 0, status);
2632 status >>= 8;
2633 if (status & Y2_HWE_L1_MASK)
2634 sky2_hw_error(hw, 1, status);
2635}
2636
2637static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
2638{
2639 struct net_device *dev = hw->dev[port];
2640 struct sky2_port *sky2 = netdev_priv(dev);
2641 u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
2642
2643 if (netif_msg_intr(sky2))
2644 printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n",
2645 dev->name, status);
2646
a3caeada
SH
2647 if (status & GM_IS_RX_CO_OV)
2648 gma_read16(hw, port, GM_RX_IRQ_SRC);
2649
2650 if (status & GM_IS_TX_CO_OV)
2651 gma_read16(hw, port, GM_TX_IRQ_SRC);
2652
cd28ab6a 2653 if (status & GM_IS_RX_FF_OR) {
7138a0f5 2654 ++dev->stats.rx_fifo_errors;
cd28ab6a
SH
2655 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
2656 }
2657
2658 if (status & GM_IS_TX_FF_UR) {
7138a0f5 2659 ++dev->stats.tx_fifo_errors;
cd28ab6a
SH
2660 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
2661 }
cd28ab6a
SH
2662}
2663
40b01727 2664/* This should never happen; it is a bug. */
c119731d 2665static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q)
d257924e
SH
2666{
2667 struct net_device *dev = hw->dev[port];
c119731d 2668 u16 idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
d257924e 2669
c119731d
SH
2670 dev_err(&hw->pdev->dev, PFX
2671 "%s: descriptor error q=%#x get=%u put=%u\n",
2672 dev->name, (unsigned) q, (unsigned) idx,
2673 (unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)));
d257924e 2674
40b01727 2675 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
d257924e 2676}
cd28ab6a 2677
75e80683
SH
2678static int sky2_rx_hung(struct net_device *dev)
2679{
2680 struct sky2_port *sky2 = netdev_priv(dev);
2681 struct sky2_hw *hw = sky2->hw;
2682 unsigned port = sky2->port;
2683 unsigned rxq = rxqaddr[port];
2684 u32 mac_rp = sky2_read32(hw, SK_REG(port, RX_GMF_RP));
2685 u8 mac_lev = sky2_read8(hw, SK_REG(port, RX_GMF_RLEV));
2686 u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP));
2687 u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL));
2688
2689 /* If idle and MAC or PCI is stuck */
2690 if (sky2->check.last == dev->last_rx &&
2691 ((mac_rp == sky2->check.mac_rp &&
2692 mac_lev != 0 && mac_lev >= sky2->check.mac_lev) ||
 2693	     /* Check if the PCI RX FIFO hung */
2694 (fifo_rp == sky2->check.fifo_rp &&
2695 fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) {
2696 printk(KERN_DEBUG PFX "%s: hung mac %d:%d fifo %d (%d:%d)\n",
2697 dev->name, mac_lev, mac_rp, fifo_lev, fifo_rp,
2698 sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
2699 return 1;
2700 } else {
2701 sky2->check.last = dev->last_rx;
2702 sky2->check.mac_rp = mac_rp;
2703 sky2->check.mac_lev = mac_lev;
2704 sky2->check.fifo_rp = fifo_rp;
2705 sky2->check.fifo_lev = fifo_lev;
2706 return 0;
2707 }
2708}
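/* Editor's note: the heuristic above declares the receiver hung when no
 * frame has arrived since the previous watchdog tick (dev->last_rx is
 * unchanged) while either the MAC or the PCI FIFO read pointer has not
 * moved even though its fill level is non-zero and has not dropped;
 * otherwise the sampled state is saved for the next comparison.
 */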
2709
32c2c300 2710static void sky2_watchdog(unsigned long arg)
d27ed387 2711{
01bd7564 2712 struct sky2_hw *hw = (struct sky2_hw *) arg;
d27ed387 2713
75e80683 2714 /* Check for lost IRQ once a second */
32c2c300 2715 if (sky2_read32(hw, B0_ISRC)) {
bea3348e 2716 napi_schedule(&hw->napi);
75e80683
SH
2717 } else {
2718 int i, active = 0;
2719
2720 for (i = 0; i < hw->ports; i++) {
bea3348e 2721 struct net_device *dev = hw->dev[i];
75e80683
SH
2722 if (!netif_running(dev))
2723 continue;
2724 ++active;
2725
2726 /* For chips with Rx FIFO, check if stuck */
39dbd958 2727 if ((hw->flags & SKY2_HW_RAM_BUFFER) &&
75e80683
SH
2728 sky2_rx_hung(dev)) {
2729 pr_info(PFX "%s: receiver hang detected\n",
2730 dev->name);
2731 schedule_work(&hw->restart_work);
2732 return;
2733 }
2734 }
2735
2736 if (active == 0)
2737 return;
32c2c300 2738 }
01bd7564 2739
75e80683 2740 mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ));
d27ed387
SH
2741}
2742
40b01727
SH
2743/* Hardware/software error handling */
2744static void sky2_err_intr(struct sky2_hw *hw, u32 status)
cd28ab6a 2745{
40b01727
SH
2746 if (net_ratelimit())
2747 dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status);
cd28ab6a 2748
1e5f1283
SH
2749 if (status & Y2_IS_HW_ERR)
2750 sky2_hw_intr(hw);
d257924e 2751
1e5f1283
SH
2752 if (status & Y2_IS_IRQ_MAC1)
2753 sky2_mac_intr(hw, 0);
cd28ab6a 2754
1e5f1283
SH
2755 if (status & Y2_IS_IRQ_MAC2)
2756 sky2_mac_intr(hw, 1);
cd28ab6a 2757
1e5f1283 2758 if (status & Y2_IS_CHK_RX1)
c119731d 2759 sky2_le_error(hw, 0, Q_R1);
d257924e 2760
1e5f1283 2761 if (status & Y2_IS_CHK_RX2)
c119731d 2762 sky2_le_error(hw, 1, Q_R2);
d257924e 2763
1e5f1283 2764 if (status & Y2_IS_CHK_TXA1)
c119731d 2765 sky2_le_error(hw, 0, Q_XA1);
d257924e 2766
1e5f1283 2767 if (status & Y2_IS_CHK_TXA2)
c119731d 2768 sky2_le_error(hw, 1, Q_XA2);
40b01727
SH
2769}
2770
bea3348e 2771static int sky2_poll(struct napi_struct *napi, int work_limit)
40b01727 2772{
bea3348e 2773 struct sky2_hw *hw = container_of(napi, struct sky2_hw, napi);
40b01727 2774 u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
6f535763 2775 int work_done = 0;
26691830 2776 u16 idx;
40b01727
SH
2777
2778 if (unlikely(status & Y2_IS_ERROR))
2779 sky2_err_intr(hw, status);
2780
2781 if (status & Y2_IS_IRQ_PHY1)
2782 sky2_phy_intr(hw, 0);
2783
2784 if (status & Y2_IS_IRQ_PHY2)
2785 sky2_phy_intr(hw, 1);
cd28ab6a 2786
26691830
SH
2787 while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) {
2788 work_done += sky2_status_intr(hw, work_limit - work_done, idx);
6f535763
DM
2789
2790 if (work_done >= work_limit)
26691830
SH
2791 goto done;
2792 }
6f535763 2793
26691830
SH
2794 napi_complete(napi);
2795 sky2_read32(hw, B0_Y2_SP_LISR);
2796done:
6f535763 2797
bea3348e 2798 return work_done;
e07b1aa8
SH
2799}
2800
7d12e780 2801static irqreturn_t sky2_intr(int irq, void *dev_id)
e07b1aa8
SH
2802{
2803 struct sky2_hw *hw = dev_id;
e07b1aa8
SH
2804 u32 status;
2805
 2806	/* Reading this register masks interrupts as a side effect */
2807 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
2808 if (status == 0 || status == ~0)
2809 return IRQ_NONE;
793b883e 2810
e07b1aa8 2811 prefetch(&hw->st_le[hw->st_idx]);
bea3348e
SH
2812
2813 napi_schedule(&hw->napi);
793b883e 2814
cd28ab6a
SH
2815 return IRQ_HANDLED;
2816}
2817
2818#ifdef CONFIG_NET_POLL_CONTROLLER
2819static void sky2_netpoll(struct net_device *dev)
2820{
2821 struct sky2_port *sky2 = netdev_priv(dev);
2822
bea3348e 2823 napi_schedule(&sky2->hw->napi);
cd28ab6a
SH
2824}
2825#endif
2826
2827/* Chip internal frequency for clock calculations */
05745c4a 2828static u32 sky2_mhz(const struct sky2_hw *hw)
cd28ab6a 2829{
793b883e 2830 switch (hw->chip_id) {
cd28ab6a 2831 case CHIP_ID_YUKON_EC:
5a5b1ea0 2832 case CHIP_ID_YUKON_EC_U:
93745494 2833 case CHIP_ID_YUKON_EX:
ed4d4161 2834 case CHIP_ID_YUKON_SUPR:
0ce8b98d 2835 case CHIP_ID_YUKON_UL_2:
05745c4a
SH
2836 return 125;
2837
cd28ab6a 2838 case CHIP_ID_YUKON_FE:
05745c4a
SH
2839 return 100;
2840
2841 case CHIP_ID_YUKON_FE_P:
2842 return 50;
2843
2844 case CHIP_ID_YUKON_XL:
2845 return 156;
2846
2847 default:
2848 BUG();
cd28ab6a
SH
2849 }
2850}
2851
fb17358f 2852static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
cd28ab6a 2853{
fb17358f 2854 return sky2_mhz(hw) * us;
cd28ab6a
SH
2855}
2856
fb17358f 2857static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
cd28ab6a 2858{
fb17358f 2859 return clk / sky2_mhz(hw);
cd28ab6a
SH
2860}
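/* Editor's note, worked example: on the 125 MHz parts (EC, EC-U, EX,
 * Supreme, Ultra-2) sky2_us2clk(hw, 1000) yields 125000 timer ticks and
 * sky2_clk2us(hw, 125000) gives back 1000 us; the FE runs the same
 * timers from 100 MHz, the FE+ from 50 MHz, and the XL from 156 MHz.
 */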
2861
fb17358f 2862
e3173832 2863static int __devinit sky2_init(struct sky2_hw *hw)
cd28ab6a 2864{
b89165f2 2865 u8 t8;
cd28ab6a 2866
167f53d0 2867 /* Enable all clocks and check for bad PCI access */
b32f40c4 2868 sky2_pci_write32(hw, PCI_DEV_REG3, 0);
451af335 2869
cd28ab6a 2870 sky2_write8(hw, B0_CTST, CS_RST_CLR);
08c06d8a 2871
cd28ab6a 2872 hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
ea76e635
SH
2873 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
2874
2875 switch(hw->chip_id) {
2876 case CHIP_ID_YUKON_XL:
39dbd958 2877 hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
ea76e635
SH
2878 break;
2879
2880 case CHIP_ID_YUKON_EC_U:
2881 hw->flags = SKY2_HW_GIGABIT
2882 | SKY2_HW_NEWER_PHY
2883 | SKY2_HW_ADV_POWER_CTL;
2884 break;
2885
2886 case CHIP_ID_YUKON_EX:
2887 hw->flags = SKY2_HW_GIGABIT
2888 | SKY2_HW_NEWER_PHY
2889 | SKY2_HW_NEW_LE
2890 | SKY2_HW_ADV_POWER_CTL;
2891
2892 /* New transmit checksum */
2893 if (hw->chip_rev != CHIP_REV_YU_EX_B0)
2894 hw->flags |= SKY2_HW_AUTO_TX_SUM;
2895 break;
2896
2897 case CHIP_ID_YUKON_EC:
2898 /* This rev is really old, and requires untested workarounds */
2899 if (hw->chip_rev == CHIP_REV_YU_EC_A1) {
2900 dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
2901 return -EOPNOTSUPP;
2902 }
39dbd958 2903 hw->flags = SKY2_HW_GIGABIT;
ea76e635
SH
2904 break;
2905
2906 case CHIP_ID_YUKON_FE:
ea76e635
SH
2907 break;
2908
05745c4a
SH
2909 case CHIP_ID_YUKON_FE_P:
2910 hw->flags = SKY2_HW_NEWER_PHY
2911 | SKY2_HW_NEW_LE
2912 | SKY2_HW_AUTO_TX_SUM
2913 | SKY2_HW_ADV_POWER_CTL;
2914 break;
ed4d4161
SH
2915
2916 case CHIP_ID_YUKON_SUPR:
2917 hw->flags = SKY2_HW_GIGABIT
2918 | SKY2_HW_NEWER_PHY
2919 | SKY2_HW_NEW_LE
2920 | SKY2_HW_AUTO_TX_SUM
2921 | SKY2_HW_ADV_POWER_CTL;
2922 break;
2923
0ce8b98d
SH
2924 case CHIP_ID_YUKON_UL_2:
2925 hw->flags = SKY2_HW_GIGABIT
2926 | SKY2_HW_ADV_POWER_CTL;
2927 break;
2928
ea76e635 2929 default:
b02a9258
SH
2930 dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
2931 hw->chip_id);
cd28ab6a
SH
2932 return -EOPNOTSUPP;
2933 }
2934
ea76e635
SH
2935 hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
2936 if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P')
2937 hw->flags |= SKY2_HW_FIBRE_PHY;
290d4de5 2938
e3173832
SH
2939 hw->ports = 1;
2940 t8 = sky2_read8(hw, B2_Y2_HW_RES);
2941 if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
2942 if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
2943 ++hw->ports;
2944 }
2945
2946 return 0;
2947}
2948
2949static void sky2_reset(struct sky2_hw *hw)
2950{
555382cb 2951 struct pci_dev *pdev = hw->pdev;
e3173832 2952 u16 status;
555382cb
SH
2953 int i, cap;
2954 u32 hwe_mask = Y2_HWE_ALL_MASK;
e3173832 2955
cd28ab6a 2956 /* disable ASF */
4f44d8ba
SH
2957 if (hw->chip_id == CHIP_ID_YUKON_EX) {
2958 status = sky2_read16(hw, HCU_CCSR);
2959 status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
2960 HCU_CCSR_UC_STATE_MSK);
2961 sky2_write16(hw, HCU_CCSR, status);
2962 } else
2963 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
2964 sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
cd28ab6a
SH
2965
2966 /* do a SW reset */
2967 sky2_write8(hw, B0_CTST, CS_RST_SET);
2968 sky2_write8(hw, B0_CTST, CS_RST_CLR);
2969
ac93a394
SH
2970 /* allow writes to PCI config */
2971 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2972
cd28ab6a 2973 /* clear PCI errors, if any */
b32f40c4 2974 status = sky2_pci_read16(hw, PCI_STATUS);
167f53d0 2975 status |= PCI_STATUS_ERROR_BITS;
b32f40c4 2976 sky2_pci_write16(hw, PCI_STATUS, status);
cd28ab6a
SH
2977
2978 sky2_write8(hw, B0_CTST, CS_MRST_CLR);
2979
555382cb
SH
2980 cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
2981 if (cap) {
7782c8c4
SH
2982 sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
2983 0xfffffffful);
555382cb
SH
2984
2985 /* If error bit is stuck on ignore it */
2986 if (sky2_read32(hw, B0_HWE_ISRC) & Y2_IS_PCI_EXP)
2987 dev_info(&pdev->dev, "ignoring stuck error report bit\n");
7782c8c4 2988 else
555382cb
SH
2989 hwe_mask |= Y2_IS_PCI_EXP;
2990 }
cd28ab6a 2991
ae306cca 2992 sky2_power_on(hw);
82637e80 2993 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
cd28ab6a
SH
2994
2995 for (i = 0; i < hw->ports; i++) {
2996 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
2997 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
69161611 2998
ed4d4161
SH
2999 if (hw->chip_id == CHIP_ID_YUKON_EX ||
3000 hw->chip_id == CHIP_ID_YUKON_SUPR)
69161611
SH
3001 sky2_write16(hw, SK_REG(i, GMAC_CTRL),
3002 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON
3003 | GMC_BYP_RETR_ON);
cd28ab6a
SH
3004 }
3005
793b883e
SH
3006 /* Clear I2C IRQ noise */
3007 sky2_write32(hw, B2_I2C_IRQ, 1);
cd28ab6a
SH
3008
3009 /* turn off hardware timer (unused) */
3010 sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
3011 sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
793b883e 3012
cd28ab6a
SH
3013 sky2_write8(hw, B0_Y2LED, LED_STAT_ON);
3014
69634ee7
SH
3015 /* Turn off descriptor polling */
3016 sky2_write32(hw, B28_DPT_CTRL, DPT_STOP);
cd28ab6a
SH
3017
3018 /* Turn off receive timestamp */
3019 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
793b883e 3020 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
cd28ab6a
SH
3021
3022 /* enable the Tx Arbiters */
3023 for (i = 0; i < hw->ports; i++)
3024 sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
3025
3026 /* Initialize ram interface */
3027 for (i = 0; i < hw->ports; i++) {
793b883e 3028 sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
cd28ab6a
SH
3029
3030 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
3031 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
3032 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
3033 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
3034 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
3035 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
3036 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
3037 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
3038 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
3039 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
3040 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
3041 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
3042 }
3043
555382cb 3044 sky2_write32(hw, B0_HWE_IMSK, hwe_mask);
cd28ab6a 3045
cd28ab6a 3046 for (i = 0; i < hw->ports; i++)
d3bcfbeb 3047 sky2_gmac_reset(hw, i);
cd28ab6a 3048
cd28ab6a
SH
3049 memset(hw->st_le, 0, STATUS_LE_BYTES);
3050 hw->st_idx = 0;
3051
3052 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
3053 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);
3054
3055 sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
793b883e 3056 sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);
cd28ab6a
SH
3057
3058 /* Set the list last index */
793b883e 3059 sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);
cd28ab6a 3060
290d4de5
SH
3061 sky2_write16(hw, STAT_TX_IDX_TH, 10);
3062 sky2_write8(hw, STAT_FIFO_WM, 16);
cd28ab6a 3063
290d4de5
SH
3064 /* set Status-FIFO ISR watermark */
3065 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
3066 sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
3067 else
3068 sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
cd28ab6a 3069
290d4de5 3070 sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
77b3d6a2
SH
3071 sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
3072 sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
cd28ab6a 3073
793b883e 3074 /* enable status unit */
cd28ab6a
SH
3075 sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);
3076
3077 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
3078 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
3079 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
e3173832
SH
3080}
3081
af18d8b8
SH
3082/* Take device down (offline).
3083 * Equivalent to doing dev_stop() but this does not
 3084 * inform upper layers of the transition.
3085 */
3086static void sky2_detach(struct net_device *dev)
3087{
3088 if (netif_running(dev)) {
3089 netif_device_detach(dev); /* stop txq */
3090 sky2_down(dev);
3091 }
3092}
3093
3094/* Bring device back after doing sky2_detach */
3095static int sky2_reattach(struct net_device *dev)
3096{
3097 int err = 0;
3098
3099 if (netif_running(dev)) {
3100 err = sky2_up(dev);
3101 if (err) {
3102 printk(KERN_INFO PFX "%s: could not restart %d\n",
3103 dev->name, err);
3104 dev_close(dev);
3105 } else {
3106 netif_device_attach(dev);
3107 sky2_set_multicast(dev);
3108 }
3109 }
3110
3111 return err;
3112}
3113
81906791
SH
3114static void sky2_restart(struct work_struct *work)
3115{
3116 struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
af18d8b8 3117 int i;
81906791 3118
81906791 3119 rtnl_lock();
af18d8b8
SH
3120 for (i = 0; i < hw->ports; i++)
3121 sky2_detach(hw->dev[i]);
81906791 3122
8cfcbe99
SH
3123 napi_disable(&hw->napi);
3124 sky2_write32(hw, B0_IMSK, 0);
81906791
SH
3125 sky2_reset(hw);
3126 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
6de16237 3127 napi_enable(&hw->napi);
81906791 3128
af18d8b8
SH
3129 for (i = 0; i < hw->ports; i++)
3130 sky2_reattach(hw->dev[i]);
81906791 3131
81906791
SH
3132 rtnl_unlock();
3133}
3134
e3173832
SH
3135static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
3136{
3137 return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
3138}
3139
3140static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3141{
3142 const struct sky2_port *sky2 = netdev_priv(dev);
3143
3144 wol->supported = sky2_wol_supported(sky2->hw);
3145 wol->wolopts = sky2->wol;
3146}
3147
3148static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3149{
3150 struct sky2_port *sky2 = netdev_priv(dev);
3151 struct sky2_hw *hw = sky2->hw;
cd28ab6a 3152
9d731d77
RW
3153 if ((wol->wolopts & ~sky2_wol_supported(sky2->hw))
3154 || !device_can_wakeup(&hw->pdev->dev))
e3173832
SH
3155 return -EOPNOTSUPP;
3156
3157 sky2->wol = wol->wolopts;
3158
05745c4a
SH
3159 if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
3160 hw->chip_id == CHIP_ID_YUKON_EX ||
3161 hw->chip_id == CHIP_ID_YUKON_FE_P)
e3173832
SH
3162 sky2_write32(hw, B0_CTST, sky2->wol
3163 ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
3164
9d731d77
RW
3165 device_set_wakeup_enable(&hw->pdev->dev, sky2->wol);
3166
e3173832
SH
3167 if (!netif_running(dev))
3168 sky2_wol_init(sky2);
cd28ab6a
SH
3169 return 0;
3170}
3171
28bd181a 3172static u32 sky2_supported_modes(const struct sky2_hw *hw)
cd28ab6a 3173{
b89165f2
SH
3174 if (sky2_is_copper(hw)) {
3175 u32 modes = SUPPORTED_10baseT_Half
3176 | SUPPORTED_10baseT_Full
3177 | SUPPORTED_100baseT_Half
3178 | SUPPORTED_100baseT_Full
3179 | SUPPORTED_Autoneg | SUPPORTED_TP;
cd28ab6a 3180
ea76e635 3181 if (hw->flags & SKY2_HW_GIGABIT)
cd28ab6a 3182 modes |= SUPPORTED_1000baseT_Half
b89165f2
SH
3183 | SUPPORTED_1000baseT_Full;
3184 return modes;
cd28ab6a 3185 } else
b89165f2
SH
3186 return SUPPORTED_1000baseT_Half
3187 | SUPPORTED_1000baseT_Full
3188 | SUPPORTED_Autoneg
3189 | SUPPORTED_FIBRE;
cd28ab6a
SH
3190}
3191
793b883e 3192static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
cd28ab6a
SH
3193{
3194 struct sky2_port *sky2 = netdev_priv(dev);
3195 struct sky2_hw *hw = sky2->hw;
3196
3197 ecmd->transceiver = XCVR_INTERNAL;
3198 ecmd->supported = sky2_supported_modes(hw);
3199 ecmd->phy_address = PHY_ADDR_MARV;
b89165f2 3200 if (sky2_is_copper(hw)) {
cd28ab6a 3201 ecmd->port = PORT_TP;
b89165f2
SH
3202 ecmd->speed = sky2->speed;
3203 } else {
3204 ecmd->speed = SPEED_1000;
cd28ab6a 3205 ecmd->port = PORT_FIBRE;
b89165f2 3206 }
cd28ab6a
SH
3207
3208 ecmd->advertising = sky2->advertising;
0ea065e5
SH
3209 ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED)
3210 ? AUTONEG_ENABLE : AUTONEG_DISABLE;
cd28ab6a
SH
3211 ecmd->duplex = sky2->duplex;
3212 return 0;
3213}
3214
3215static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3216{
3217 struct sky2_port *sky2 = netdev_priv(dev);
3218 const struct sky2_hw *hw = sky2->hw;
3219 u32 supported = sky2_supported_modes(hw);
3220
3221 if (ecmd->autoneg == AUTONEG_ENABLE) {
0ea065e5 3222 sky2->flags |= SKY2_FLAG_AUTO_SPEED;
cd28ab6a
SH
3223 ecmd->advertising = supported;
3224 sky2->duplex = -1;
3225 sky2->speed = -1;
3226 } else {
3227 u32 setting;
3228
793b883e 3229 switch (ecmd->speed) {
cd28ab6a
SH
3230 case SPEED_1000:
3231 if (ecmd->duplex == DUPLEX_FULL)
3232 setting = SUPPORTED_1000baseT_Full;
3233 else if (ecmd->duplex == DUPLEX_HALF)
3234 setting = SUPPORTED_1000baseT_Half;
3235 else
3236 return -EINVAL;
3237 break;
3238 case SPEED_100:
3239 if (ecmd->duplex == DUPLEX_FULL)
3240 setting = SUPPORTED_100baseT_Full;
3241 else if (ecmd->duplex == DUPLEX_HALF)
3242 setting = SUPPORTED_100baseT_Half;
3243 else
3244 return -EINVAL;
3245 break;
3246
3247 case SPEED_10:
3248 if (ecmd->duplex == DUPLEX_FULL)
3249 setting = SUPPORTED_10baseT_Full;
3250 else if (ecmd->duplex == DUPLEX_HALF)
3251 setting = SUPPORTED_10baseT_Half;
3252 else
3253 return -EINVAL;
3254 break;
3255 default:
3256 return -EINVAL;
3257 }
3258
3259 if ((setting & supported) == 0)
3260 return -EINVAL;
3261
3262 sky2->speed = ecmd->speed;
3263 sky2->duplex = ecmd->duplex;
0ea065e5 3264 sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
cd28ab6a
SH
3265 }
3266
cd28ab6a
SH
3267 sky2->advertising = ecmd->advertising;
3268
d1b139c0 3269 if (netif_running(dev)) {
1b537565 3270 sky2_phy_reinit(sky2);
d1b139c0
SH
3271 sky2_set_multicast(dev);
3272 }
cd28ab6a
SH
3273
3274 return 0;
3275}
3276
3277static void sky2_get_drvinfo(struct net_device *dev,
3278 struct ethtool_drvinfo *info)
3279{
3280 struct sky2_port *sky2 = netdev_priv(dev);
3281
3282 strcpy(info->driver, DRV_NAME);
3283 strcpy(info->version, DRV_VERSION);
3284 strcpy(info->fw_version, "N/A");
3285 strcpy(info->bus_info, pci_name(sky2->hw->pdev));
3286}
3287
3288static const struct sky2_stat {
793b883e
SH
3289 char name[ETH_GSTRING_LEN];
3290 u16 offset;
cd28ab6a
SH
3291} sky2_stats[] = {
3292 { "tx_bytes", GM_TXO_OK_HI },
3293 { "rx_bytes", GM_RXO_OK_HI },
3294 { "tx_broadcast", GM_TXF_BC_OK },
3295 { "rx_broadcast", GM_RXF_BC_OK },
3296 { "tx_multicast", GM_TXF_MC_OK },
3297 { "rx_multicast", GM_RXF_MC_OK },
3298 { "tx_unicast", GM_TXF_UC_OK },
3299 { "rx_unicast", GM_RXF_UC_OK },
3300 { "tx_mac_pause", GM_TXF_MPAUSE },
3301 { "rx_mac_pause", GM_RXF_MPAUSE },
eadfa7dd 3302 { "collisions", GM_TXF_COL },
cd28ab6a
SH
3303 { "late_collision",GM_TXF_LAT_COL },
3304 { "aborted", GM_TXF_ABO_COL },
eadfa7dd 3305 { "single_collisions", GM_TXF_SNG_COL },
cd28ab6a 3306 { "multi_collisions", GM_TXF_MUL_COL },
eadfa7dd 3307
d2604540 3308 { "rx_short", GM_RXF_SHT },
cd28ab6a 3309 { "rx_runt", GM_RXE_FRAG },
eadfa7dd
SH
3310 { "rx_64_byte_packets", GM_RXF_64B },
3311 { "rx_65_to_127_byte_packets", GM_RXF_127B },
3312 { "rx_128_to_255_byte_packets", GM_RXF_255B },
3313 { "rx_256_to_511_byte_packets", GM_RXF_511B },
3314 { "rx_512_to_1023_byte_packets", GM_RXF_1023B },
3315 { "rx_1024_to_1518_byte_packets", GM_RXF_1518B },
3316 { "rx_1518_to_max_byte_packets", GM_RXF_MAX_SZ },
cd28ab6a 3317 { "rx_too_long", GM_RXF_LNG_ERR },
eadfa7dd
SH
3318 { "rx_fifo_overflow", GM_RXE_FIFO_OV },
3319 { "rx_jabber", GM_RXF_JAB_PKT },
cd28ab6a 3320 { "rx_fcs_error", GM_RXF_FCS_ERR },
eadfa7dd
SH
3321
3322 { "tx_64_byte_packets", GM_TXF_64B },
3323 { "tx_65_to_127_byte_packets", GM_TXF_127B },
3324 { "tx_128_to_255_byte_packets", GM_TXF_255B },
3325 { "tx_256_to_511_byte_packets", GM_TXF_511B },
3326 { "tx_512_to_1023_byte_packets", GM_TXF_1023B },
3327 { "tx_1024_to_1518_byte_packets", GM_TXF_1518B },
3328 { "tx_1519_to_max_byte_packets", GM_TXF_MAX_SZ },
3329 { "tx_fifo_underrun", GM_TXE_FIFO_UR },
cd28ab6a
SH
3330};
3331
cd28ab6a
SH
3332static u32 sky2_get_rx_csum(struct net_device *dev)
3333{
3334 struct sky2_port *sky2 = netdev_priv(dev);
3335
0ea065e5 3336 return !!(sky2->flags & SKY2_FLAG_RX_CHECKSUM);
cd28ab6a
SH
3337}
3338
3339static int sky2_set_rx_csum(struct net_device *dev, u32 data)
3340{
3341 struct sky2_port *sky2 = netdev_priv(dev);
3342
0ea065e5
SH
3343 if (data)
3344 sky2->flags |= SKY2_FLAG_RX_CHECKSUM;
3345 else
3346 sky2->flags &= ~SKY2_FLAG_RX_CHECKSUM;
793b883e 3347
cd28ab6a
SH
3348 sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
3349 data ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
3350
3351 return 0;
3352}
3353
3354static u32 sky2_get_msglevel(struct net_device *netdev)
3355{
3356 struct sky2_port *sky2 = netdev_priv(netdev);
3357 return sky2->msg_enable;
3358}
3359
9a7ae0a9
SH
3360static int sky2_nway_reset(struct net_device *dev)
3361{
3362 struct sky2_port *sky2 = netdev_priv(dev);
9a7ae0a9 3363
0ea065e5 3364 if (!netif_running(dev) || !(sky2->flags & SKY2_FLAG_AUTO_SPEED))
9a7ae0a9
SH
3365 return -EINVAL;
3366
1b537565 3367 sky2_phy_reinit(sky2);
d1b139c0 3368 sky2_set_multicast(dev);
9a7ae0a9
SH
3369
3370 return 0;
3371}
3372
793b883e 3373static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count)
cd28ab6a
SH
3374{
3375 struct sky2_hw *hw = sky2->hw;
3376 unsigned port = sky2->port;
3377 int i;
3378
3379 data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
793b883e 3380 | (u64) gma_read32(hw, port, GM_TXO_OK_LO);
cd28ab6a 3381 data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
793b883e 3382 | (u64) gma_read32(hw, port, GM_RXO_OK_LO);
cd28ab6a 3383
793b883e 3384 for (i = 2; i < count; i++)
cd28ab6a
SH
3385 data[i] = (u64) gma_read32(hw, port, sky2_stats[i].offset);
3386}
3387
cd28ab6a
SH
3388static void sky2_set_msglevel(struct net_device *netdev, u32 value)
3389{
3390 struct sky2_port *sky2 = netdev_priv(netdev);
3391 sky2->msg_enable = value;
3392}
3393
b9f2c044 3394static int sky2_get_sset_count(struct net_device *dev, int sset)
cd28ab6a 3395{
b9f2c044
JG
3396 switch (sset) {
3397 case ETH_SS_STATS:
3398 return ARRAY_SIZE(sky2_stats);
3399 default:
3400 return -EOPNOTSUPP;
3401 }
cd28ab6a
SH
3402}
3403
3404static void sky2_get_ethtool_stats(struct net_device *dev,
793b883e 3405 struct ethtool_stats *stats, u64 * data)
cd28ab6a
SH
3406{
3407 struct sky2_port *sky2 = netdev_priv(dev);
3408
793b883e 3409 sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats));
cd28ab6a
SH
3410}
3411
793b883e 3412static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data)
cd28ab6a
SH
3413{
3414 int i;
3415
3416 switch (stringset) {
3417 case ETH_SS_STATS:
3418 for (i = 0; i < ARRAY_SIZE(sky2_stats); i++)
3419 memcpy(data + i * ETH_GSTRING_LEN,
3420 sky2_stats[i].name, ETH_GSTRING_LEN);
3421 break;
3422 }
3423}
3424
cd28ab6a
SH
3425static int sky2_set_mac_address(struct net_device *dev, void *p)
3426{
3427 struct sky2_port *sky2 = netdev_priv(dev);
a8ab1ec0
SH
3428 struct sky2_hw *hw = sky2->hw;
3429 unsigned port = sky2->port;
3430 const struct sockaddr *addr = p;
cd28ab6a
SH
3431
3432 if (!is_valid_ether_addr(addr->sa_data))
3433 return -EADDRNOTAVAIL;
3434
cd28ab6a 3435 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
a8ab1ec0 3436 memcpy_toio(hw->regs + B2_MAC_1 + port * 8,
cd28ab6a 3437 dev->dev_addr, ETH_ALEN);
a8ab1ec0 3438 memcpy_toio(hw->regs + B2_MAC_2 + port * 8,
cd28ab6a 3439 dev->dev_addr, ETH_ALEN);
1b537565 3440
a8ab1ec0
SH
3441 /* virtual address for data */
3442 gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
3443
3444 /* physical address: used for pause frames */
3445 gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
1b537565
SH
3446
3447 return 0;
cd28ab6a
SH
3448}
3449
a052b52f
SH
3450static inline void sky2_add_filter(u8 filter[8], const u8 *addr)
3451{
3452 u32 bit;
3453
3454 bit = ether_crc(ETH_ALEN, addr) & 63;
3455 filter[bit >> 3] |= 1 << (bit & 7);
3456}
3457
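/* Note (added for illustration): the multicast filter is a 64-bit hash
 * table.  ether_crc() computes the CRC-32 of the station address, the low
 * 6 bits select one of 64 bits, and that bit is set in the 8-byte filter[]
 * array.  sky2_set_multicast() below then loads the array into the four
 * 16-bit GM_MC_ADDR_H1..H4 registers two bytes at a time, so any address
 * whose CRC aliases to a set bit is also accepted.
 */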
cd28ab6a
SH
3458static void sky2_set_multicast(struct net_device *dev)
3459{
3460 struct sky2_port *sky2 = netdev_priv(dev);
3461 struct sky2_hw *hw = sky2->hw;
3462 unsigned port = sky2->port;
3463 struct dev_mc_list *list = dev->mc_list;
3464 u16 reg;
3465 u8 filter[8];
a052b52f
SH
3466 int rx_pause;
3467 static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
cd28ab6a 3468
a052b52f 3469 rx_pause = (sky2->flow_status == FC_RX || sky2->flow_status == FC_BOTH);
cd28ab6a
SH
3470 memset(filter, 0, sizeof(filter));
3471
3472 reg = gma_read16(hw, port, GM_RX_CTRL);
3473 reg |= GM_RXCR_UCF_ENA;
3474
d571b694 3475 if (dev->flags & IFF_PROMISC) /* promiscuous */
cd28ab6a 3476 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
a052b52f 3477 else if (dev->flags & IFF_ALLMULTI)
cd28ab6a 3478 memset(filter, 0xff, sizeof(filter));
a052b52f 3479 else if (dev->mc_count == 0 && !rx_pause)
cd28ab6a
SH
3480 reg &= ~GM_RXCR_MCF_ENA;
3481 else {
3482 int i;
3483 reg |= GM_RXCR_MCF_ENA;
3484
a052b52f
SH
3485 if (rx_pause)
3486 sky2_add_filter(filter, pause_mc_addr);
3487
3488 for (i = 0; list && i < dev->mc_count; i++, list = list->next)
3489 sky2_add_filter(filter, list->dmi_addr);
cd28ab6a
SH
3490 }
3491
cd28ab6a 3492 gma_write16(hw, port, GM_MC_ADDR_H1,
793b883e 3493 (u16) filter[0] | ((u16) filter[1] << 8));
cd28ab6a 3494 gma_write16(hw, port, GM_MC_ADDR_H2,
793b883e 3495 (u16) filter[2] | ((u16) filter[3] << 8));
cd28ab6a 3496 gma_write16(hw, port, GM_MC_ADDR_H3,
793b883e 3497 (u16) filter[4] | ((u16) filter[5] << 8));
cd28ab6a 3498 gma_write16(hw, port, GM_MC_ADDR_H4,
793b883e 3499 (u16) filter[6] | ((u16) filter[7] << 8));
cd28ab6a
SH
3500
3501 gma_write16(hw, port, GM_RX_CTRL, reg);
3502}
3503
3504/* Can have one global blink state because blinking is controlled by
3505 * ethtool and that is always done under the RTNL mutex
3506 */
a84d0a3d 3507static void sky2_led(struct sky2_port *sky2, enum led_mode mode)
cd28ab6a 3508{
a84d0a3d
SH
3509 struct sky2_hw *hw = sky2->hw;
3510 unsigned port = sky2->port;
793b883e 3511
a84d0a3d
SH
3512 spin_lock_bh(&sky2->phy_lock);
3513 if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
3514 hw->chip_id == CHIP_ID_YUKON_EX ||
3515 hw->chip_id == CHIP_ID_YUKON_SUPR) {
3516 u16 pg;
793b883e
SH
3517 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
3518 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
793b883e 3519
a84d0a3d
SH
3520 switch (mode) {
3521 case MO_LED_OFF:
3522 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
3523 PHY_M_LEDC_LOS_CTRL(8) |
3524 PHY_M_LEDC_INIT_CTRL(8) |
3525 PHY_M_LEDC_STA1_CTRL(8) |
3526 PHY_M_LEDC_STA0_CTRL(8));
3527 break;
3528 case MO_LED_ON:
3529 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
3530 PHY_M_LEDC_LOS_CTRL(9) |
3531 PHY_M_LEDC_INIT_CTRL(9) |
3532 PHY_M_LEDC_STA1_CTRL(9) |
3533 PHY_M_LEDC_STA0_CTRL(9));
3534 break;
3535 case MO_LED_BLINK:
3536 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
3537 PHY_M_LEDC_LOS_CTRL(0xa) |
3538 PHY_M_LEDC_INIT_CTRL(0xa) |
3539 PHY_M_LEDC_STA1_CTRL(0xa) |
3540 PHY_M_LEDC_STA0_CTRL(0xa));
3541 break;
3542 case MO_LED_NORM:
3543 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
3544 PHY_M_LEDC_LOS_CTRL(1) |
3545 PHY_M_LEDC_INIT_CTRL(8) |
3546 PHY_M_LEDC_STA1_CTRL(7) |
3547 PHY_M_LEDC_STA0_CTRL(7));
3548 }
793b883e 3549
a84d0a3d
SH
3550 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
3551 } else
7d2e3cb7 3552 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
a84d0a3d
SH
3553 PHY_M_LED_MO_DUP(mode) |
3554 PHY_M_LED_MO_10(mode) |
3555 PHY_M_LED_MO_100(mode) |
3556 PHY_M_LED_MO_1000(mode) |
3557 PHY_M_LED_MO_RX(mode) |
3558 PHY_M_LED_MO_TX(mode));
3559
3560 spin_unlock_bh(&sky2->phy_lock);
cd28ab6a
SH
3561}
3562
3563/* blink LEDs to help locate the board */
3564static int sky2_phys_id(struct net_device *dev, u32 data)
3565{
3566 struct sky2_port *sky2 = netdev_priv(dev);
a84d0a3d 3567 unsigned int i;
cd28ab6a 3568
a84d0a3d
SH
3569 if (data == 0)
3570 data = UINT_MAX;
cd28ab6a 3571
a84d0a3d
SH
3572 for (i = 0; i < data; i++) {
3573 sky2_led(sky2, MO_LED_ON);
3574 if (msleep_interruptible(500))
3575 break;
3576 sky2_led(sky2, MO_LED_OFF);
3577 if (msleep_interruptible(500))
3578 break;
793b883e 3579 }
a84d0a3d 3580 sky2_led(sky2, MO_LED_NORM);
cd28ab6a
SH
3581
3582 return 0;
3583}
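/* Usage sketch (illustrative): the blink loop above is driven by ethtool's
 * identify command, where the argument is a time in seconds, e.g.
 *
 *     ethtool -p eth0 10
 *
 * toggles the LED roughly once per second for ten iterations; omitting the
 * count (data == 0) blinks until interrupted.  The interface name "eth0"
 * is only an example.
 */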
3584
3585static void sky2_get_pauseparam(struct net_device *dev,
3586 struct ethtool_pauseparam *ecmd)
3587{
3588 struct sky2_port *sky2 = netdev_priv(dev);
3589
16ad91e1
SH
3590 switch (sky2->flow_mode) {
3591 case FC_NONE:
3592 ecmd->tx_pause = ecmd->rx_pause = 0;
3593 break;
3594 case FC_TX:
3595 ecmd->tx_pause = 1, ecmd->rx_pause = 0;
3596 break;
3597 case FC_RX:
3598 ecmd->tx_pause = 0, ecmd->rx_pause = 1;
3599 break;
3600 case FC_BOTH:
3601 ecmd->tx_pause = ecmd->rx_pause = 1;
3602 }
3603
0ea065e5
SH
3604 ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_PAUSE)
3605 ? AUTONEG_ENABLE : AUTONEG_DISABLE;
cd28ab6a
SH
3606}
3607
3608static int sky2_set_pauseparam(struct net_device *dev,
3609 struct ethtool_pauseparam *ecmd)
3610{
3611 struct sky2_port *sky2 = netdev_priv(dev);
cd28ab6a 3612
0ea065e5
SH
3613 if (ecmd->autoneg == AUTONEG_ENABLE)
3614 sky2->flags |= SKY2_FLAG_AUTO_PAUSE;
3615 else
3616 sky2->flags &= ~SKY2_FLAG_AUTO_PAUSE;
3617
16ad91e1 3618 sky2->flow_mode = sky2_flow(ecmd->rx_pause, ecmd->tx_pause);
cd28ab6a 3619
16ad91e1
SH
3620 if (netif_running(dev))
3621 sky2_phy_reinit(sky2);
cd28ab6a 3622
2eaba1a2 3623 return 0;
cd28ab6a
SH
3624}
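/* Example (illustrative): flow control is configured with ethtool's pause
 * command, e.g.
 *
 *     ethtool -A eth0 autoneg off rx on tx off
 *
 * which clears SKY2_FLAG_AUTO_PAUSE and sets flow_mode to receive-only
 * pause; the change takes effect through sky2_phy_reinit() when the
 * interface is running.  The interface name is an example.
 */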
3625
fb17358f
SH
3626static int sky2_get_coalesce(struct net_device *dev,
3627 struct ethtool_coalesce *ecmd)
3628{
3629 struct sky2_port *sky2 = netdev_priv(dev);
3630 struct sky2_hw *hw = sky2->hw;
3631
3632 if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_STOP)
3633 ecmd->tx_coalesce_usecs = 0;
3634 else {
3635 u32 clks = sky2_read32(hw, STAT_TX_TIMER_INI);
3636 ecmd->tx_coalesce_usecs = sky2_clk2us(hw, clks);
3637 }
3638 ecmd->tx_max_coalesced_frames = sky2_read16(hw, STAT_TX_IDX_TH);
3639
3640 if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_STOP)
3641 ecmd->rx_coalesce_usecs = 0;
3642 else {
3643 u32 clks = sky2_read32(hw, STAT_LEV_TIMER_INI);
3644 ecmd->rx_coalesce_usecs = sky2_clk2us(hw, clks);
3645 }
3646 ecmd->rx_max_coalesced_frames = sky2_read8(hw, STAT_FIFO_WM);
3647
3648 if (sky2_read8(hw, STAT_ISR_TIMER_CTRL) == TIM_STOP)
3649 ecmd->rx_coalesce_usecs_irq = 0;
3650 else {
3651 u32 clks = sky2_read32(hw, STAT_ISR_TIMER_INI);
3652 ecmd->rx_coalesce_usecs_irq = sky2_clk2us(hw, clks);
3653 }
3654
3655 ecmd->rx_max_coalesced_frames_irq = sky2_read8(hw, STAT_FIFO_ISR_WM);
3656
3657 return 0;
3658}
3659
3660/* Note: this affects both ports */
3661static int sky2_set_coalesce(struct net_device *dev,
3662 struct ethtool_coalesce *ecmd)
3663{
3664 struct sky2_port *sky2 = netdev_priv(dev);
3665 struct sky2_hw *hw = sky2->hw;
77b3d6a2 3666 const u32 tmax = sky2_clk2us(hw, 0x0ffffff);
fb17358f 3667
77b3d6a2
SH
3668 if (ecmd->tx_coalesce_usecs > tmax ||
3669 ecmd->rx_coalesce_usecs > tmax ||
3670 ecmd->rx_coalesce_usecs_irq > tmax)
fb17358f
SH
3671 return -EINVAL;
3672
ee5f68fe 3673 if (ecmd->tx_max_coalesced_frames >= sky2->tx_ring_size-1)
fb17358f 3674 return -EINVAL;
ff81fbbe 3675 if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
fb17358f 3676 return -EINVAL;
ff81fbbe 3677	if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING)
fb17358f
SH
3678 return -EINVAL;
3679
3680 if (ecmd->tx_coalesce_usecs == 0)
3681 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
3682 else {
3683 sky2_write32(hw, STAT_TX_TIMER_INI,
3684 sky2_us2clk(hw, ecmd->tx_coalesce_usecs));
3685 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
3686 }
3687 sky2_write16(hw, STAT_TX_IDX_TH, ecmd->tx_max_coalesced_frames);
3688
3689 if (ecmd->rx_coalesce_usecs == 0)
3690 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
3691 else {
3692 sky2_write32(hw, STAT_LEV_TIMER_INI,
3693 sky2_us2clk(hw, ecmd->rx_coalesce_usecs));
3694 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
3695 }
3696 sky2_write8(hw, STAT_FIFO_WM, ecmd->rx_max_coalesced_frames);
3697
3698 if (ecmd->rx_coalesce_usecs_irq == 0)
3699 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_STOP);
3700 else {
d28d4870 3701 sky2_write32(hw, STAT_ISR_TIMER_INI,
fb17358f
SH
3702 sky2_us2clk(hw, ecmd->rx_coalesce_usecs_irq));
3703 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
3704 }
3705 sky2_write8(hw, STAT_FIFO_ISR_WM, ecmd->rx_max_coalesced_frames_irq);
3706 return 0;
3707}
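/* Example (illustrative): these parameters are changed with ethtool's
 * coalescing command, e.g.
 *
 *     ethtool -C eth0 tx-usecs 50 tx-frames 20 rx-usecs 20 rx-frames 8
 *
 * Because the status unit timers are shared, the new values apply to both
 * ports on dual-port boards, and a value of 0 for any of the usecs
 * parameters stops the corresponding timer.  Values shown are examples.
 */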
3708
793b883e
SH
3709static void sky2_get_ringparam(struct net_device *dev,
3710 struct ethtool_ringparam *ering)
3711{
3712 struct sky2_port *sky2 = netdev_priv(dev);
3713
3714 ering->rx_max_pending = RX_MAX_PENDING;
3715 ering->rx_mini_max_pending = 0;
3716 ering->rx_jumbo_max_pending = 0;
ee5f68fe 3717 ering->tx_max_pending = TX_MAX_PENDING;
793b883e
SH
3718
3719 ering->rx_pending = sky2->rx_pending;
3720 ering->rx_mini_pending = 0;
3721 ering->rx_jumbo_pending = 0;
3722 ering->tx_pending = sky2->tx_pending;
3723}
3724
3725static int sky2_set_ringparam(struct net_device *dev,
3726 struct ethtool_ringparam *ering)
3727{
3728 struct sky2_port *sky2 = netdev_priv(dev);
793b883e
SH
3729
3730 if (ering->rx_pending > RX_MAX_PENDING ||
3731 ering->rx_pending < 8 ||
ee5f68fe
SH
3732 ering->tx_pending < TX_MIN_PENDING ||
3733 ering->tx_pending > TX_MAX_PENDING)
793b883e
SH
3734 return -EINVAL;
3735
af18d8b8 3736 sky2_detach(dev);
793b883e
SH
3737
3738 sky2->rx_pending = ering->rx_pending;
3739 sky2->tx_pending = ering->tx_pending;
ee5f68fe 3740 sky2->tx_ring_size = roundup_pow_of_two(sky2->tx_pending+1);
793b883e 3741
af18d8b8 3742 return sky2_reattach(dev);
793b883e
SH
3743}
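/* Example (illustrative): ring sizes are changed with ethtool, e.g.
 *
 *     ethtool -G eth0 rx 128 tx 255
 *
 * tx_pending becomes 255 and the transmit ring is rounded up to the next
 * power of two, roundup_pow_of_two(255 + 1) = 256 list elements; the
 * default of 127 pending entries gives a 128-entry ring.
 */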
3744
793b883e
SH
3745static int sky2_get_regs_len(struct net_device *dev)
3746{
6e4cbb34 3747 return 0x4000;
793b883e
SH
3748}
3749
3750/*
3751 * Returns a copy of the control register region
3ead5db7 3752 * Note: ethtool_get_regs always provides a full-size (16k) buffer
793b883e
SH
3753 */
3754static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
3755 void *p)
3756{
3757 const struct sky2_port *sky2 = netdev_priv(dev);
793b883e 3758 const void __iomem *io = sky2->hw->regs;
295b54c4 3759 unsigned int b;
793b883e
SH
3760
3761 regs->version = 1;
793b883e 3762
295b54c4
SH
3763 for (b = 0; b < 128; b++) {
3764		/* This complicated switch statement makes sure we only
3765		 * access register regions that are not reserved.
3766		 * Some blocks are only valid on dual port cards,
3767		 * and block 3 has special diagnostic registers that
3768		 * are poisonous to read.
3769 */
3770 switch (b) {
3771 case 3:
3772 /* skip diagnostic ram region */
3773 memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10);
3774 break;
3ead5db7 3775
295b54c4
SH
3776 /* dual port cards only */
3777 case 5: /* Tx Arbiter 2 */
3778 case 9: /* RX2 */
3779 case 14 ... 15: /* TX2 */
3780 case 17: case 19: /* Ram Buffer 2 */
3781 case 22 ... 23: /* Tx Ram Buffer 2 */
3782 case 25: /* Rx MAC Fifo 1 */
3783 case 27: /* Tx MAC Fifo 2 */
3784 case 31: /* GPHY 2 */
3785 case 40 ... 47: /* Pattern Ram 2 */
3786 case 52: case 54: /* TCP Segmentation 2 */
3787 case 112 ... 116: /* GMAC 2 */
3788 if (sky2->hw->ports == 1)
3789 goto reserved;
3790 /* fall through */
3791 case 0: /* Control */
3792 case 2: /* Mac address */
3793 case 4: /* Tx Arbiter 1 */
3794 case 7: /* PCI express reg */
3795 case 8: /* RX1 */
3796 case 12 ... 13: /* TX1 */
3797 case 16: case 18:/* Rx Ram Buffer 1 */
3798 case 20 ... 21: /* Tx Ram Buffer 1 */
3799 case 24: /* Rx MAC Fifo 1 */
3800 case 26: /* Tx MAC Fifo 1 */
3801 case 28 ... 29: /* Descriptor and status unit */
3802 case 30: /* GPHY 1*/
3803 case 32 ... 39: /* Pattern Ram 1 */
3804 case 48: case 50: /* TCP Segmentation 1 */
3805 case 56 ... 60: /* PCI space */
3806 case 80 ... 84: /* GMAC 1 */
3807 memcpy_fromio(p, io, 128);
3808 break;
3809 default:
3810reserved:
3811 memset(p, 0, 128);
3812 }
3ead5db7 3813
295b54c4
SH
3814 p += 128;
3815 io += 128;
3816 }
793b883e 3817}
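/* Note (added for illustration): the dump covers 128 blocks of 128 bytes
 * each, i.e. the full 0x4000 (16 KB) window reported by
 * sky2_get_regs_len().  Block b maps to offsets b*128 .. b*128+127, so
 * block 24 (Rx MAC Fifo 1), for example, is 0xc00-0xc7f.  Reserved blocks
 * and second-port blocks on single-port cards come back zero-filled.
 * Userspace fetches the dump with "ethtool -d eth0" (name is an example).
 */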
cd28ab6a 3818
b628ed98
SH
3819/* In order to do jumbo packets on these chips, the transmit
3820 * store/forward has to be turned off, so checksum offload won't work.
3821 */
3822static int no_tx_offload(struct net_device *dev)
3823{
3824 const struct sky2_port *sky2 = netdev_priv(dev);
3825 const struct sky2_hw *hw = sky2->hw;
3826
69161611 3827 return dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U;
b628ed98
SH
3828}
3829
3830static int sky2_set_tx_csum(struct net_device *dev, u32 data)
3831{
3832 if (data && no_tx_offload(dev))
3833 return -EINVAL;
3834
3835 return ethtool_op_set_tx_csum(dev, data);
3836}
3837
3838
3839static int sky2_set_tso(struct net_device *dev, u32 data)
3840{
3841 if (data && no_tx_offload(dev))
3842 return -EINVAL;
3843
3844 return ethtool_op_set_tso(dev, data);
3845}
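/* Example (illustrative): on a Yukon EC Ultra running with an MTU above
 * 1500, attempts to re-enable the transmit offloads are rejected, e.g.
 *
 *     ip link set eth0 mtu 9000
 *     ethtool -K eth0 tx on       # -> EINVAL from sky2_set_tx_csum()
 *     ethtool -K eth0 tso on      # -> EINVAL from sky2_set_tso()
 *
 * The interface name is an example; other chip revisions are unaffected.
 */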
3846
f4331a6d
SH
3847static int sky2_get_eeprom_len(struct net_device *dev)
3848{
3849 struct sky2_port *sky2 = netdev_priv(dev);
b32f40c4 3850 struct sky2_hw *hw = sky2->hw;
f4331a6d
SH
3851 u16 reg2;
3852
b32f40c4 3853 reg2 = sky2_pci_read16(hw, PCI_DEV_REG2);
f4331a6d
SH
3854 return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
3855}
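/* Note (added for illustration): the PCI_VPD_ROM_SZ field encodes the VPD
 * ROM size as a power of two starting at 256 bytes: a field value of 0
 * means 1 << 8 = 256 bytes, 1 means 512 bytes, 2 means 1 KB, and so on.
 * This is the length reported to "ethtool -e" / "ethtool -E".
 */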
3856
1413235c 3857static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
f4331a6d 3858{
1413235c 3859 unsigned long start = jiffies;
f4331a6d 3860
1413235c
SH
3861 while ( (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) {
3862 /* Can take up to 10.6 ms for write */
3863 if (time_after(jiffies, start + HZ/4)) {
3864			dev_err(&hw->pdev->dev, PFX "VPD cycle timed out\n");
3865 return -ETIMEDOUT;
3866 }
3867 mdelay(1);
3868 }
167f53d0 3869
1413235c
SH
3870 return 0;
3871}
167f53d0 3872
1413235c
SH
3873static int sky2_vpd_read(struct sky2_hw *hw, int cap, void *data,
3874 u16 offset, size_t length)
3875{
3876 int rc = 0;
3877
3878 while (length > 0) {
3879 u32 val;
3880
3881 sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset);
3882 rc = sky2_vpd_wait(hw, cap, 0);
3883 if (rc)
3884 break;
3885
3886 val = sky2_pci_read32(hw, cap + PCI_VPD_DATA);
3887
3888 memcpy(data, &val, min(sizeof(val), length));
3889 offset += sizeof(u32);
3890 data += sizeof(u32);
3891 length -= sizeof(u32);
3892 }
3893
3894 return rc;
f4331a6d
SH
3895}
3896
1413235c
SH
3897static int sky2_vpd_write(struct sky2_hw *hw, int cap, const void *data,
3898 u16 offset, unsigned int length)
f4331a6d 3899{
1413235c
SH
3900 unsigned int i;
3901 int rc = 0;
3902
3903 for (i = 0; i < length; i += sizeof(u32)) {
3904 u32 val = *(u32 *)(data + i);
3905
3906 sky2_pci_write32(hw, cap + PCI_VPD_DATA, val);
3907 sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F);
3908
3909 rc = sky2_vpd_wait(hw, cap, PCI_VPD_ADDR_F);
3910 if (rc)
3911 break;
3912 }
3913 return rc;
f4331a6d
SH
3914}
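/* Handshake summary (added for illustration, follows the code above): a
 * VPD read writes the word offset to PCI_VPD_ADDR with the F flag clear
 * and polls until the hardware sets F, then reads 32 bits from
 * PCI_VPD_DATA; a VPD write stores 32 bits in PCI_VPD_DATA, writes the
 * offset with F set, and polls until the hardware clears F.  Each cycle
 * is bounded by the HZ/4 timeout in sky2_vpd_wait().
 */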
3915
3916static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
3917 u8 *data)
3918{
3919 struct sky2_port *sky2 = netdev_priv(dev);
3920 int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
f4331a6d
SH
3921
3922 if (!cap)
3923 return -EINVAL;
3924
3925 eeprom->magic = SKY2_EEPROM_MAGIC;
3926
1413235c 3927 return sky2_vpd_read(sky2->hw, cap, data, eeprom->offset, eeprom->len);
f4331a6d
SH
3928}
3929
3930static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
3931 u8 *data)
3932{
3933 struct sky2_port *sky2 = netdev_priv(dev);
3934 int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
f4331a6d
SH
3935
3936 if (!cap)
3937 return -EINVAL;
3938
3939 if (eeprom->magic != SKY2_EEPROM_MAGIC)
3940 return -EINVAL;
3941
1413235c
SH
3942 /* Partial writes not supported */
3943 if ((eeprom->offset & 3) || (eeprom->len & 3))
3944 return -EINVAL;
f4331a6d 3945
1413235c 3946 return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
f4331a6d
SH
3947}
3948
3949
7282d491 3950static const struct ethtool_ops sky2_ethtool_ops = {
f4331a6d
SH
3951 .get_settings = sky2_get_settings,
3952 .set_settings = sky2_set_settings,
3953 .get_drvinfo = sky2_get_drvinfo,
3954 .get_wol = sky2_get_wol,
3955 .set_wol = sky2_set_wol,
3956 .get_msglevel = sky2_get_msglevel,
3957 .set_msglevel = sky2_set_msglevel,
3958 .nway_reset = sky2_nway_reset,
3959 .get_regs_len = sky2_get_regs_len,
3960 .get_regs = sky2_get_regs,
3961 .get_link = ethtool_op_get_link,
3962 .get_eeprom_len = sky2_get_eeprom_len,
3963 .get_eeprom = sky2_get_eeprom,
3964 .set_eeprom = sky2_set_eeprom,
f4331a6d 3965 .set_sg = ethtool_op_set_sg,
f4331a6d 3966 .set_tx_csum = sky2_set_tx_csum,
f4331a6d
SH
3967 .set_tso = sky2_set_tso,
3968 .get_rx_csum = sky2_get_rx_csum,
3969 .set_rx_csum = sky2_set_rx_csum,
3970 .get_strings = sky2_get_strings,
3971 .get_coalesce = sky2_get_coalesce,
3972 .set_coalesce = sky2_set_coalesce,
3973 .get_ringparam = sky2_get_ringparam,
3974 .set_ringparam = sky2_set_ringparam,
cd28ab6a
SH
3975 .get_pauseparam = sky2_get_pauseparam,
3976 .set_pauseparam = sky2_set_pauseparam,
f4331a6d 3977 .phys_id = sky2_phys_id,
b9f2c044 3978 .get_sset_count = sky2_get_sset_count,
cd28ab6a
SH
3979 .get_ethtool_stats = sky2_get_ethtool_stats,
3980};
3981
3cf26753
SH
3982#ifdef CONFIG_SKY2_DEBUG
3983
3984static struct dentry *sky2_debug;
3985
e4c2abe2
SH
3986
3987/*
3988 * Read and parse the first part of Vital Product Data
3989 */
3990#define VPD_SIZE 128
3991#define VPD_MAGIC 0x82
3992
3993static const struct vpd_tag {
3994 char tag[2];
3995 char *label;
3996} vpd_tags[] = {
3997 { "PN", "Part Number" },
3998 { "EC", "Engineering Level" },
3999 { "MN", "Manufacturer" },
4000 { "SN", "Serial Number" },
4001 { "YA", "Asset Tag" },
4002 { "VL", "First Error Log Message" },
4003 { "VF", "Second Error Log Message" },
4004 { "VB", "Boot Agent ROM Configuration" },
4005 { "VE", "EFI UNDI Configuration" },
4006};
4007
4008static void sky2_show_vpd(struct seq_file *seq, struct sky2_hw *hw)
4009{
4010 size_t vpd_size;
4011 loff_t offs;
4012 u8 len;
4013 unsigned char *buf;
4014 u16 reg2;
4015
4016 reg2 = sky2_pci_read16(hw, PCI_DEV_REG2);
4017 vpd_size = 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
4018
4019 seq_printf(seq, "%s Product Data\n", pci_name(hw->pdev));
4020 buf = kmalloc(vpd_size, GFP_KERNEL);
4021 if (!buf) {
4022 seq_puts(seq, "no memory!\n");
4023 return;
4024 }
4025
4026 if (pci_read_vpd(hw->pdev, 0, vpd_size, buf) < 0) {
4027 seq_puts(seq, "VPD read failed\n");
4028 goto out;
4029 }
4030
4031 if (buf[0] != VPD_MAGIC) {
4032 seq_printf(seq, "VPD tag mismatch: %#x\n", buf[0]);
4033 goto out;
4034 }
4035 len = buf[1];
4036 if (len == 0 || len > vpd_size - 4) {
4037 seq_printf(seq, "Invalid id length: %d\n", len);
4038 goto out;
4039 }
4040
4041 seq_printf(seq, "%.*s\n", len, buf + 3);
4042 offs = len + 3;
4043
4044 while (offs < vpd_size - 4) {
4045 int i;
4046
4047 if (!memcmp("RW", buf + offs, 2)) /* end marker */
4048 break;
4049 len = buf[offs + 2];
4050 if (offs + len + 3 >= vpd_size)
4051 break;
4052
4053 for (i = 0; i < ARRAY_SIZE(vpd_tags); i++) {
4054 if (!memcmp(vpd_tags[i].tag, buf + offs, 2)) {
4055 seq_printf(seq, " %s: %.*s\n",
4056 vpd_tags[i].label, len, buf + offs + 3);
4057 break;
4058 }
4059 }
4060 offs += len + 3;
4061 }
4062out:
4063 kfree(buf);
4064}
4065
3cf26753
SH
4066static int sky2_debug_show(struct seq_file *seq, void *v)
4067{
4068 struct net_device *dev = seq->private;
4069 const struct sky2_port *sky2 = netdev_priv(dev);
bea3348e 4070 struct sky2_hw *hw = sky2->hw;
3cf26753
SH
4071 unsigned port = sky2->port;
4072 unsigned idx, last;
4073 int sop;
4074
e4c2abe2 4075 sky2_show_vpd(seq, hw);
3cf26753 4076
e4c2abe2 4077 seq_printf(seq, "\nIRQ src=%x mask=%x control=%x\n",
3cf26753
SH
4078 sky2_read32(hw, B0_ISRC),
4079 sky2_read32(hw, B0_IMSK),
4080 sky2_read32(hw, B0_Y2_SP_ICR));
4081
e4c2abe2
SH
4082 if (!netif_running(dev)) {
4083 seq_printf(seq, "network not running\n");
4084 return 0;
4085 }
4086
bea3348e 4087 napi_disable(&hw->napi);
3cf26753
SH
4088 last = sky2_read16(hw, STAT_PUT_IDX);
4089
4090 if (hw->st_idx == last)
4091 seq_puts(seq, "Status ring (empty)\n");
4092 else {
4093 seq_puts(seq, "Status ring\n");
4094 for (idx = hw->st_idx; idx != last && idx < STATUS_RING_SIZE;
4095 idx = RING_NEXT(idx, STATUS_RING_SIZE)) {
4096 const struct sky2_status_le *le = hw->st_le + idx;
4097 seq_printf(seq, "[%d] %#x %d %#x\n",
4098 idx, le->opcode, le->length, le->status);
4099 }
4100 seq_puts(seq, "\n");
4101 }
4102
4103 seq_printf(seq, "Tx ring pending=%u...%u report=%d done=%d\n",
4104 sky2->tx_cons, sky2->tx_prod,
4105 sky2_read16(hw, port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
4106 sky2_read16(hw, Q_ADDR(txqaddr[port], Q_DONE)));
4107
4108 /* Dump contents of tx ring */
4109 sop = 1;
ee5f68fe
SH
4110 for (idx = sky2->tx_next; idx != sky2->tx_prod && idx < sky2->tx_ring_size;
4111 idx = RING_NEXT(idx, sky2->tx_ring_size)) {
3cf26753
SH
4112 const struct sky2_tx_le *le = sky2->tx_le + idx;
4113 u32 a = le32_to_cpu(le->addr);
4114
4115 if (sop)
4116 seq_printf(seq, "%u:", idx);
4117 sop = 0;
4118
4119 switch(le->opcode & ~HW_OWNER) {
4120 case OP_ADDR64:
4121 seq_printf(seq, " %#x:", a);
4122 break;
4123 case OP_LRGLEN:
4124 seq_printf(seq, " mtu=%d", a);
4125 break;
4126 case OP_VLAN:
4127 seq_printf(seq, " vlan=%d", be16_to_cpu(le->length));
4128 break;
4129 case OP_TCPLISW:
4130 seq_printf(seq, " csum=%#x", a);
4131 break;
4132 case OP_LARGESEND:
4133 seq_printf(seq, " tso=%#x(%d)", a, le16_to_cpu(le->length));
4134 break;
4135 case OP_PACKET:
4136 seq_printf(seq, " %#x(%d)", a, le16_to_cpu(le->length));
4137 break;
4138 case OP_BUFFER:
4139 seq_printf(seq, " frag=%#x(%d)", a, le16_to_cpu(le->length));
4140 break;
4141 default:
4142 seq_printf(seq, " op=%#x,%#x(%d)", le->opcode,
4143 a, le16_to_cpu(le->length));
4144 }
4145
4146 if (le->ctrl & EOP) {
4147 seq_putc(seq, '\n');
4148 sop = 1;
4149 }
4150 }
4151
4152 seq_printf(seq, "\nRx ring hw get=%d put=%d last=%d\n",
4153 sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_GET_IDX)),
c409c34b 4154 sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_PUT_IDX)),
3cf26753
SH
4155 sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_LAST_IDX)));
4156
d1d08d12 4157 sky2_read32(hw, B0_Y2_SP_LISR);
bea3348e 4158 napi_enable(&hw->napi);
3cf26753
SH
4159 return 0;
4160}
4161
4162static int sky2_debug_open(struct inode *inode, struct file *file)
4163{
4164 return single_open(file, sky2_debug_show, inode->i_private);
4165}
4166
4167static const struct file_operations sky2_debug_fops = {
4168 .owner = THIS_MODULE,
4169 .open = sky2_debug_open,
4170 .read = seq_read,
4171 .llseek = seq_lseek,
4172 .release = single_release,
4173};
4174
4175/*
4176 * Use network device events to create/remove/rename
4177 * debugfs file entries
4178 */
4179static int sky2_device_event(struct notifier_block *unused,
4180 unsigned long event, void *ptr)
4181{
4182 struct net_device *dev = ptr;
5b296bc9 4183 struct sky2_port *sky2 = netdev_priv(dev);
3cf26753 4184
1436b301 4185 if (dev->netdev_ops->ndo_open != sky2_up || !sky2_debug)
5b296bc9 4186 return NOTIFY_DONE;
3cf26753 4187
5b296bc9
SH
4188 switch(event) {
4189 case NETDEV_CHANGENAME:
4190 if (sky2->debugfs) {
4191 sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs,
4192 sky2_debug, dev->name);
4193 }
4194 break;
3cf26753 4195
5b296bc9
SH
4196 case NETDEV_GOING_DOWN:
4197 if (sky2->debugfs) {
4198 printk(KERN_DEBUG PFX "%s: remove debugfs\n",
4199 dev->name);
4200 debugfs_remove(sky2->debugfs);
4201 sky2->debugfs = NULL;
3cf26753 4202 }
5b296bc9
SH
4203 break;
4204
4205 case NETDEV_UP:
4206 sky2->debugfs = debugfs_create_file(dev->name, S_IRUGO,
4207 sky2_debug, dev,
4208 &sky2_debug_fops);
4209 if (IS_ERR(sky2->debugfs))
4210 sky2->debugfs = NULL;
3cf26753
SH
4211 }
4212
4213 return NOTIFY_DONE;
4214}
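/* Usage sketch (illustrative): with CONFIG_SKY2_DEBUG enabled, each port
 * that is brought up gets a read-only file named after the interface in
 * the "sky2" debugfs directory; assuming debugfs is mounted in the usual
 * place, something like
 *
 *     cat /sys/kernel/debug/sky2/eth0
 *
 * prints the VPD summary, interrupt registers and the current status,
 * transmit and receive ring state from sky2_debug_show().
 */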
4215
4216static struct notifier_block sky2_notifier = {
4217 .notifier_call = sky2_device_event,
4218};
4219
4220
4221static __init void sky2_debug_init(void)
4222{
4223 struct dentry *ent;
4224
4225 ent = debugfs_create_dir("sky2", NULL);
4226 if (!ent || IS_ERR(ent))
4227 return;
4228
4229 sky2_debug = ent;
4230 register_netdevice_notifier(&sky2_notifier);
4231}
4232
4233static __exit void sky2_debug_cleanup(void)
4234{
4235 if (sky2_debug) {
4236 unregister_netdevice_notifier(&sky2_notifier);
4237 debugfs_remove(sky2_debug);
4238 sky2_debug = NULL;
4239 }
4240}
4241
4242#else
4243#define sky2_debug_init()
4244#define sky2_debug_cleanup()
4245#endif
4246
1436b301
SH
4247/* Two copies of the network device operations to handle the special case
4248   of not allowing netpoll on the second port */
4249static const struct net_device_ops sky2_netdev_ops[2] = {
4250 {
4251 .ndo_open = sky2_up,
4252 .ndo_stop = sky2_down,
00829823 4253 .ndo_start_xmit = sky2_xmit_frame,
1436b301
SH
4254 .ndo_do_ioctl = sky2_ioctl,
4255 .ndo_validate_addr = eth_validate_addr,
4256 .ndo_set_mac_address = sky2_set_mac_address,
4257 .ndo_set_multicast_list = sky2_set_multicast,
4258 .ndo_change_mtu = sky2_change_mtu,
4259 .ndo_tx_timeout = sky2_tx_timeout,
4260#ifdef SKY2_VLAN_TAG_USED
4261 .ndo_vlan_rx_register = sky2_vlan_rx_register,
4262#endif
4263#ifdef CONFIG_NET_POLL_CONTROLLER
4264 .ndo_poll_controller = sky2_netpoll,
4265#endif
4266 },
4267 {
4268 .ndo_open = sky2_up,
4269 .ndo_stop = sky2_down,
00829823 4270 .ndo_start_xmit = sky2_xmit_frame,
1436b301
SH
4271 .ndo_do_ioctl = sky2_ioctl,
4272 .ndo_validate_addr = eth_validate_addr,
4273 .ndo_set_mac_address = sky2_set_mac_address,
4274 .ndo_set_multicast_list = sky2_set_multicast,
4275 .ndo_change_mtu = sky2_change_mtu,
4276 .ndo_tx_timeout = sky2_tx_timeout,
4277#ifdef SKY2_VLAN_TAG_USED
4278 .ndo_vlan_rx_register = sky2_vlan_rx_register,
4279#endif
4280 },
4281};
3cf26753 4282
cd28ab6a
SH
4283/* Initialize network device */
4284static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
e3173832 4285 unsigned port,
be63a21c 4286 int highmem, int wol)
cd28ab6a
SH
4287{
4288 struct sky2_port *sky2;
4289 struct net_device *dev = alloc_etherdev(sizeof(*sky2));
4290
4291 if (!dev) {
898eb71c 4292 dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
cd28ab6a
SH
4293 return NULL;
4294 }
4295
cd28ab6a 4296 SET_NETDEV_DEV(dev, &hw->pdev->dev);
ef743d33 4297 dev->irq = hw->pdev->irq;
cd28ab6a 4298 SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
cd28ab6a 4299 dev->watchdog_timeo = TX_WATCHDOG;
1436b301 4300 dev->netdev_ops = &sky2_netdev_ops[port];
cd28ab6a
SH
4301
4302 sky2 = netdev_priv(dev);
4303 sky2->netdev = dev;
4304 sky2->hw = hw;
4305 sky2->msg_enable = netif_msg_init(debug, default_msg);
4306
cd28ab6a 4307 /* Auto speed and flow control */
0ea065e5
SH
4308 sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE;
4309 if (hw->chip_id != CHIP_ID_YUKON_XL)
4310 sky2->flags |= SKY2_FLAG_RX_CHECKSUM;
4311
16ad91e1
SH
4312 sky2->flow_mode = FC_BOTH;
4313
cd28ab6a
SH
4314 sky2->duplex = -1;
4315 sky2->speed = -1;
4316 sky2->advertising = sky2_supported_modes(hw);
be63a21c 4317 sky2->wol = wol;
75d070c5 4318
e07b1aa8 4319 spin_lock_init(&sky2->phy_lock);
ee5f68fe 4320
793b883e 4321 sky2->tx_pending = TX_DEF_PENDING;
ee5f68fe 4322 sky2->tx_ring_size = roundup_pow_of_two(TX_DEF_PENDING+1);
290d4de5 4323 sky2->rx_pending = RX_DEF_PENDING;
cd28ab6a
SH
4324
4325 hw->dev[port] = dev;
4326
4327 sky2->port = port;
4328
4a50a876 4329 dev->features |= NETIF_F_TSO | NETIF_F_IP_CSUM | NETIF_F_SG;
cd28ab6a
SH
4330 if (highmem)
4331 dev->features |= NETIF_F_HIGHDMA;
cd28ab6a 4332
d1f13708 4333#ifdef SKY2_VLAN_TAG_USED
d6c9bc1e
SH
4334 /* The workaround for FE+ status conflicts with VLAN tag detection. */
4335 if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
4336 sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) {
4337 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
d6c9bc1e 4338 }
d1f13708
SH
4339#endif
4340
cd28ab6a 4341 /* read the mac address */
793b883e 4342 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
2995bfb7 4343 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
cd28ab6a 4344
cd28ab6a
SH
4345 return dev;
4346}
4347
28bd181a 4348static void __devinit sky2_show_addr(struct net_device *dev)
cd28ab6a
SH
4349{
4350 const struct sky2_port *sky2 = netdev_priv(dev);
4351
4352 if (netif_msg_probe(sky2))
e174961c
JB
4353 printk(KERN_INFO PFX "%s: addr %pM\n",
4354 dev->name, dev->dev_addr);
cd28ab6a
SH
4355}
4356
fb2690a9 4357/* Handle software interrupt used during MSI test */
7d12e780 4358static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id)
fb2690a9
SH
4359{
4360 struct sky2_hw *hw = dev_id;
4361 u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
4362
4363 if (status == 0)
4364 return IRQ_NONE;
4365
4366 if (status & Y2_IS_IRQ_SW) {
ea76e635 4367 hw->flags |= SKY2_HW_USE_MSI;
fb2690a9
SH
4368 wake_up(&hw->msi_wait);
4369 sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
4370 }
4371 sky2_write32(hw, B0_Y2_SP_ICR, 2);
4372
4373 return IRQ_HANDLED;
4374}
4375
4376 /* Test interrupt path by forcing a software IRQ */
4377static int __devinit sky2_test_msi(struct sky2_hw *hw)
4378{
4379 struct pci_dev *pdev = hw->pdev;
4380 int err;
4381
bb507fe1
SH
4382	init_waitqueue_head(&hw->msi_wait);
4383
fb2690a9
SH
4384 sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
4385
b0a20ded 4386 err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
fb2690a9 4387 if (err) {
b02a9258 4388 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
fb2690a9
SH
4389 return err;
4390 }
4391
fb2690a9 4392 sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
bb507fe1 4393 sky2_read8(hw, B0_CTST);
fb2690a9 4394
ea76e635 4395 wait_event_timeout(hw->msi_wait, (hw->flags & SKY2_HW_USE_MSI), HZ/10);
fb2690a9 4396
ea76e635 4397 if (!(hw->flags & SKY2_HW_USE_MSI)) {
fb2690a9 4398 /* MSI test failed, go back to INTx mode */
b02a9258
SH
4399 dev_info(&pdev->dev, "No interrupt generated using MSI, "
4400 "switching to INTx mode.\n");
fb2690a9
SH
4401
4402 err = -EOPNOTSUPP;
4403 sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
4404 }
4405
4406 sky2_write32(hw, B0_IMSK, 0);
2bffc23a 4407 sky2_read32(hw, B0_IMSK);
fb2690a9
SH
4408
4409 free_irq(pdev->irq, hw);
4410
4411 return err;
4412}
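/* Note (added for illustration): the test enables only Y2_IS_IRQ_SW in the
 * interrupt mask, requests the (possibly MSI) vector, forces a software
 * IRQ with CS_ST_SW_IRQ and waits up to HZ/10 (~100 ms) for
 * sky2_test_intr() to set SKY2_HW_USE_MSI.  If the interrupt never
 * arrives, returning -EOPNOTSUPP makes the probe fall back to legacy INTx.
 */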
4413
c7127a34
SH
4414/* This driver supports yukon2 chipset only */
4415static const char *sky2_name(u8 chipid, char *buf, int sz)
4416{
4417 const char *name[] = {
4418 "XL", /* 0xb3 */
4419 "EC Ultra", /* 0xb4 */
4420 "Extreme", /* 0xb5 */
4421 "EC", /* 0xb6 */
4422 "FE", /* 0xb7 */
4423 "FE+", /* 0xb8 */
4424 "Supreme", /* 0xb9 */
0ce8b98d 4425 "UL 2", /* 0xba */
c7127a34
SH
4426 };
4427
0ce8b98d 4428	if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_UL_2)
c7127a34
SH
4429 strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz);
4430 else
4431 snprintf(buf, sz, "(chip %#x)", chipid);
4432 return buf;
4433}
4434
cd28ab6a
SH
4435static int __devinit sky2_probe(struct pci_dev *pdev,
4436 const struct pci_device_id *ent)
4437{
7f60c64b 4438 struct net_device *dev;
cd28ab6a 4439 struct sky2_hw *hw;
be63a21c 4440 int err, using_dac = 0, wol_default;
3834507d 4441 u32 reg;
c7127a34 4442 char buf1[16];
cd28ab6a 4443
793b883e
SH
4444 err = pci_enable_device(pdev);
4445 if (err) {
b02a9258 4446 dev_err(&pdev->dev, "cannot enable PCI device\n");
cd28ab6a
SH
4447 goto err_out;
4448 }
4449
6cc90a5a
SH
4450 /* Get configuration information
4451	 * Note: use regular PCI config access only once, to test for HW issues;
4452	 * all other PCI access goes through shared memory for speed and to
4453	 * avoid MMCONFIG problems.
4454 */
4455 err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
4456 if (err) {
4457 dev_err(&pdev->dev, "PCI read config failed\n");
4458 goto err_out;
4459 }
4460
4461 if (~reg == 0) {
4462 dev_err(&pdev->dev, "PCI configuration read error\n");
4463 goto err_out;
4464 }
4465
793b883e
SH
4466 err = pci_request_regions(pdev, DRV_NAME);
4467 if (err) {
b02a9258 4468 dev_err(&pdev->dev, "cannot obtain PCI resources\n");
44a1d2e5 4469 goto err_out_disable;
cd28ab6a
SH
4470 }
4471
4472 pci_set_master(pdev);
4473
d1f3d4dd 4474 if (sizeof(dma_addr_t) > sizeof(u32) &&
6a35528a 4475 !(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))) {
d1f3d4dd 4476 using_dac = 1;
6a35528a 4477 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
d1f3d4dd 4478 if (err < 0) {
b02a9258
SH
4479 dev_err(&pdev->dev, "unable to obtain 64 bit DMA "
4480 "for consistent allocations\n");
d1f3d4dd
SH
4481 goto err_out_free_regions;
4482 }
d1f3d4dd 4483 } else {
284901a9 4484 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
cd28ab6a 4485 if (err) {
b02a9258 4486 dev_err(&pdev->dev, "no usable DMA configuration\n");
cd28ab6a
SH
4487 goto err_out_free_regions;
4488 }
4489 }
d1f3d4dd 4490
3834507d
SH
4491
4492#ifdef __BIG_ENDIAN
4493 /* The sk98lin vendor driver uses hardware byte swapping but
4494 * this driver uses software swapping.
4495 */
4496 reg &= ~PCI_REV_DESC;
4497	err = pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
4498 if (err) {
4499 dev_err(&pdev->dev, "PCI write config failed\n");
4500 goto err_out_free_regions;
4501 }
4502#endif
4503
9d731d77 4504 wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0;
be63a21c 4505
cd28ab6a 4506 err = -ENOMEM;
6aad85d6 4507 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
cd28ab6a 4508 if (!hw) {
b02a9258 4509 dev_err(&pdev->dev, "cannot allocate hardware struct\n");
cd28ab6a
SH
4510 goto err_out_free_regions;
4511 }
4512
cd28ab6a 4513 hw->pdev = pdev;
cd28ab6a
SH
4514
4515 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
4516 if (!hw->regs) {
b02a9258 4517 dev_err(&pdev->dev, "cannot map device registers\n");
cd28ab6a
SH
4518 goto err_out_free_hw;
4519 }
4520
08c06d8a 4521 /* ring for status responses */
167f53d0 4522 hw->st_le = pci_alloc_consistent(pdev, STATUS_LE_BYTES, &hw->st_dma);
08c06d8a
SH
4523 if (!hw->st_le)
4524 goto err_out_iounmap;
4525
e3173832 4526 err = sky2_init(hw);
cd28ab6a 4527 if (err)
793b883e 4528 goto err_out_iounmap;
cd28ab6a 4529
c844d483
SH
4530 dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
4531 sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
cd28ab6a 4532
e3173832
SH
4533 sky2_reset(hw);
4534
be63a21c 4535 dev = sky2_init_netdev(hw, 0, using_dac, wol_default);
7f60c64b 4536 if (!dev) {
4537 err = -ENOMEM;
cd28ab6a 4538 goto err_out_free_pci;
7f60c64b 4539 }
cd28ab6a 4540
9fa1b1f3
SH
4541 if (!disable_msi && pci_enable_msi(pdev) == 0) {
4542 err = sky2_test_msi(hw);
4543 if (err == -EOPNOTSUPP)
4544 pci_disable_msi(pdev);
4545 else if (err)
4546 goto err_out_free_netdev;
4547 }
4548
793b883e
SH
4549 err = register_netdev(dev);
4550 if (err) {
b02a9258 4551 dev_err(&pdev->dev, "cannot register net device\n");
cd28ab6a
SH
4552 goto err_out_free_netdev;
4553 }
4554
6de16237
SH
4555 netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
4556
ea76e635
SH
4557 err = request_irq(pdev->irq, sky2_intr,
4558 (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
b0a20ded 4559 dev->name, hw);
9fa1b1f3 4560 if (err) {
b02a9258 4561 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
9fa1b1f3
SH
4562 goto err_out_unregister;
4563 }
4564 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
6de16237 4565 napi_enable(&hw->napi);
9fa1b1f3 4566
cd28ab6a
SH
4567 sky2_show_addr(dev);
4568
7f60c64b 4569 if (hw->ports > 1) {
4570 struct net_device *dev1;
4571
be63a21c 4572 dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default);
b02a9258
SH
4573 if (!dev1)
4574 dev_warn(&pdev->dev, "allocation for second device failed\n");
4575 else if ((err = register_netdev(dev1))) {
4576 dev_warn(&pdev->dev,
4577 "register of second port failed (%d)\n", err);
cd28ab6a
SH
4578 hw->dev[1] = NULL;
4579 free_netdev(dev1);
b02a9258
SH
4580 } else
4581 sky2_show_addr(dev1);
cd28ab6a
SH
4582 }
4583
32c2c300 4584 setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw);
81906791
SH
4585 INIT_WORK(&hw->restart_work, sky2_restart);
4586
793b883e
SH
4587 pci_set_drvdata(pdev, hw);
4588
cd28ab6a
SH
4589 return 0;
4590
793b883e 4591err_out_unregister:
ea76e635 4592 if (hw->flags & SKY2_HW_USE_MSI)
b0a20ded 4593 pci_disable_msi(pdev);
793b883e 4594 unregister_netdev(dev);
cd28ab6a
SH
4595err_out_free_netdev:
4596 free_netdev(dev);
cd28ab6a 4597err_out_free_pci:
793b883e 4598 sky2_write8(hw, B0_CTST, CS_RST_SET);
167f53d0 4599 pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
cd28ab6a
SH
4600err_out_iounmap:
4601 iounmap(hw->regs);
4602err_out_free_hw:
4603 kfree(hw);
4604err_out_free_regions:
4605 pci_release_regions(pdev);
44a1d2e5 4606err_out_disable:
cd28ab6a 4607 pci_disable_device(pdev);
cd28ab6a 4608err_out:
549a68c3 4609 pci_set_drvdata(pdev, NULL);
cd28ab6a
SH
4610 return err;
4611}
4612
4613static void __devexit sky2_remove(struct pci_dev *pdev)
4614{
793b883e 4615 struct sky2_hw *hw = pci_get_drvdata(pdev);
6de16237 4616 int i;
cd28ab6a 4617
793b883e 4618 if (!hw)
cd28ab6a
SH
4619 return;
4620
32c2c300 4621 del_timer_sync(&hw->watchdog_timer);
6de16237 4622 cancel_work_sync(&hw->restart_work);
d27ed387 4623
b877fe28 4624 for (i = hw->ports-1; i >= 0; --i)
6de16237 4625 unregister_netdev(hw->dev[i]);
81906791 4626
d27ed387 4627 sky2_write32(hw, B0_IMSK, 0);
cd28ab6a 4628
ae306cca
SH
4629 sky2_power_aux(hw);
4630
cd28ab6a 4631 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
793b883e 4632 sky2_write8(hw, B0_CTST, CS_RST_SET);
5afa0a9c 4633 sky2_read8(hw, B0_CTST);
cd28ab6a
SH
4634
4635 free_irq(pdev->irq, hw);
ea76e635 4636 if (hw->flags & SKY2_HW_USE_MSI)
b0a20ded 4637 pci_disable_msi(pdev);
793b883e 4638 pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
cd28ab6a
SH
4639 pci_release_regions(pdev);
4640 pci_disable_device(pdev);
793b883e 4641
b877fe28 4642 for (i = hw->ports-1; i >= 0; --i)
6de16237
SH
4643 free_netdev(hw->dev[i]);
4644
cd28ab6a
SH
4645 iounmap(hw->regs);
4646 kfree(hw);
5afa0a9c 4647
cd28ab6a
SH
4648 pci_set_drvdata(pdev, NULL);
4649}
4650
4651#ifdef CONFIG_PM
4652static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
4653{
793b883e 4654 struct sky2_hw *hw = pci_get_drvdata(pdev);
e3173832 4655 int i, wol = 0;
cd28ab6a 4656
549a68c3
SH
4657 if (!hw)
4658 return 0;
4659
063a0b38
SH
4660 del_timer_sync(&hw->watchdog_timer);
4661 cancel_work_sync(&hw->restart_work);
4662
19720737 4663 rtnl_lock();
f05267e7 4664 for (i = 0; i < hw->ports; i++) {
cd28ab6a 4665 struct net_device *dev = hw->dev[i];
e3173832 4666 struct sky2_port *sky2 = netdev_priv(dev);
cd28ab6a 4667
af18d8b8 4668 sky2_detach(dev);
e3173832
SH
4669
4670 if (sky2->wol)
4671 sky2_wol_init(sky2);
4672
4673 wol |= sky2->wol;
cd28ab6a
SH
4674 }
4675
8ab8fca2 4676 sky2_write32(hw, B0_IMSK, 0);
6de16237 4677 napi_disable(&hw->napi);
ae306cca 4678 sky2_power_aux(hw);
19720737 4679 rtnl_unlock();
e3173832 4680
d374c1c1 4681 pci_save_state(pdev);
e3173832 4682 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
f71eb1a2 4683 pci_set_power_state(pdev, pci_choose_state(pdev, state));
ae306cca 4684
2ccc99b7 4685 return 0;
cd28ab6a
SH
4686}
4687
4688static int sky2_resume(struct pci_dev *pdev)
4689{
793b883e 4690 struct sky2_hw *hw = pci_get_drvdata(pdev);
08c06d8a 4691 int i, err;
cd28ab6a 4692
549a68c3
SH
4693 if (!hw)
4694 return 0;
4695
f71eb1a2
SH
4696 err = pci_set_power_state(pdev, PCI_D0);
4697 if (err)
4698 goto out;
ae306cca
SH
4699
4700 err = pci_restore_state(pdev);
4701 if (err)
4702 goto out;
4703
cd28ab6a 4704 pci_enable_wake(pdev, PCI_D0, 0);
1ad5b4a5
SH
4705
4706 /* Re-enable all clocks */
05745c4a
SH
4707 if (hw->chip_id == CHIP_ID_YUKON_EX ||
4708 hw->chip_id == CHIP_ID_YUKON_EC_U ||
4709 hw->chip_id == CHIP_ID_YUKON_FE_P)
b32f40c4 4710 sky2_pci_write32(hw, PCI_DEV_REG3, 0);
1ad5b4a5 4711
e3173832 4712 sky2_reset(hw);
8ab8fca2 4713 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
6de16237 4714 napi_enable(&hw->napi);
8ab8fca2 4715
af18d8b8 4716 rtnl_lock();
f05267e7 4717 for (i = 0; i < hw->ports; i++) {
af18d8b8
SH
4718 err = sky2_reattach(hw->dev[i]);
4719 if (err)
4720 goto out;
cd28ab6a 4721 }
af18d8b8 4722 rtnl_unlock();
eb35cf60 4723
ae306cca 4724 return 0;
08c06d8a 4725out:
af18d8b8
SH
4726 rtnl_unlock();
4727
b02a9258 4728 dev_err(&pdev->dev, "resume failed (%d)\n", err);
ae306cca 4729 pci_disable_device(pdev);
08c06d8a 4730 return err;
cd28ab6a
SH
4731}
4732#endif
4733
e3173832
SH
4734static void sky2_shutdown(struct pci_dev *pdev)
4735{
4736 struct sky2_hw *hw = pci_get_drvdata(pdev);
4737 int i, wol = 0;
4738
549a68c3
SH
4739 if (!hw)
4740 return;
4741
19720737 4742 rtnl_lock();
5c0d6b34 4743 del_timer_sync(&hw->watchdog_timer);
e3173832
SH
4744
4745 for (i = 0; i < hw->ports; i++) {
4746 struct net_device *dev = hw->dev[i];
4747 struct sky2_port *sky2 = netdev_priv(dev);
4748
4749 if (sky2->wol) {
4750 wol = 1;
4751 sky2_wol_init(sky2);
4752 }
4753 }
4754
4755 if (wol)
4756 sky2_power_aux(hw);
19720737 4757 rtnl_unlock();
e3173832
SH
4758
4759 pci_enable_wake(pdev, PCI_D3hot, wol);
4760 pci_enable_wake(pdev, PCI_D3cold, wol);
4761
4762 pci_disable_device(pdev);
f71eb1a2 4763 pci_set_power_state(pdev, PCI_D3hot);
e3173832
SH
4764}
4765
cd28ab6a 4766static struct pci_driver sky2_driver = {
793b883e
SH
4767 .name = DRV_NAME,
4768 .id_table = sky2_id_table,
4769 .probe = sky2_probe,
4770 .remove = __devexit_p(sky2_remove),
cd28ab6a 4771#ifdef CONFIG_PM
793b883e
SH
4772 .suspend = sky2_suspend,
4773 .resume = sky2_resume,
cd28ab6a 4774#endif
e3173832 4775 .shutdown = sky2_shutdown,
cd28ab6a
SH
4776};
4777
4778static int __init sky2_init_module(void)
4779{
c844d483
SH
4780 pr_info(PFX "driver version " DRV_VERSION "\n");
4781
3cf26753 4782 sky2_debug_init();
50241c4c 4783 return pci_register_driver(&sky2_driver);
cd28ab6a
SH
4784}
4785
4786static void __exit sky2_cleanup_module(void)
4787{
4788 pci_unregister_driver(&sky2_driver);
3cf26753 4789 sky2_debug_cleanup();
cd28ab6a
SH
4790}
4791
4792module_init(sky2_init_module);
4793module_exit(sky2_cleanup_module);
4794
4795MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver");
65ebe634 4796MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
cd28ab6a 4797MODULE_LICENSE("GPL");
5f4f9dc1 4798MODULE_VERSION(DRV_VERSION);