/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * http://www.jmicron.com/
 *
 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

/*
 * TODO:
 * - Decode register dump for ethtool.
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>
#include "jme.h"

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
static struct net_device_stats *
jme_get_stats(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return &jme->stats;
}
#endif

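/*
 * PHY management (SMI/MDIO) access helpers.
 *
 * A request is posted to the SMI register and polled until the
 * hardware clears SMI_OP_REQ. MII_BMSR is read twice (the 'again'
 * path below), presumably because its link-status bit is latched-low
 * per IEEE 802.3, so only the second read reflects the current state.
 */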
static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, val, again = (reg == MII_BMSR) ? 1 : 0;

read_again:
	jwrite32(jme, JME_SMI, SMI_OP_REQ |
				smi_phy_addr(phy) |
				smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
		udelay(20);
		val = jread32(jme, JME_SMI);
		if ((val & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0) {
		jeprintk("jme", "phy(%d) read timeout : %d\n", phy, reg);
		return 0;
	}

	if (again--)
		goto read_again;

	return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
}

static void
jme_mdio_write(struct net_device *netdev,
				int phy, int reg, int val)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i;

	jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
		((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
		smi_phy_addr(phy) | smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
		udelay(20);
		if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0)
		jeprintk("jme", "phy(%d) write timeout : %d\n", phy, reg);

	return;
}

__always_inline static void
jme_reset_phy_processor(struct jme_adapter *jme)
{
	__u32 val;

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_ADVERTISE, ADVERTISE_ALL |
			ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

	if (jme->pdev->device == JME_GE_DEVICE)
		jme_mdio_write(jme->dev,
				jme->mii_if.phy_id,
				MII_CTRL1000,
				ADVERTISE_1000FULL | ADVERTISE_1000HALF);

	val = jme_mdio_read(jme->dev,
				jme->mii_if.phy_id,
				MII_BMCR);

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_BMCR, val | BMCR_RESET);

	return;
}

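/*
 * Program one Wake-on-LAN pattern slot. WFOI selects which frame slot
 * and which word (the CRC or one of the mask dwords) the following
 * WFODP write lands in, i.e. WFOI/WFODP form an indirect index/data
 * register pair.
 */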
static void
jme_setup_wakeup_frame(struct jme_adapter *jme,
		__u32 *mask, __u32 crc, int fnr)
{
	int i;

	/*
	 * Setup CRC pattern
	 */
	jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
	wmb();
	jwrite32(jme, JME_WFODP, crc);
	wmb();

	/*
	 * Setup Mask
	 */
	for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
		jwrite32(jme, JME_WFOI,
				((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
				(fnr & WFOI_FRAME_SEL));
		wmb();
		jwrite32(jme, JME_WFODP, mask[i]);
		wmb();
	}
}

__always_inline static void
jme_reset_mac_processor(struct jme_adapter *jme)
{
	__u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
	__u32 crc = 0xCDCDCDCD;
	__u32 gpreg0;
	int i;

	jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
	udelay(2);
	jwrite32(jme, JME_GHC, jme->reg_ghc);
	jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
	jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
	for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
		jme_setup_wakeup_frame(jme, mask, crc, i);
	if (jme->fpgaver)
		gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL;
	else
		gpreg0 = GPREG0_DEFAULT;
	jwrite32(jme, JME_GPREG0, gpreg0);
	jwrite32(jme, JME_GPREG1, 0);
}

__always_inline static void
jme_clear_pm(struct jme_adapter *jme)
{
	jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
	pci_set_power_state(jme->pdev, PCI_D0);
	pci_enable_wake(jme->pdev, PCI_D0, false);
}

static int
jme_reload_eeprom(struct jme_adapter *jme)
{
	__u32 val;
	int i;

	val = jread32(jme, JME_SMBCSR);

	if (val & SMBCSR_EEPROMD) {
		val |= SMBCSR_CNACK;
		jwrite32(jme, JME_SMBCSR, val);
		val |= SMBCSR_RELOAD;
		jwrite32(jme, JME_SMBCSR, val);
		mdelay(12);

		for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) {
			mdelay(1);
			if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
				break;
		}

		if (i == 0) {
			jeprintk("jme", "eeprom reload timeout\n");
			return -EIO;
		}
	}

	return 0;
}

static void
jme_load_macaddr(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned char macaddr[6];
	__u32 val;

	spin_lock(&jme->macaddr_lock);
	val = jread32(jme, JME_RXUMA_LO);
	macaddr[0] = (val >>  0) & 0xFF;
	macaddr[1] = (val >>  8) & 0xFF;
	macaddr[2] = (val >> 16) & 0xFF;
	macaddr[3] = (val >> 24) & 0xFF;
	val = jread32(jme, JME_RXUMA_HI);
	macaddr[4] = (val >>  0) & 0xFF;
	macaddr[5] = (val >>  8) & 0xFF;
	memcpy(netdev->dev_addr, macaddr, 6);
	spin_unlock(&jme->macaddr_lock);
}

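/*
 * Rx PCC (Packet Completion Coalescing) setup. Each profile packs an
 * interrupt timeout and a packet-count threshold into PCCRX0: P1
 * fires almost per-packet (low latency), P2/P3 batch more packets per
 * interrupt (higher throughput), and PCC_OFF is used for NAPI
 * polling. The exact timeout/count pairs come from the PCC_*
 * constants in jme.h.
 */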
__always_inline static void
jme_set_rx_pcc(struct jme_adapter *jme, int p)
{
	switch (p) {
	case PCC_OFF:
		jwrite32(jme, JME_PCCRX0,
			((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P1:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P2:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P3:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	default:
		break;
	}
	wmb();

	if (!(jme->flags & JME_FLAG_POLL))
		dprintk(jme->dev->name, "Switched to PCC_P%d\n", p);
}

static void
jme_start_irq(struct jme_adapter *jme)
{
	register struct dynpcc_info *dpi = &(jme->dpi);

	jme_set_rx_pcc(jme, PCC_P1);
	dpi->cur = PCC_P1;
	dpi->attempt = PCC_P1;
	dpi->cnt = 0;

	jwrite32(jme, JME_PCCTX,
			((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
			((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
			PCCTXQ0_EN);

	/*
	 * Enable Interrupts
	 */
	jwrite32(jme, JME_IENS, INTR_ENABLE);
}

__always_inline static void
jme_stop_irq(struct jme_adapter *jme)
{
	/*
	 * Disable Interrupts
	 */
	jwrite32(jme, JME_IENC, INTR_ENABLE);
}

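/*
 * Interrupt shadow registers: the NIC can post its interrupt status
 * into a DMA-coherent buffer in host memory, letting the MSI(-X)
 * handlers read status without an MMIO read. The buffer must be
 * 32-byte aligned; the low address bits are masked off and the LSB
 * doubles as the posting-enable flag (SHBA_POSTEN).
 */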
__always_inline static void
jme_enable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme,
		 JME_SHBA_LO,
		 ((__u32)jme->shadow_dma & ~((__u32)0x1F)) | SHBA_POSTEN);
}

__always_inline static void
jme_disable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme, JME_SHBA_LO, 0x0);
}

static __u32
jme_linkstat_from_phy(struct jme_adapter *jme)
{
	__u32 phylink, bmsr;

	phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17);
	bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
	if (bmsr & BMSR_ANCOMP)
		phylink |= PHY_LINK_AUTONEG_COMPLETE;

	return phylink;
}

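/*
 * (Re)evaluate link state and reprogram the MAC to match. On FPGA
 * prototypes the status is read from PHY register 17 (apparently a
 * vendor-specific status register) instead of JME_PHY_LINK. When
 * autonegotiation was not used, speed/duplex are decoded from BMCR;
 * otherwise we poll until the hardware reports speed/duplex resolved.
 */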
static int
jme_check_link(struct net_device *netdev, int testonly)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr;
	char linkmsg[64];
	int rc = 0;

	linkmsg[0] = '\0';

	if (jme->fpgaver)
		phylink = jme_linkstat_from_phy(jme);
	else
		phylink = jread32(jme, JME_PHY_LINK);

	if (phylink & PHY_LINK_UP) {
		if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
			/*
			 * If we did not enable AN,
			 * speed/duplex info has to be obtained from SMI.
			 */
			phylink = PHY_LINK_UP;

			bmcr = jme_mdio_read(jme->dev,
						jme->mii_if.phy_id,
						MII_BMCR);

			phylink |= ((bmcr & BMCR_SPEED1000) &&
					(bmcr & BMCR_SPEED100) == 0) ?
					PHY_LINK_SPEED_1000M :
					(bmcr & BMCR_SPEED100) ?
					PHY_LINK_SPEED_100M :
					PHY_LINK_SPEED_10M;

			phylink |= (bmcr & BMCR_FULLDPLX) ?
					PHY_LINK_DUPLEX : 0;

			strcat(linkmsg, "Forced: ");
		}
		else {
			/*
			 * Keep polling until speed/duplex resolve completes.
			 */
			while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
				--cnt) {

				udelay(1);

				if (jme->fpgaver)
					phylink = jme_linkstat_from_phy(jme);
				else
					phylink = jread32(jme, JME_PHY_LINK);
			}

			if (!cnt)
				jeprintk(netdev->name,
					"Timeout waiting for speed/duplex resolve.\n");

			strcat(linkmsg, "ANed: ");
		}

		if (jme->phylink == phylink) {
			rc = 1;
			goto out;
		}
		if (testonly)
			goto out;

		jme->phylink = phylink;

		ghc = jme->reg_ghc & ~(GHC_SPEED_10M |
					GHC_SPEED_100M |
					GHC_SPEED_1000M |
					GHC_DPX);
		switch (phylink & PHY_LINK_SPEED_MASK) {
		case PHY_LINK_SPEED_10M:
			ghc |= GHC_SPEED_10M;
			strcat(linkmsg, "10 Mbps, ");
			break;
		case PHY_LINK_SPEED_100M:
			ghc |= GHC_SPEED_100M;
			strcat(linkmsg, "100 Mbps, ");
			break;
		case PHY_LINK_SPEED_1000M:
			ghc |= GHC_SPEED_1000M;
			strcat(linkmsg, "1000 Mbps, ");
			break;
		default:
			break;
		}
		ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;

		strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
					"Full-Duplex, " :
					"Half-Duplex, ");

		if (phylink & PHY_LINK_MDI_STAT)
			strcat(linkmsg, "MDI-X");
		else
			strcat(linkmsg, "MDI");

		if (phylink & PHY_LINK_DUPLEX)
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
		else {
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
						TXMCS_BACKOFF |
						TXMCS_CARRIERSENSE |
						TXMCS_COLLISION);
			jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
				((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
				TXTRHD_TXREN |
				((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
		}

		jme->reg_ghc = ghc;
		jwrite32(jme, JME_GHC, ghc);

		jprintk(netdev->name, "Link is up at %s.\n", linkmsg);
		netif_carrier_on(netdev);
	}
	else {
		if (testonly)
			goto out;

		jprintk(netdev->name, "Link is down.\n");
		jme->phylink = 0;
		netif_carrier_off(netdev);
	}

out:
	return rc;
}

static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);

	txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
				TX_RING_ALLOC_SIZE(jme->tx_ring_size),
				&(txring->dmaalloc),
				GFP_ATOMIC);

	if (!txring->alloc) {
		txring->desc = NULL;
		txring->dmaalloc = 0;
		txring->dma = 0;
		return -ENOMEM;
	}

	/*
	 * Align descriptors to a 16-byte boundary
	 */
	txring->desc = (void *)ALIGN((unsigned long)(txring->alloc),
					RING_DESC_ALIGN);
	txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
	txring->next_to_use = 0;
	atomic_set(&txring->next_to_clean, 0);
	atomic_set(&txring->nr_free, jme->tx_ring_size);

	/*
	 * Initialize Transmit Descriptors
	 */
	memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
	memset(txring->bufinf, 0,
		sizeof(struct jme_buffer_info) * jme->tx_ring_size);

	return 0;
}

static void
jme_free_tx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi = txring->bufinf;

	if (txring->alloc) {
		for (i = 0 ; i < jme->tx_ring_size ; ++i) {
			txbi = txring->bufinf + i;
			if (txbi->skb) {
				dev_kfree_skb(txbi->skb);
				txbi->skb = NULL;
			}
			txbi->mapping = 0;
			txbi->len = 0;
			txbi->nr_desc = 0;
		}

		dma_free_coherent(&(jme->pdev->dev),
				TX_RING_ALLOC_SIZE(jme->tx_ring_size),
				txring->alloc,
				txring->dmaalloc);

		txring->alloc = NULL;
		txring->desc = NULL;
		txring->dmaalloc = 0;
		txring->dma = 0;
	}
	txring->next_to_use = 0;
	atomic_set(&txring->next_to_clean, 0);
	atomic_set(&txring->nr_free, 0);
}

__always_inline static void
jme_enable_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);

	/*
	 * Setup TX Queue 0 DMA Base Address
	 */
	jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
	jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup TX Descriptor Count
	 */
	jwrite32(jme, JME_TXQDC, jme->tx_ring_size);

	/*
	 * Enable TX Engine
	 */
	wmb();
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);
}

__always_inline static void
jme_restart_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Restart TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);
}

__always_inline static void
jme_disable_tx_engine(struct jme_adapter *jme)
{
	int i;
	__u32 val;

	/*
	 * Disable TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);

	val = jread32(jme, JME_TXCS);
	for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_TXCS);
	}

	if (!i) {
		jeprintk(jme->dev->name, "Disable TX engine timeout.\n");
		jme_reset_mac_processor(jme);
	}
}

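/*
 * Rewrite an RX descriptor so the hardware owns it again: clear the
 * write-back words, refill the buffer address/length from the cached
 * buffer info, and only then set RXFLAG_OWN (the wmb() keeps the
 * ownership flip ordered after the other fields are visible).
 */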
static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	register volatile struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxdesc += i;
	rxbi += i;

	rxdesc->dw[0] = 0;
	rxdesc->dw[1] = 0;
	rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32);
	rxdesc->desc1.bufaddrl = cpu_to_le32(
					(__u64)rxbi->mapping & 0xFFFFFFFFUL);
	rxdesc->desc1.datalen = cpu_to_le16(rxbi->len);
	if (jme->dev->features & NETIF_F_HIGHDMA)
		rxdesc->desc1.flags = RXFLAG_64BIT;
	wmb();
	rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT;
}

static int
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf + i;
	unsigned long offset;
	struct sk_buff *skb;

	skb = netdev_alloc_skb(jme->dev,
		jme->dev->mtu + RX_EXTRA_LEN);
	if (unlikely(!skb))
		return -ENOMEM;

	if (unlikely(offset =
			(unsigned long)(skb->data)
			& ((unsigned long)RX_BUF_DMA_ALIGN - 1)))
		skb_reserve(skb, RX_BUF_DMA_ALIGN - offset);

	rxbi->skb = skb;
	rxbi->len = skb_tailroom(skb);
	rxbi->mapping = pci_map_page(jme->pdev,
					virt_to_page(skb->data),
					offset_in_page(skb->data),
					rxbi->len,
					PCI_DMA_FROMDEVICE);

	return 0;
}

static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxbi += i;

	if (rxbi->skb) {
		pci_unmap_page(jme->pdev,
				rxbi->mapping,
				rxbi->len,
				PCI_DMA_FROMDEVICE);
		dev_kfree_skb(rxbi->skb);
		rxbi->skb = NULL;
		rxbi->mapping = 0;
		rxbi->len = 0;
	}
}

static void
jme_free_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	if (rxring->alloc) {
		for (i = 0 ; i < jme->rx_ring_size ; ++i)
			jme_free_rx_buf(jme, i);

		dma_free_coherent(&(jme->pdev->dev),
				RX_RING_ALLOC_SIZE(jme->rx_ring_size),
				rxring->alloc,
				rxring->dmaalloc);
		rxring->alloc = NULL;
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
	}
	rxring->next_to_use = 0;
	atomic_set(&rxring->next_to_clean, 0);
}

static int
jme_setup_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
				RX_RING_ALLOC_SIZE(jme->rx_ring_size),
				&(rxring->dmaalloc),
				GFP_ATOMIC);
	if (!rxring->alloc) {
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
		return -ENOMEM;
	}

	/*
	 * Align descriptors to a 16-byte boundary
	 */
	rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc),
					RING_DESC_ALIGN);
	rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
	rxring->next_to_use = 0;
	atomic_set(&rxring->next_to_clean, 0);

	/*
	 * Initialize Receive Descriptors
	 */
	for (i = 0 ; i < jme->rx_ring_size ; ++i) {
		if (unlikely(jme_make_new_rx_buf(jme, i))) {
			jme_free_rx_resources(jme);
			return -ENOMEM;
		}

		jme_set_clean_rxdesc(jme, i);
	}

	return 0;
}

__always_inline static void
jme_enable_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Setup RX DMA Base Address
	 */
	jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
	jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup RX Descriptor Count
	 */
	jwrite32(jme, JME_RXQDC, jme->rx_ring_size);

	/*
	 * Setup Unicast Filter
	 */
	jme_set_multi(jme->dev);

	/*
	 * Enable RX Engine
	 */
	wmb();
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}

__always_inline static void
jme_restart_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Start RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}

__always_inline static void
jme_disable_rx_engine(struct jme_adapter *jme)
{
	int i;
	__u32 val;

	/*
	 * Disable RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs);

	val = jread32(jme, JME_RXCS);
	for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_RXCS);
	}

	if (!i)
		jeprintk(jme->dev->name, "Disable RX engine timeout.\n");
}

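/*
 * Validate the hardware checksum write-back flags. A packet is only
 * reported as CHECKSUM_UNNECESSARY when every protocol layer the NIC
 * claims to have parsed (TCP/UDP/IPv4) also has its corresponding
 * checksum-OK bit set; otherwise the error is logged and the stack is
 * left to verify the checksum itself.
 */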
static int
jme_rxsum_ok(struct jme_adapter *jme, __u16 flags)
{
	if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
		return false;

	if (unlikely((flags & RXWBFLAG_TCPON) &&
			!(flags & RXWBFLAG_TCPCS))) {
		csum_dbg(jme->dev->name, "TCP Checksum error.\n");
		goto out_sumerr;
	}

	if (unlikely((flags & RXWBFLAG_UDPON) &&
			!(flags & RXWBFLAG_UDPCS))) {
		csum_dbg(jme->dev->name, "UDP Checksum error.\n");
		goto out_sumerr;
	}

	if (unlikely((flags & RXWBFLAG_IPV4) &&
			!(flags & RXWBFLAG_IPCS))) {
		csum_dbg(jme->dev->name, "IPv4 Checksum error.\n");
		goto out_sumerr;
	}

	return true;

out_sumerr:
	csum_dbg(jme->dev->name, "%s%s%s%s\n",
		(flags & RXWBFLAG_IPV4) ? "IPv4 " : "",
		(flags & RXWBFLAG_IPV6) ? "IPv6 " : "",
		(flags & RXWBFLAG_UDPON) ? "UDP " : "",
		(flags & RXWBFLAG_TCPON) ? "TCP" : "");
	return false;
}

static void
jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	volatile struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	struct sk_buff *skb;
	int framesize;

	rxdesc += idx;
	rxbi += idx;

	skb = rxbi->skb;
	pci_dma_sync_single_for_cpu(jme->pdev,
					rxbi->mapping,
					rxbi->len,
					PCI_DMA_FROMDEVICE);

	if (unlikely(jme_make_new_rx_buf(jme, idx))) {
		pci_dma_sync_single_for_device(jme->pdev,
						rxbi->mapping,
						rxbi->len,
						PCI_DMA_FROMDEVICE);

		++(NET_STAT(jme).rx_dropped);
	}
	else {
		framesize = le16_to_cpu(rxdesc->descwb.framesize)
				- RX_PREPAD_SIZE;

		skb_reserve(skb, RX_PREPAD_SIZE);
		skb_put(skb, framesize);
		skb->protocol = eth_type_trans(skb, jme->dev);

		if (jme_rxsum_ok(jme, rxdesc->descwb.flags))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		if (rxdesc->descwb.flags & RXWBFLAG_TAGON) {
			vlan_dbg(jme->dev->name, "VLAN: %04x\n",
					rxdesc->descwb.vlan);
			if (jme->vlgrp) {
				vlan_dbg(jme->dev->name,
					"VLAN Passed to kernel.\n");
				jme->jme_vlan_rx(skb, jme->vlgrp,
					le32_to_cpu(rxdesc->descwb.vlan));
				NET_STAT(jme).rx_bytes += 4;
			}
		}
		else {
			jme->jme_rx(skb);
		}

		if ((le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST) ==
				RXWBFLAG_DEST_MUL)
			++(NET_STAT(jme).multicast);

		jme->dev->last_rx = jiffies;
		NET_STAT(jme).rx_bytes += framesize;
		++(NET_STAT(jme).rx_packets);
	}

	jme_set_clean_rxdesc(jme, idx);
}

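/*
 * Main receive loop, shared by the tasklet and NAPI paths. Walks the
 * ring from next_to_clean, stopping at the first descriptor that is
 * still hardware-owned or whose write-back is incomplete. A frame
 * spanning several descriptors (or carrying any error bit) is counted
 * and its descriptors are recycled without being passed up the stack.
 * The rx_cleaning atomic serves as a non-blocking mutual-exclusion
 * guard against reentry.
 */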
static int
jme_process_receive(struct jme_adapter *jme, int limit)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	volatile struct rxdesc *rxdesc = rxring->desc;
	int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;

	if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
		goto out_inc;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		goto out_inc;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		goto out_inc;

	i = atomic_read(&rxring->next_to_clean);
	while (limit-- > 0) {
		rxdesc = rxring->desc;
		rxdesc += i;

		if ((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
				!(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
			goto out;

		desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

		rx_dbg(jme->dev->name, "RX: Cleaning %d\n", i);

		if (unlikely(desccnt > 1 ||
				rxdesc->descwb.errstat & RXWBERR_ALLERR)) {

			if (rxdesc->descwb.errstat & RXWBERR_CRCERR)
				++(NET_STAT(jme).rx_crc_errors);
			else if (rxdesc->descwb.errstat & RXWBERR_OVERUN)
				++(NET_STAT(jme).rx_fifo_errors);
			else
				++(NET_STAT(jme).rx_errors);

			if (desccnt > 1) {
				rx_dbg(jme->dev->name,
					"RX: More than one(%d) descriptor, "
					"framelen=%d\n",
					desccnt, le16_to_cpu(rxdesc->descwb.framesize));
				limit -= desccnt - 1;
			}

			for (j = i, ccnt = desccnt ; ccnt-- ; ) {
				jme_set_clean_rxdesc(jme, j);
				j = (j + 1) & (mask);
			}
		}
		else {
			jme_alloc_and_feed_skb(jme, i);
		}

		i = (i + desccnt) & (mask);
	}

out:
	rx_dbg(jme->dev->name, "RX: Stop at %d\n", i);
	rx_dbg(jme->dev->name, "RX: RXNDA offset %d\n",
		(jread32(jme, JME_RXNDA) - jread32(jme, JME_RXDBA_LO))
			>> 4);

	atomic_set(&rxring->next_to_clean, i);

out_inc:
	atomic_inc(&jme->rx_cleaning);

	return limit > 0 ? limit : 0;
}

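/*
 * Dynamic PCC tuning. Every PCC timer tick the byte/packet rates
 * since the last tick are compared against the PCC_P3/PCC_P2
 * thresholds to pick a target coalescing profile; the profile is only
 * committed after it has been requested more than five ticks in a
 * row, which damps oscillation between profiles under bursty load.
 */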
static void
jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
{
	if (likely(atmp == dpi->cur)) {
		dpi->cnt = 0;
		return;
	}

	if (dpi->attempt == atmp) {
		++(dpi->cnt);
	}
	else {
		dpi->attempt = atmp;
		dpi->cnt = 0;
	}
}

static void
jme_dynamic_pcc(struct jme_adapter *jme)
{
	register struct dynpcc_info *dpi = &(jme->dpi);

	if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
		jme_attempt_pcc(dpi, PCC_P3);
	else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD
			|| dpi->intr_cnt > PCC_INTR_THRESHOLD)
		jme_attempt_pcc(dpi, PCC_P2);
	else
		jme_attempt_pcc(dpi, PCC_P1);

	if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
		jme_set_rx_pcc(jme, dpi->attempt);
		dpi->cur = dpi->attempt;
		dpi->cnt = 0;
	}
}

static void
jme_start_pcc_timer(struct jme_adapter *jme)
{
	struct dynpcc_info *dpi = &(jme->dpi);
	dpi->last_bytes = NET_STAT(jme).rx_bytes;
	dpi->last_pkts = NET_STAT(jme).rx_packets;
	dpi->intr_cnt = 0;
	jwrite32(jme, JME_TMCSR,
		TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
}

__always_inline static void
jme_stop_pcc_timer(struct jme_adapter *jme)
{
	jwrite32(jme, JME_TMCSR, 0);
}

static void
jme_pcc_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct net_device *netdev = jme->dev;

	if (unlikely(!netif_carrier_ok(netdev) ||
		(atomic_read(&jme->link_changing) != 1)
	)) {
		jme_stop_pcc_timer(jme);
		return;
	}

	if (!(jme->flags & JME_FLAG_POLL))
		jme_dynamic_pcc(jme);

	jme_start_pcc_timer(jme);
}

__always_inline static void
jme_polling_mode(struct jme_adapter *jme)
{
	jme_set_rx_pcc(jme, PCC_OFF);
}

__always_inline static void
jme_interrupt_mode(struct jme_adapter *jme)
{
	jme_set_rx_pcc(jme, PCC_P1);
}

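/*
 * Link-change handling. The link_changing atomic keeps this tasklet
 * from racing with the RX/TX cleaning paths; it waits for in-flight
 * cleaning to drain, then tears down and rebuilds both rings to match
 * the new link state. The old_mtu check suggests MTU changes are
 * funneled through here as well.
 */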
static void
jme_link_change_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct net_device *netdev = jme->dev;
	int timeout = WAIT_TASKLET_TIMEOUT;
	int rc;

	if (!atomic_dec_and_test(&jme->link_changing))
		goto out;

	if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
		goto out;

	jme->old_mtu = netdev->mtu;
	netif_stop_queue(netdev);

	while (--timeout > 0 &&
		(
		atomic_read(&jme->rx_cleaning) != 1 ||
		atomic_read(&jme->tx_cleaning) != 1
		)) {

		mdelay(1);
	}

	if (netif_carrier_ok(netdev)) {
		jme_stop_pcc_timer(jme);
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);

		if (jme->flags & JME_FLAG_POLL)
			jme_polling_mode(jme);
	}

	jme_check_link(netdev, 0);
	if (netif_carrier_ok(netdev)) {
		rc = jme_setup_rx_resources(jme);
		if (rc) {
			jeprintk(netdev->name,
				"Error allocating RX resources"
				", device STOPPED!\n");
			goto out;
		}

		rc = jme_setup_tx_resources(jme);
		if (rc) {
			jeprintk(netdev->name,
				"Error allocating TX resources"
				", device STOPPED!\n");
			goto err_out_free_rx_resources;
		}

		jme_enable_rx_engine(jme);
		jme_enable_tx_engine(jme);

		netif_start_queue(netdev);

		if (jme->flags & JME_FLAG_POLL)
			jme_interrupt_mode(jme);

		jme_start_pcc_timer(jme);
	}

	goto out;

err_out_free_rx_resources:
	jme_free_rx_resources(jme);
out:
	atomic_inc(&jme->link_changing);
}

static void
jme_rx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct dynpcc_info *dpi = &(jme->dpi);

	jme_process_receive(jme, jme->rx_ring_size);
	++(dpi->intr_cnt);
}

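/*
 * NAPI poll callback. The JME_NAPI_* macros in jme.h presumably
 * abstract over the old (netdev-based) and new (struct napi_struct)
 * NAPI APIs so this body compiles against both. rx_empty counts
 * RX0EMP events seen by the interrupt handler; each one means the
 * ring underflowed and the RX engine must be restarted after
 * refilling.
 */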
static int
jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
{
	struct jme_adapter *jme = jme_napi_priv(holder);
	struct net_device *netdev = jme->dev;
	int rest;

	rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));

	while (atomic_read(&jme->rx_empty) > 0) {
		atomic_dec(&jme->rx_empty);
		++(NET_STAT(jme).rx_dropped);
		jme_restart_rx_engine(jme);
	}
	atomic_inc(&jme->rx_empty);

	if (rest) {
		JME_RX_COMPLETE(netdev, holder);
		jme_interrupt_mode(jme);
	}

	JME_NAPI_WEIGHT_SET(budget, rest);
	return JME_NAPI_WEIGHT_VAL(budget) - rest;
}

static void
jme_rx_empty_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		return;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		return;

	queue_dbg(jme->dev->name, "RX Queue Full!\n");

	jme_rx_clean_tasklet(arg);

	while (atomic_read(&jme->rx_empty) > 0) {
		atomic_dec(&jme->rx_empty);
		++(NET_STAT(jme).rx_dropped);
		jme_restart_rx_engine(jme);
	}
	atomic_inc(&jme->rx_empty);
}

static void
jme_wake_queue_if_stopped(struct jme_adapter *jme)
{
	struct jme_ring *txring = jme->txring;

	smp_wmb();
	if (unlikely(netif_queue_stopped(jme->dev) &&
			atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {

		queue_dbg(jme->dev->name, "TX Queue Woken.\n");
		netif_wake_queue(jme->dev);
	}
}

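/*
 * TX completion. Walks finished descriptors from next_to_clean: for
 * each completed skb the extra fragment mappings are unmapped and
 * zeroed, statistics are updated, and the freed slots are returned to
 * nr_free so a stopped queue can be woken. The walk stops at the
 * first slot that has no skb or is still owned by the hardware.
 */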
static void
jme_tx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct jme_ring *txring = &(jme->txring[0]);
	volatile struct txdesc *txdesc = txring->desc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
	int i, j, cnt = 0, max, err, mask;

	if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
		goto out;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		goto out;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		goto out;

	max = jme->tx_ring_size - atomic_read(&txring->nr_free);
	mask = jme->tx_ring_mask;

	tx_dbg(jme->dev->name, "Tx Tasklet: In\n");

	for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {

		ctxbi = txbi + i;

		if (likely(ctxbi->skb &&
				!(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {

			err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;

			tx_dbg(jme->dev->name,
				"Tx Tasklet: Clean %d+%d\n",
				i, ctxbi->nr_desc);

			for (j = 1 ; j < ctxbi->nr_desc ; ++j) {
				ttxbi = txbi + ((i + j) & (mask));
				txdesc[(i + j) & (mask)].dw[0] = 0;

				pci_unmap_page(jme->pdev,
						ttxbi->mapping,
						ttxbi->len,
						PCI_DMA_TODEVICE);

				ttxbi->mapping = 0;
				ttxbi->len = 0;
			}

			dev_kfree_skb(ctxbi->skb);

			cnt += ctxbi->nr_desc;

			if (unlikely(err))
				++(NET_STAT(jme).tx_carrier_errors);
			else {
				++(NET_STAT(jme).tx_packets);
				NET_STAT(jme).tx_bytes += ctxbi->len;
			}

			ctxbi->skb = NULL;
			ctxbi->len = 0;
			ctxbi->start_xmit = 0;
		}
		else {
			if (!ctxbi->skb)
				tx_dbg(jme->dev->name,
					"Tx Tasklet:"
					" Stopped due to no skb.\n");
			else
				tx_dbg(jme->dev->name,
					"Tx Tasklet:"
					" Stopped due to not done.\n");
			break;
		}

		i = (i + ctxbi->nr_desc) & mask;

		ctxbi->nr_desc = 0;
	}

	tx_dbg(jme->dev->name,
		"Tx Tasklet: Stop %d Jiffies %lu\n",
		i, jiffies);

	atomic_set(&txring->next_to_clean, i);
	atomic_add(cnt, &txring->nr_free);

	jme_wake_queue_if_stopped(jme);

out:
	atomic_inc(&jme->tx_cleaning);
}

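/*
 * Common interrupt dispatch, shared by the INTx and MSI handlers.
 * Interrupts are masked for the duration, work is pushed to the
 * tasklets (or to NAPI in polling mode), the handled bits are cleared
 * by writing them back to IEVE (write-1-to-clear), and the interrupt
 * sources are re-enabled.
 */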
static void
jme_intr_msi(struct jme_adapter *jme, __u32 intrstat)
{
	/*
	 * Disable interrupt
	 */
	jwrite32f(jme, JME_IENC, INTR_ENABLE);

	if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
		tasklet_schedule(&jme->linkch_task);
		goto out_reenable;
	}

	if (intrstat & INTR_TMINTR)
		tasklet_schedule(&jme->pcc_task);

	if (intrstat & (INTR_PCCTXTO | INTR_PCCTX))
		tasklet_schedule(&jme->txclean_task);

	if (jme->flags & JME_FLAG_POLL) {
		if (intrstat & INTR_RX0EMP)
			atomic_inc(&jme->rx_empty);

		if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
			if (likely(JME_RX_SCHEDULE_PREP(jme))) {
				jme_polling_mode(jme);
				JME_RX_SCHEDULE(jme);
			}
		}
	}
	else {
		if (intrstat & INTR_RX0EMP) {
			atomic_inc(&jme->rx_empty);
			tasklet_schedule(&jme->rxempty_task);
		}
		else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
			tasklet_schedule(&jme->rxclean_task);
	}

out_reenable:
	/*
	 * Write 1 to clear interrupt status
	 */
	jwrite32f(jme, JME_IEVE, intrstat);

	/*
	 * Re-enable interrupt
	 */
	jwrite32f(jme, JME_IENS, INTR_ENABLE);
}

static irqreturn_t
jme_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 intrstat;

	intrstat = jread32(jme, JME_IEVE);

	/*
	 * Check if it's really an interrupt for us
	 */
	if (unlikely(intrstat == 0))
		return IRQ_NONE;

	/*
	 * Check if the device still exists
	 */
	if (unlikely(intrstat == ~((typeof(intrstat))0)))
		return IRQ_NONE;

	jme_intr_msi(jme, intrstat);

	return IRQ_HANDLED;
}

static irqreturn_t
jme_msi(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 intrstat;

	pci_dma_sync_single_for_cpu(jme->pdev,
					jme->shadow_dma,
					sizeof(__u32) * SHADOW_REG_NR,
					PCI_DMA_FROMDEVICE);
	intrstat = jme->shadow_regs[SHADOW_IEVE];
	jme->shadow_regs[SHADOW_IEVE] = 0;

	jme_intr_msi(jme, intrstat);

	return IRQ_HANDLED;
}

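/*
 * MSI-X handlers: vector 0 takes link/timer events, vector 1 TX
 * completion, vector 2 RX. Each handler reads its status bits from
 * the shadow register block, masks only its own sources while it
 * schedules work, then acks them (write-1-to-clear) and re-enables.
 */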
static irqreturn_t
jme_msix_misc(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 intrstat;

	pci_dma_sync_single_for_cpu(jme->pdev,
					jme->shadow_dma,
					sizeof(__u32) * SHADOW_REG_NR,
					PCI_DMA_FROMDEVICE);
	intrstat = jme->shadow_regs[SHADOW_IEVE];
	jme->shadow_regs[SHADOW_IEVE] &= ~INTR_EN_MISC;

	/*
	 * Disable interrupt
	 */
	jwrite32f(jme, JME_IENC, INTR_EN_MISC);

	if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
		tasklet_schedule(&jme->linkch_task);
		goto out_reenable;
	}

	if (intrstat & INTR_TMINTR)
		tasklet_schedule(&jme->pcc_task);

out_reenable:
	/*
	 * Write 1 to clear interrupt status
	 */
	jwrite32f(jme, JME_IEVE, INTR_EN_MISC);

	/*
	 * Re-enable interrupt
	 */
	jwrite32f(jme, JME_IENS, INTR_EN_MISC);

	return IRQ_HANDLED;
}

static irqreturn_t
jme_msix_tx(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);

	/*
	 * Disable interrupt
	 */
	jwrite32f(jme, JME_IENC, INTR_EN_TX);

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		goto out_reenable;

	tasklet_schedule(&jme->txclean_task);

out_reenable:
	/*
	 * Write 1 to clear interrupt status
	 */
	jwrite32f(jme, JME_IEVE, INTR_EN_TX | INTR_TX0);

	/*
	 * Re-enable interrupt
	 */
	jwrite32f(jme, JME_IENS, INTR_EN_TX);

	return IRQ_HANDLED;
}

static irqreturn_t
jme_msix_rx(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 intrstat;

	pci_dma_sync_single_for_cpu(jme->pdev,
					jme->shadow_dma,
					sizeof(__u32) * SHADOW_REG_NR,
					PCI_DMA_FROMDEVICE);
	intrstat = jme->shadow_regs[SHADOW_IEVE];
	jme->shadow_regs[SHADOW_IEVE] &= ~INTR_EN_RX0;

	/*
	 * Disable interrupt
	 */
	jwrite32f(jme, JME_IENC, INTR_EN_RX0);

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		goto out_reenable;

	if (jme->flags & JME_FLAG_POLL) {
		if (intrstat & INTR_RX0EMP)
			atomic_inc(&jme->rx_empty);

		if (likely(JME_RX_SCHEDULE_PREP(jme))) {
			jme_polling_mode(jme);
			JME_RX_SCHEDULE(jme);
		}
	}
	else {
		if (intrstat & INTR_RX0EMP) {
			atomic_inc(&jme->rx_empty);
			tasklet_schedule(&jme->rxempty_task);
		}
		else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
			tasklet_schedule(&jme->rxclean_task);
	}

out_reenable:
	/*
	 * Write 1 to clear interrupt status
	 */
	jwrite32f(jme, JME_IEVE, INTR_EN_RX0 | INTR_RX0);

	/*
	 * Re-enable interrupt
	 */
	jwrite32f(jme, JME_IENS, INTR_EN_RX0);

	return IRQ_HANDLED;
}

static void
jme_reset_link(struct jme_adapter *jme)
{
	jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
}

static void
jme_restart_an(struct jme_adapter *jme)
{
	__u32 bmcr;
	unsigned long flags;

	spin_lock_irqsave(&jme->phy_lock, flags);
	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
	spin_unlock_irqrestore(&jme->phy_lock, flags);
}

static void
jme_setup_msix_info(struct jme_adapter *jme, struct msix_entry *msix_ent)
{
	int i;

	for (i = 0; i < JME_MSIX_VEC_NR; i++) {
		jme->msix[i].requested = false;
		jme->msix[i].vector = msix_ent[i].vector;
		strcpy(jme->msix[i].name, jme->dev->name);
	}

	jme->msix[0].handler = jme_msix_misc;
	jme->msix[1].handler = jme_msix_tx;
	jme->msix[2].handler = jme_msix_rx;

	strcat(jme->msix[0].name, "-misc");
	strcat(jme->msix[1].name, "-tx");
	strcat(jme->msix[2].name, "-rx");
}

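/*
 * Route each of the 32 interrupt sources to an MSI-X vector. The
 * JME_MSIX_ENT area appears to hold a 4-bit vector number per source,
 * eight sources per 32-bit register, so the loop accumulates one
 * register's worth of nibbles and flushes it on every 8th source.
 * TX sources go to vector 1, RX0 sources to vector 2, everything
 * else to vector 0.
 */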
static void
jme_fill_msix_regs(struct jme_adapter *jme)
{
	__u32 mask = 1, reg_msix = 0;
	int i, vec;

	for (i = 0 ; i < 32 ; ++i) {
		if (mask & INTR_EN_TX)
			vec = 1;
		else if (mask & INTR_EN_RX0)
			vec = 2;
		else
			vec = 0;

		if (!(i & 7))
			reg_msix = 0;
		reg_msix |= (vec & 7) << ((i & 7) << 2);
		if ((i & 7) == 7)
			jwrite32(jme,
				JME_MSIX_ENT + ((i >> 3) << 2),
				reg_msix);

		mask <<= 1;
	}
}

static int
jme_request_msix_irq(struct jme_adapter *jme)
{
	int i, rc;
	struct jme_msix_info *msix_info;

	for (i = 0; i < JME_MSIX_VEC_NR; i++) {
		msix_info = jme->msix + i;
		rc = request_irq(msix_info->vector,
				msix_info->handler,
				0,
				msix_info->name,
				jme->dev);
		if (rc)
			break;
#if 0
#ifdef CONFIG_SMP
		/*
		 * Try to set a different CPU affinity for each IRQ,
		 * ignoring assignment failures since they are not
		 * critical to correct operation.
		 */
		if (irq_can_set_affinity(msix_info->vector))
			irq_set_affinity(msix_info->vector,
				cpumask_of_cpu(i % num_online_cpus()));
#endif
#endif
		msix_info->requested = true;
	}

	return rc;
}

static void
jme_free_msix(struct jme_adapter *jme)
{
	int i;
	struct jme_msix_info *msix_info;

	for (i = 0; i < JME_MSIX_VEC_NR; i++) {
		msix_info = jme->msix + i;
		if (msix_info->requested)
			free_irq(msix_info->vector, jme->dev);
		else
			break;
		msix_info->requested = false;
	}
	pci_disable_msix(jme->pdev);
}

static int
jme_request_msix(struct jme_adapter *jme)
{
	int i, rc;
	struct msix_entry msix_ent[JME_MSIX_VEC_NR];

	for (i = 0; i < JME_MSIX_VEC_NR; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(jme->pdev, msix_ent, JME_MSIX_VEC_NR);
	if (rc)
		goto out;

	jme_setup_msix_info(jme, msix_ent);
	jme_fill_msix_regs(jme);

	rc = jme_request_msix_irq(jme);
	if (rc)
		goto out_free_msix;

	return 0;

out_free_msix:
	jme_free_msix(jme);
out:
	return rc;
}

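/*
 * IRQ setup falls back gracefully: try MSI-X first, then MSI, and
 * finally a shared legacy INTx line, recording which mode stuck in
 * jme->flags so jme_free_irq() can undo exactly what was done.
 */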
static int
jme_request_irq(struct jme_adapter *jme)
{
	int rc;
	struct net_device *netdev = jme->dev;
	irq_handler_t handler = jme_intr;
	int irq_flags = IRQF_SHARED;

	if (!jme_request_msix(jme)) {
		jme->flags |= JME_FLAG_MSIX;
		return 0;
	}

	if (!pci_enable_msi(jme->pdev)) {
		jme->flags |= JME_FLAG_MSI;
		handler = jme_msi;
		irq_flags = 0;
	}

	rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
			netdev);
	if (rc) {
		jeprintk(netdev->name,
			"Unable to request %s interrupt (return: %d)\n",
			jme->flags & JME_FLAG_MSI ? "MSI" : "INTx", rc);

		if (jme->flags & JME_FLAG_MSI) {
			pci_disable_msi(jme->pdev);
			jme->flags &= ~JME_FLAG_MSI;
		}
	}
	else {
		netdev->irq = jme->pdev->irq;
	}

	return rc;
}

static void
jme_free_irq(struct jme_adapter *jme)
{
	if (jme->flags & JME_FLAG_MSIX) {
		jme_free_msix(jme);
		jme->flags &= ~JME_FLAG_MSIX;
	}
	else {
		free_irq(jme->pdev->irq, jme->dev);
		if (jme->flags & JME_FLAG_MSI) {
			pci_disable_msi(jme->pdev);
			jme->flags &= ~JME_FLAG_MSI;
			jme->dev->irq = jme->pdev->irq;
		}
	}
}

static int
jme_open(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc, timeout = 10;

	while (
		--timeout > 0 &&
		(
		atomic_read(&jme->link_changing) != 1 ||
		atomic_read(&jme->rx_cleaning) != 1 ||
		atomic_read(&jme->tx_cleaning) != 1
		)
	)
		msleep(1);

	if (!timeout) {
		rc = -EBUSY;
		goto err_out;
	}

	jme_clear_pm(jme);
	jme_reset_mac_processor(jme);
	JME_NAPI_ENABLE(jme);

	rc = jme_request_irq(jme);
	if (rc)
		goto err_out;

	jme_enable_shadow(jme);
	jme_start_irq(jme);

	if (jme->flags & JME_FLAG_SSET)
		jme_set_settings(netdev, &jme->old_ecmd);
	else
		jme_reset_phy_processor(jme);

	jme_reset_link(jme);

	return 0;

err_out:
	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	return rc;
}

static void
jme_set_100m_half(struct jme_adapter *jme)
{
	__u32 bmcr, tmp;

	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
			BMCR_SPEED1000 | BMCR_FULLDPLX);
	tmp |= BMCR_SPEED100;

	if (bmcr != tmp)
		jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);

	if (jme->fpgaver)
		jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL);
	else
		jwrite32(jme, JME_GHC, GHC_SPEED_100M);
}

static void
jme_phy_off(struct jme_adapter *jme)
{
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
}

static int
jme_close(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);

	jme_stop_irq(jme);
	jme_disable_shadow(jme);
	jme_free_irq(jme);

	JME_NAPI_DISABLE(jme);

	tasklet_kill(&jme->linkch_task);
	tasklet_kill(&jme->txclean_task);
	tasklet_kill(&jme->rxclean_task);
	tasklet_kill(&jme->rxempty_task);

	jme_reset_mac_processor(jme);
	jme_free_rx_resources(jme);
	jme_free_tx_resources(jme);
	jme->phylink = 0;
	jme_phy_off(jme);

	return 0;
}

1849
1850static int
1851jme_alloc_txdesc(struct jme_adapter *jme,
1852 struct sk_buff *skb)
1853{
1854 struct jme_ring *txring = jme->txring;
1855 int idx, nr_alloc, mask = jme->tx_ring_mask;
1856
1857 idx = txring->next_to_use;
1858 nr_alloc = skb_shinfo(skb)->nr_frags + 2;
1859
1860 if(unlikely(atomic_read(&txring->nr_free) < nr_alloc))
1861 return -1;
1862
1863 atomic_sub(nr_alloc, &txring->nr_free);
42b1055e 1864
b3821cc5
GFT
1865 txring->next_to_use = (txring->next_to_use + nr_alloc) & mask;
1866
1867 return idx;
1868}
1869
1870static void
1871jme_fill_tx_map(struct pci_dev *pdev,
1872 volatile struct txdesc *txdesc,
1873 struct jme_buffer_info *txbi,
1874 struct page *page,
1875 __u32 page_offset,
1876 __u32 len,
1877 __u8 hidma)
1878{
1879 dma_addr_t dmaaddr;
1880
1881 dmaaddr = pci_map_page(pdev,
1882 page,
1883 page_offset,
1884 len,
1885 PCI_DMA_TODEVICE);
1886
1887 pci_dma_sync_single_for_device(pdev,
1888 dmaaddr,
1889 len,
1890 PCI_DMA_TODEVICE);
1891
1892 txdesc->dw[0] = 0;
1893 txdesc->dw[1] = 0;
1894 txdesc->desc2.flags = TXFLAG_OWN;
1895 txdesc->desc2.flags |= (hidma)?TXFLAG_64BIT:0;
1896 txdesc->desc2.datalen = cpu_to_le16(len);
1897 txdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32);
1898 txdesc->desc2.bufaddrl = cpu_to_le32(
1899 (__u64)dmaaddr & 0xFFFFFFFFUL);
1900
1901 txbi->mapping = dmaaddr;
1902 txbi->len = len;
1903}
1904
static void
jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
	struct jme_ring *txring = jme->txring;
	volatile struct txdesc *txdesc = txring->desc, *ctxdesc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
	__u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
	int i, nr_frags = skb_shinfo(skb)->nr_frags;
	int mask = jme->tx_ring_mask;
	struct skb_frag_struct *frag;
	__u32 len;

	for (i = 0 ; i < nr_frags ; ++i) {
		frag = &skb_shinfo(skb)->frags[i];
		ctxdesc = txdesc + ((idx + i + 2) & (mask));
		ctxbi = txbi + ((idx + i + 2) & (mask));

		jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page,
				frag->page_offset, frag->size, hidma);
	}

	len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	ctxdesc = txdesc + ((idx + 1) & (mask));
	ctxbi = txbi + ((idx + 1) & (mask));
	jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
			offset_in_page(skb->data), len, hidma);
}

static int
jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
{
	if (unlikely(skb_shinfo(skb)->gso_size &&
			skb_header_cloned(skb) &&
			pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		return -1;
	}

	return 0;
}

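/*
 * TSO setup. When gso_size is set, the TCP checksum field must be
 * seeded with the pseudo-header checksum (addresses and protocol,
 * zero length) so the hardware can fill in the real checksum for
 * each segment it cuts. Returns 0 when TSO is used, 1 to fall back
 * to plain checksum offload.
 */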
static int
jme_tx_tso(struct sk_buff *skb,
		volatile __u16 *mss, __u8 *flags)
{
	if ((*mss = (skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT))) {
		*flags |= TXFLAG_LSEN;

		if (skb->protocol == __constant_htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);

			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								iph->daddr, 0,
								IPPROTO_TCP,
								0);
		}
		else {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
								&ip6h->daddr, 0,
								IPPROTO_TCP,
								0);
		}

		return 0;
	}

	return 1;
}

1977
1978static void
1979jme_tx_csum(struct sk_buff *skb, __u8 *flags)
1980{
1981 if(skb->ip_summed == CHECKSUM_PARTIAL) {
1982 __u8 ip_proto;
1983
1984 switch (skb->protocol) {
1985 case __constant_htons(ETH_P_IP):
1986 ip_proto = ip_hdr(skb)->protocol;
1987 break;
1988 case __constant_htons(ETH_P_IPV6):
1989 ip_proto = ipv6_hdr(skb)->nexthdr;
1990 break;
1991 default:
1992 ip_proto = 0;
1993 break;
1994 }
1995
1996 switch(ip_proto) {
1997 case IPPROTO_TCP:
1998 *flags |= TXFLAG_TCPCS;
1999 break;
2000 case IPPROTO_UDP:
2001 *flags |= TXFLAG_UDPCS;
2002 break;
2003 default:
2004 jeprintk("jme", "Error upper layer protocol.\n");
2005 break;
2006 }
2007 }
2008}
2009
__always_inline static void
jme_tx_vlan(struct sk_buff *skb, volatile __u16 *vlan, __u8 *flags)
{
	if (vlan_tx_tag_present(skb)) {
		vlan_dbg("jme", "Tag found!(%04x)\n", vlan_tx_tag_get(skb));
		*flags |= TXFLAG_TAGON;
		*vlan = vlan_tx_tag_get(skb);
	}
}

static int
jme_fill_first_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
	struct jme_ring *txring = jme->txring;
	volatile struct txdesc *txdesc;
	struct jme_buffer_info *txbi;
	__u8 flags;

	txdesc = (volatile struct txdesc *)txring->desc + idx;
	txbi = txring->bufinf + idx;

	txdesc->dw[0] = 0;
	txdesc->dw[1] = 0;
	txdesc->dw[2] = 0;
	txdesc->dw[3] = 0;
	txdesc->desc1.pktsize = cpu_to_le16(skb->len);
	/*
	 * Set the OWN bit last. The kernel may fill descriptors faster
	 * than the NIC consumes them, and the NIC must not start on
	 * this descriptor before we tell it to start sending this TX
	 * queue. All other fields are already filled in correctly by
	 * then.
	 */
	wmb();
	flags = TXFLAG_OWN | TXFLAG_INT;
	/*
	 * Set checksum flags only when not doing TSO.
	 */
	if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
		jme_tx_csum(skb, &flags);
	jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
	txdesc->desc1.flags = flags;
	/*
	 * Set tx buffer info after telling NIC to send,
	 * for better tx_clean timing.
	 */
	wmb();
	txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
	txbi->skb = skb;
	txbi->len = skb->len;
	if (!(txbi->start_xmit = jiffies))
		txbi->start_xmit = (0UL - 1);

	return 0;
}

static void
jme_stop_queue_if_full(struct jme_adapter *jme)
{
	struct jme_ring *txring = jme->txring;
	struct jme_buffer_info *txbi = txring->bufinf;

	txbi += atomic_read(&txring->next_to_clean);

	smp_wmb();
	if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS + 2))) {
		netif_stop_queue(jme->dev);
		queue_dbg(jme->dev->name, "TX Queue Paused.\n");
		smp_wmb();
		if (atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold)) {
			netif_wake_queue(jme->dev);
			queue_dbg(jme->dev->name, "TX Queue Fast Woken.\n");
		}
	}

	if (unlikely(txbi->start_xmit &&
			(jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
			txbi->skb)) {
		netif_stop_queue(jme->dev);
	}
}

3bf61c55
GFT
2090/*
2091 * This function is already protected by netif_tx_lock()
2092 */
2093static int
2094jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
d7699f87
GFT
2095{
2096 struct jme_adapter *jme = netdev_priv(netdev);
b3821cc5 2097 int idx;
d7699f87 2098
b3821cc5
GFT
2099 if(skb_shinfo(skb)->nr_frags) {
2100 tx_dbg(netdev->name, "Frags: %d Headlen: %d Len: %d MSS: %d Sum:%d\n",
2101 skb_shinfo(skb)->nr_frags,
2102 skb_headlen(skb),
2103 skb->len,
2104 skb_shinfo(skb)->gso_size,
2105 skb->ip_summed);
2106 }
2107
2108 if(unlikely(jme_expand_header(jme, skb))) {
2109 ++(NET_STAT(jme).tx_dropped);
2110 return NETDEV_TX_OK;
2111 }
2112
2113 idx = jme_alloc_txdesc(jme, skb);
79ce639c 2114
b3821cc5
GFT
2115 if(unlikely(idx < 0)) {
2116 netif_stop_queue(netdev);
2117 jeprintk(netdev->name,
2118 "BUG! Tx ring full when queue awake!\n");
d7699f87 2119
b3821cc5
GFT
2120 return NETDEV_TX_BUSY;
2121 }
2122
2123 jme_map_tx_skb(jme, skb, idx);
2124 jme_fill_first_tx_desc(jme, skb, idx);
2125
2126 tx_dbg(jme->dev->name, "Xmit: %d+%d\n", idx, skb_shinfo(skb)->nr_frags + 2);
d7699f87 2127
4330c2f2
GFT
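/*
 * Doorbell: re-arm TX queue 0 so the MAC picks up the newly
 * owned descriptors.
 */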
2128 jwrite32(jme, JME_TXCS, jme->reg_txcs |
2129 TXCS_SELECT_QUEUE0 |
2130 TXCS_QUEUE0S |
2131 TXCS_ENABLE);
d7699f87
GFT
2132 netdev->trans_start = jiffies;
2133
b3821cc5
GFT
2134 jme_stop_queue_if_full(jme);
2135
4330c2f2 2136 return NETDEV_TX_OK;
d7699f87
GFT
2137}
2138
3bf61c55
GFT
2139static int
2140jme_set_macaddr(struct net_device *netdev, void *p)
d7699f87
GFT
2141{
2142 struct jme_adapter *jme = netdev_priv(netdev);
2143 struct sockaddr *addr = p;
2144 __u32 val;
2145
2146 if(netif_running(netdev))
2147 return -EBUSY;
2148
fcf45b4c 2149 spin_lock(&jme->macaddr_lock);
d7699f87
GFT
2150 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2151
186fc259
GFT
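/*
 * The unicast MAC address is programmed little-endian across two
 * registers: bytes 0-3 into RXUMA_LO, bytes 4-5 into RXUMA_HI.
 */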
2152 val = (addr->sa_data[3] & 0xff) << 24 |
2153 (addr->sa_data[2] & 0xff) << 16 |
2154 (addr->sa_data[1] & 0xff) << 8 |
2155 (addr->sa_data[0] & 0xff);
4330c2f2 2156 jwrite32(jme, JME_RXUMA_LO, val);
186fc259
GFT
2157 val = (addr->sa_data[5] & 0xff) << 8 |
2158 (addr->sa_data[4] & 0xff);
4330c2f2 2159 jwrite32(jme, JME_RXUMA_HI, val);
fcf45b4c 2160 spin_unlock(&jme->macaddr_lock);
d7699f87
GFT
2161
2162 return 0;
2163}
2164
3bf61c55
GFT
2165static void
2166jme_set_multi(struct net_device *netdev)
d7699f87 2167{
3bf61c55 2168 struct jme_adapter *jme = netdev_priv(netdev);
d7699f87 2169 u32 mc_hash[2] = {};
d7699f87 2170 int i;
8c198884 2171 unsigned long flags;
d7699f87 2172
8c198884
GFT
2173 spin_lock_irqsave(&jme->rxmcs_lock, flags);
2174
2175 jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;
d7699f87 2176
3bf61c55 2177 if (netdev->flags & IFF_PROMISC) {
8c198884 2178 jme->reg_rxmcs |= RXMCS_ALLFRAME;
3bf61c55
GFT
2179 }
2180 else if (netdev->flags & IFF_ALLMULTI) {
8c198884 2181 jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
3bf61c55 2182 }
d7699f87 2183 else if(netdev->flags & IFF_MULTICAST) {
3bf61c55
GFT
2184 struct dev_mc_list *mclist;
2185 int bit_nr;
d7699f87 2186
8c198884 2187 jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
3bf61c55
GFT
2188 for (i = 0, mclist = netdev->mc_list;
2189 mclist && i < netdev->mc_count;
2190 ++i, mclist = mclist->next) {
2191
d7699f87
GFT
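/*
 * The low 6 CRC bits select one of 64 hash buckets, spread
 * across the two 32-bit multicast hash registers.
 */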
2192 bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
2193 mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
d7699f87
GFT
2194 }
2195
4330c2f2
GFT
2196 jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
2197 jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
d7699f87
GFT
2198 }
2199
d7699f87 2200 wmb();
8c198884
GFT
2201 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2202
2203 spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
d7699f87
GFT
2204}
2205
3bf61c55 2206static int
8c198884 2207jme_change_mtu(struct net_device *netdev, int new_mtu)
d7699f87 2208{
79ce639c
GFT
2209 struct jme_adapter *jme = netdev_priv(netdev);
2210
29bdd921
GFT
2211 if(new_mtu == jme->old_mtu)
2212 return 0;
2213
79ce639c 2214 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
42b1055e 2215 ((new_mtu) < IPV6_MIN_MTU))
79ce639c
GFT
2216 return -EINVAL;
2217
2218 if(new_mtu > 4000) {
2219 jme->reg_rxcs &= ~RXCS_FIFOTHNP;
2220 jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
2221 jme_restart_rx_engine(jme);
2222 }
2223 else {
2224 jme->reg_rxcs &= ~RXCS_FIFOTHNP;
2225 jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
2226 jme_restart_rx_engine(jme);
2227 }
2228
2229 if(new_mtu > 1900) {
b3821cc5
GFT
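/*
 * The checksum and TSO engines presumably cannot handle frames
 * this large, so turn those features off.
 */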
2230 netdev->features &= ~(NETIF_F_HW_CSUM |
2231 NETIF_F_TSO |
2232 NETIF_F_TSO6);
79ce639c
GFT
2233 }
2234 else {
b3821cc5
GFT
2235 if(jme->flags & JME_FLAG_TXCSUM)
2236 netdev->features |= NETIF_F_HW_CSUM;
2237 if(jme->flags & JME_FLAG_TSO)
2238 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
79ce639c
GFT
2239 }
2240
2241 netdev->mtu = new_mtu;
2242 jme_reset_link(jme);
2243
2244 return 0;
d7699f87
GFT
2245}
2246
8c198884
GFT
2247static void
2248jme_tx_timeout(struct net_device *netdev)
2249{
2250 struct jme_adapter *jme = netdev_priv(netdev);
2251
cdcdc9eb
GFT
2252 jme->phylink = 0;
2253 jme_reset_phy_processor(jme);
2254 if(jme->flags & JME_FLAG_SSET)
2255 jme_set_settings(netdev, &jme->old_ecmd);
2256
8c198884 2257 /*
cdcdc9eb 2258 * Force the link to reset again
8c198884 2259 */
29bdd921 2260 jme_reset_link(jme);
8c198884
GFT
2261}
2262
42b1055e
GFT
2263static void
2264jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2265{
2266 struct jme_adapter *jme = netdev_priv(netdev);
2267
2268 jme->vlgrp = grp;
2269}
2270
3bf61c55
GFT
2271static void
2272jme_get_drvinfo(struct net_device *netdev,
2273 struct ethtool_drvinfo *info)
d7699f87
GFT
2274{
2275 struct jme_adapter *jme = netdev_priv(netdev);
2276
2277 strcpy(info->driver, DRV_NAME);
2278 strcpy(info->version, DRV_VERSION);
2279 strcpy(info->bus_info, pci_name(jme->pdev));
2280}
2281
8c198884
GFT
2282static int
2283jme_get_regs_len(struct net_device *netdev)
2284{
186fc259 2285 return JME_REG_LEN;
8c198884
GFT
2286}
2287
2288static void
2289mmapio_memcpy(struct jme_adapter *jme, __u32 *p, __u32 reg, int len)
2290{
2291 int i;
2292
2293 for(i = 0 ; i < len ; i += 4)
79ce639c 2294 p[i >> 2] = jread32(jme, reg + i);
186fc259 2295}
8c198884 2296
186fc259
GFT
2297static void
2298mdio_memcpy(struct jme_adapter *jme, __u32 *p, int reg_nr)
2299{
2300 int i;
2301 __u16 *p16 = (__u16*)p;
2302
2303 for(i = 0 ; i < reg_nr ; ++i)
2304 p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i);
8c198884
GFT
2305}
2306
2307static void
2308jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
2309{
2310 struct jme_adapter *jme = netdev_priv(netdev);
2311 __u32 *p32 = (__u32*)p;
2312
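/*
 * Dump layout: four 0x100-byte MMIO blocks (MAC, PHY, MISC, RSS)
 * followed by the MDIO PHY registers.
 */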
186fc259 2313 memset(p, 0xFF, JME_REG_LEN);
8c198884
GFT
2314
2315 regs->version = 1;
2316 mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);
2317
2318 p32 += 0x100 >> 2;
2319 mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);
2320
2321 p32 += 0x100 >> 2;
2322 mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);
2323
2324 p32 += 0x100 >> 2;
2325 mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);
2326
186fc259
GFT
2327 p32 += 0x100 >> 2;
2328 mdio_memcpy(jme, p32, JME_PHY_REG_NR);
8c198884
GFT
2329}
2330
2331static int
2332jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
2333{
2334 struct jme_adapter *jme = netdev_priv(netdev);
2335
8c198884
GFT
2336 ecmd->tx_coalesce_usecs = PCC_TX_TO;
2337 ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
2338
cdcdc9eb
GFT
2339 if(jme->flags & JME_FLAG_POLL) {
2340 ecmd->use_adaptive_rx_coalesce = false;
2341 ecmd->rx_coalesce_usecs = 0;
2342 ecmd->rx_max_coalesced_frames = 0;
2343 return 0;
2344 }
2345
2346 ecmd->use_adaptive_rx_coalesce = true;
2347
8c198884
GFT
2348 switch(jme->dpi.cur) {
2349 case PCC_P1:
2350 ecmd->rx_coalesce_usecs = PCC_P1_TO;
2351 ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
2352 break;
2353 case PCC_P2:
2354 ecmd->rx_coalesce_usecs = PCC_P2_TO;
2355 ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
2356 break;
2357 case PCC_P3:
2358 ecmd->rx_coalesce_usecs = PCC_P3_TO;
2359 ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
2360 break;
2361 default:
2362 break;
2363 }
2364
2365 return 0;
2366}
2367
192570e0
GFT
2368static int
2369jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
2370{
2371 struct jme_adapter *jme = netdev_priv(netdev);
2372 struct dynpcc_info *dpi = &(jme->dpi);
2373
cdcdc9eb
GFT
2374 if(netif_running(netdev))
2375 return -EBUSY;
2376
192570e0
GFT
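/*
 * JME_FLAG_POLL selects NAPI polling; it is mutually exclusive
 * with adaptive RX coalescing, so toggle between the two here.
 */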
2377 if(ecmd->use_adaptive_rx_coalesce
2378 && (jme->flags & JME_FLAG_POLL)) {
2379 jme->flags &= ~JME_FLAG_POLL;
cdcdc9eb
GFT
2380 jme->jme_rx = netif_rx;
2381 jme->jme_vlan_rx = vlan_hwaccel_rx;
192570e0
GFT
2382 dpi->cur = PCC_P1;
2383 dpi->attempt = PCC_P1;
2384 dpi->cnt = 0;
2385 jme_set_rx_pcc(jme, PCC_P1);
2386 jme_interrupt_mode(jme);
2387 }
2388 else if(!(ecmd->use_adaptive_rx_coalesce)
2389 && !(jme->flags & JME_FLAG_POLL)) {
2390 jme->flags |= JME_FLAG_POLL;
cdcdc9eb
GFT
2391 jme->jme_rx = netif_receive_skb;
2392 jme->jme_vlan_rx = vlan_hwaccel_receive_skb;
192570e0
GFT
2393 jme_interrupt_mode(jme);
2394 }
2395
2396 return 0;
2397}
2398
8c198884
GFT
2399static void
2400jme_get_pauseparam(struct net_device *netdev,
2401 struct ethtool_pauseparam *ecmd)
2402{
2403 struct jme_adapter *jme = netdev_priv(netdev);
2404 unsigned long flags;
2405 __u32 val;
2406
2407 ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
2408 ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;
2409
2410 spin_lock_irqsave(&jme->phy_lock, flags);
2411 val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2412 spin_unlock_irqrestore(&jme->phy_lock, flags);
b3821cc5
GFT
2413
2414 ecmd->autoneg =
2415 (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
8c198884
GFT
2416}
2417
2418static int
2419jme_set_pauseparam(struct net_device *netdev,
2420 struct ethtool_pauseparam *ecmd)
2421{
2422 struct jme_adapter *jme = netdev_priv(netdev);
2423 unsigned long flags;
2424 __u32 val;
2425
2426 if( ((jme->reg_txpfc & TXPFC_PF_EN) != 0) !=
2427 (ecmd->tx_pause != 0)) {
2428
2429 if(ecmd->tx_pause)
2430 jme->reg_txpfc |= TXPFC_PF_EN;
2431 else
2432 jme->reg_txpfc &= ~TXPFC_PF_EN;
2433
2434 jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
2435 }
2436
2437 spin_lock_irqsave(&jme->rxmcs_lock, flags);
2438 if( ((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) !=
2439 (ecmd->rx_pause != 0)) {
2440
2441 if(ecmd->rx_pause)
2442 jme->reg_rxmcs |= RXMCS_FLOWCTRL;
2443 else
2444 jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;
2445
2446 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2447 }
2448 spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
2449
2450 spin_lock_irqsave(&jme->phy_lock, flags);
2451 val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
b3821cc5 2452 if( ((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) !=
8c198884
GFT
2453 (ecmd->autoneg != 0)) {
2454
2455 if(ecmd->autoneg)
2456 val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2457 else
2458 val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2459
b3821cc5
GFT
2460 jme_mdio_write(jme->dev, jme->mii_if.phy_id,
2461 MII_ADVERTISE, val);
8c198884
GFT
2462 }
2463 spin_unlock_irqrestore(&jme->phy_lock, flags);
2464
2465 return 0;
2466}
2467
29bdd921
GFT
2468static void
2469jme_get_wol(struct net_device *netdev,
2470 struct ethtool_wolinfo *wol)
2471{
2472 struct jme_adapter *jme = netdev_priv(netdev);
2473
2474 wol->supported = WAKE_MAGIC | WAKE_PHY;
2475
2476 wol->wolopts = 0;
2477
2478 if(jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
2479 wol->wolopts |= WAKE_PHY;
2480
2481 if(jme->reg_pmcs & PMCS_MFEN)
2482 wol->wolopts |= WAKE_MAGIC;
2483
2484}
2485
2486static int
2487jme_set_wol(struct net_device *netdev,
2488 struct ethtool_wolinfo *wol)
2489{
2490 struct jme_adapter *jme = netdev_priv(netdev);
2491
2492 if(wol->wolopts & (WAKE_MAGICSECURE |
2493 WAKE_UCAST |
2494 WAKE_MCAST |
2495 WAKE_BCAST |
2496 WAKE_ARP))
2497 return -EOPNOTSUPP;
2498
2499 jme->reg_pmcs = 0;
2500
2501 if(wol->wolopts & WAKE_PHY)
2502 jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;
2503
2504 if(wol->wolopts & WAKE_MAGIC)
2505 jme->reg_pmcs |= PMCS_MFEN;
2506
42b1055e 2507
29bdd921
GFT
2508 return 0;
2509}
b3821cc5 2510
3bf61c55
GFT
2511static int
2512jme_get_settings(struct net_device *netdev,
2513 struct ethtool_cmd *ecmd)
d7699f87
GFT
2514{
2515 struct jme_adapter *jme = netdev_priv(netdev);
2516 int rc;
79ce639c 2517 unsigned long flags;
8c198884 2518
79ce639c 2519 spin_lock_irqsave(&jme->phy_lock, flags);
d7699f87 2520 rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
79ce639c 2521 spin_unlock_irqrestore(&jme->phy_lock, flags);
d7699f87
GFT
2522 return rc;
2523}
2524
3bf61c55
GFT
2525static int
2526jme_set_settings(struct net_device *netdev,
2527 struct ethtool_cmd *ecmd)
d7699f87
GFT
2528{
2529 struct jme_adapter *jme = netdev_priv(netdev);
79ce639c 2530 int rc, fdc=0;
fcf45b4c
GFT
2531 unsigned long flags;
2532
8c198884
GFT
2533 if(ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
2534 return -EINVAL;
2535
79ce639c
GFT
2536 if(jme->mii_if.force_media &&
2537 ecmd->autoneg != AUTONEG_ENABLE &&
2538 (jme->mii_if.full_duplex != ecmd->duplex))
2539 fdc = 1;
2540
fcf45b4c 2541 spin_lock_irqsave(&jme->phy_lock, flags);
d7699f87 2542 rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
fcf45b4c
GFT
2543 spin_unlock_irqrestore(&jme->phy_lock, flags);
2544
79ce639c
GFT
2545 if(!rc && fdc)
2546 jme_reset_link(jme);
2547
29bdd921
GFT
2548 if(!rc) {
2549 jme->flags |= JME_FLAG_SSET;
2550 jme->old_ecmd = *ecmd;
2551 }
2552
d7699f87
GFT
2553 return rc;
2554}
2555
3bf61c55
GFT
2556static __u32
2557jme_get_link(struct net_device *netdev)
2558{
d7699f87
GFT
2559 struct jme_adapter *jme = netdev_priv(netdev);
2560 return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
2561}
2562
8c198884
GFT
2563static u32
2564jme_get_rx_csum(struct net_device *netdev)
2565{
2566 struct jme_adapter *jme = netdev_priv(netdev);
2567
2568 return jme->reg_rxmcs & RXMCS_CHECKSUM;
2569}
2570
2571static int
2572jme_set_rx_csum(struct net_device *netdev, u32 on)
2573{
2574 struct jme_adapter *jme = netdev_priv(netdev);
2575 unsigned long flags;
b3821cc5 2576
8c198884
GFT
2577 spin_lock_irqsave(&jme->rxmcs_lock, flags);
2578 if(on)
2579 jme->reg_rxmcs |= RXMCS_CHECKSUM;
2580 else
2581 jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
2582 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2583 spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
2584
2585 return 0;
2586}
2587
2588static int
2589jme_set_tx_csum(struct net_device *netdev, u32 on)
2590{
b3821cc5
GFT
2591 struct jme_adapter *jme = netdev_priv(netdev);
2592
2593 if(on) {
2594 jme->flags |= JME_FLAG_TXCSUM;
2595 if(netdev->mtu <= 1900)
2596 netdev->features |= NETIF_F_HW_CSUM;
2597 }
2598 else {
2599 jme->flags &= ~JME_FLAG_TXCSUM;
8c198884 2600 netdev->features &= ~NETIF_F_HW_CSUM;
b3821cc5 2601 }
8c198884
GFT
2602
2603 return 0;
2604}
2605
b3821cc5
GFT
2606static int
2607jme_set_tso(struct net_device *netdev, u32 on)
2608{
2609 struct jme_adapter *jme = netdev_priv(netdev);
2610
2611 if (on) {
2612 jme->flags |= JME_FLAG_TSO;
2613 if(netdev->mtu <= 1900)
2614 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
2615 }
2616 else {
2617 jme->flags &= ~JME_FLAG_TSO;
2618 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
2619 }
2620
2621 return 0;
2622}
2623
8c198884
GFT
2624static int
2625jme_nway_reset(struct net_device *netdev)
2626{
2627 struct jme_adapter *jme = netdev_priv(netdev);
2628 jme_restart_an(jme);
2629 return 0;
2630}
2631
186fc259
GFT
2632static __u8
2633jme_smb_read(struct jme_adapter *jme, unsigned int addr)
2634{
2635 __u32 val;
2636 int to;
2637
2638 val = jread32(jme, JME_SMBCSR);
2639 to = JME_SMB_BUSY_TIMEOUT;
2640 while((val & SMBCSR_BUSY) && --to) {
2641 msleep(1);
2642 val = jread32(jme, JME_SMBCSR);
2643 }
2644 if(!to) {
2645 jeprintk(jme->dev->name, "SMB Bus Busy.\n");
2646 return 0xFF;
2647 }
2648
2649 jwrite32(jme, JME_SMBINTF,
2650 ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
2651 SMBINTF_HWRWN_READ |
2652 SMBINTF_HWCMD);
2653
2654 val = jread32(jme, JME_SMBINTF);
2655 to = JME_SMB_BUSY_TIMEOUT;
2656 while((val & SMBINTF_HWCMD) && --to) {
2657 msleep(1);
2658 val = jread32(jme, JME_SMBINTF);
2659 }
2660 if(!to) {
2661 jeprintk(jme->dev->name, "SMB Command Timeout.\n");
2662 return 0xFF;
2663 }
2664
2665 return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
2666}
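/*
 * Both jme_smb_read() above and jme_smb_write() below repeat the
 * same poll-until-clear loop. An illustrative sketch of a shared
 * helper (not part of the driver; the name smb_wait_clear is an
 * assumption for demonstration only):
 */
#if 0
static int
smb_wait_clear(struct jme_adapter *jme, __u32 reg, __u32 bit)
{
	int to = JME_SMB_BUSY_TIMEOUT;

	/* Poll roughly once per millisecond until the bit clears */
	while ((jread32(jme, reg) & bit) && --to)
		msleep(1);

	return to ? 0 : -EBUSY; /* -EBUSY on timeout */
}
#endif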
2667
2668static void
2669jme_smb_write(struct jme_adapter *jme, unsigned int addr, __u8 data)
2670{
2671 __u32 val;
2672 int to;
2673
2674 val = jread32(jme, JME_SMBCSR);
2675 to = JME_SMB_BUSY_TIMEOUT;
2676 while((val & SMBCSR_BUSY) && --to) {
2677 msleep(1);
2678 val = jread32(jme, JME_SMBCSR);
2679 }
2680 if(!to) {
2681 jeprintk(jme->dev->name, "SMB Bus Busy.\n");
2682 return;
2683 }
2684
2685 jwrite32(jme, JME_SMBINTF,
2686 ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
2687 ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
2688 SMBINTF_HWRWN_WRITE |
2689 SMBINTF_HWCMD);
2690
2691 val = jread32(jme, JME_SMBINTF);
2692 to = JME_SMB_BUSY_TIMEOUT;
2693 while((val & SMBINTF_HWCMD) && --to) {
2694 msleep(1);
2695 val = jread32(jme, JME_SMBINTF);
2696 }
2697 if(!to) {
2698 jeprintk(jme->dev->name, "SMB Command Timeout.\n");
2699 return;
2700 }
2701
2702 mdelay(2);
2703}
2704
2705static int
2706jme_get_eeprom_len(struct net_device *netdev)
2707{
2708 struct jme_adapter *jme = netdev_priv(netdev);
2709 __u32 val;
2710 val = jread32(jme, JME_SMBCSR);
2711 return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0;
2712}
2713
2714static int
2715jme_get_eeprom(struct net_device *netdev,
2716 struct ethtool_eeprom *eeprom, u8 *data)
2717{
2718 struct jme_adapter *jme = netdev_priv(netdev);
942ed503 2719 int i, offset = eeprom->offset, len = eeprom->len, idx;
186fc259
GFT
2720
2721 /*
8d27293f 2722 * ethtool will check the boundary for us
186fc259 2723 */
942ed503 2724 memset(data, 0xFF, len);
186fc259 2725 eeprom->magic = JME_EEPROM_MAGIC;
942ed503
GFT
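/*
 * EEPROM contents appear to be 3-byte records starting at offset
 * 2; a record whose first byte has bit 7 set marks the end of
 * the data, so clamp the read length just past that record.
 */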
2726 for(i = 0 ; i < len ; ++i) {
2727 idx = i + offset;
2728 data[i] = jme_smb_read(jme, idx);
2729 if(data[i] == 0xFF)
2730 break;
2731 if((idx > 1) && !((idx - 2) % 3) && (data[i] & 0x80))
2732 len = (len > i + 3) ? i + 3 : len;
2733 }
186fc259
GFT
2734
2735 return 0;
2736}
2737
2738static int
2739jme_set_eeprom(struct net_device *netdev,
2740 struct ethtool_eeprom *eeprom, u8 *data)
2741{
2742 struct jme_adapter *jme = netdev_priv(netdev);
2743 int i, offset = eeprom->offset, len = eeprom->len;
2744
2745 if (eeprom->magic != JME_EEPROM_MAGIC)
2746 return -EINVAL;
2747
2748 /*
8d27293f 2749 * ethtool will check the boundary for us
186fc259
GFT
2750 */
2751 for(i = 0 ; i < len ; ++i)
2752 jme_smb_write(jme, i + offset, data[i]);
2753
2754 return 0;
2755}
2756
d7699f87
GFT
2757static const struct ethtool_ops jme_ethtool_ops = {
2758 .get_drvinfo = jme_get_drvinfo,
8c198884
GFT
2759 .get_regs_len = jme_get_regs_len,
2760 .get_regs = jme_get_regs,
2761 .get_coalesce = jme_get_coalesce,
192570e0 2762 .set_coalesce = jme_set_coalesce,
8c198884
GFT
2763 .get_pauseparam = jme_get_pauseparam,
2764 .set_pauseparam = jme_set_pauseparam,
29bdd921
GFT
2765 .get_wol = jme_get_wol,
2766 .set_wol = jme_set_wol,
d7699f87
GFT
2767 .get_settings = jme_get_settings,
2768 .set_settings = jme_set_settings,
2769 .get_link = jme_get_link,
8c198884
GFT
2770 .get_rx_csum = jme_get_rx_csum,
2771 .set_rx_csum = jme_set_rx_csum,
2772 .set_tx_csum = jme_set_tx_csum,
b3821cc5
GFT
2773 .set_tso = jme_set_tso,
2774 .set_sg = ethtool_op_set_sg,
8c198884 2775 .nway_reset = jme_nway_reset,
186fc259
GFT
2776 .get_eeprom_len = jme_get_eeprom_len,
2777 .get_eeprom = jme_get_eeprom,
2778 .set_eeprom = jme_set_eeprom,
d7699f87
GFT
2779};
2780
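/*
 * Try a 64-bit, then a 40-bit, then a 32-bit DMA mask. Returns 1
 * when DAC (above-4GB) addressing is usable, 0 for 32-bit only,
 * and -1 when no mask can be set.
 */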
3bf61c55
GFT
2781static int
2782jme_pci_dma64(struct pci_dev *pdev)
d7699f87 2783{
3bf61c55 2784 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
42b1055e
GFT
2785 if(!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
2786 dprintk("jme", "64Bit DMA Selected.\n");
3bf61c55 2787 return 1;
42b1055e 2788 }
3bf61c55 2789
8c198884 2790 if (!pci_set_dma_mask(pdev, DMA_40BIT_MASK))
42b1055e
GFT
2791 if(!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK)) {
2792 dprintk("jme", "40Bit DMA Selected.\n");
8c198884 2793 return 1;
42b1055e 2794 }
8c198884 2795
3bf61c55 2796 if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
42b1055e
GFT
2797 if(!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
2798 dprintk("jme", "32Bit DMA Selected.\n");
3bf61c55 2799 return 0;
42b1055e 2800 }
3bf61c55
GFT
2801
2802 return -1;
2803}
2804
42b1055e 2805__always_inline static void
cdcdc9eb
GFT
2806jme_phy_init(struct jme_adapter *jme)
2807{
2808 __u16 reg26;
2809
2810 reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
2811 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
2812}
2813
2814__always_inline static void
2815jme_set_gmii(struct jme_adapter *jme)
2816{
2817 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
2818}
2819
2820static void
2821jme_check_hw_ver(struct jme_adapter *jme)
42b1055e 2822{
cdcdc9eb
GFT
2823 __u32 chipmode;
2824
2825 chipmode = jread32(jme, JME_CHIPMODE);
2826
2827 jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
2828 jme->chipver = (chipmode & CM_CHIPVER_MASK) >> CM_CHIPVER_SHIFT;
42b1055e
GFT
2829}
2830
3bf61c55
GFT
2831static int __devinit
2832jme_init_one(struct pci_dev *pdev,
2833 const struct pci_device_id *ent)
2834{
cdcdc9eb 2835 int rc = 0, using_dac, i;
d7699f87
GFT
2836 struct net_device *netdev;
2837 struct jme_adapter *jme;
cdcdc9eb 2838 __u16 bmcr, bmsr;
d7699f87
GFT
2839
2840 /*
2841 * set up PCI device basics
2842 */
4330c2f2
GFT
2843 rc = pci_enable_device(pdev);
2844 if(rc) {
2845 printk(KERN_ERR PFX "Cannot enable PCI device.\n");
2846 goto err_out;
2847 }
d7699f87 2848
3bf61c55
GFT
2849 using_dac = jme_pci_dma64(pdev);
2850 if(using_dac < 0) {
2851 printk(KERN_ERR PFX "Cannot set PCI DMA Mask.\n");
2852 rc = -EIO;
2853 goto err_out_disable_pdev;
2854 }
2855
4330c2f2
GFT
2856 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2857 printk(KERN_ERR PFX "No PCI resource region found.\n");
2858 rc = -ENOMEM;
2859 goto err_out_disable_pdev;
2860 }
d7699f87 2861
4330c2f2
GFT
2862 rc = pci_request_regions(pdev, DRV_NAME);
2863 if(rc) {
2864 printk(KERN_ERR PFX "Cannot obtain PCI resource region.\n");
2865 goto err_out_disable_pdev;
2866 }
d7699f87
GFT
2867
2868 pci_set_master(pdev);
2869
2870 /*
2871 * alloc and init net device
2872 */
3bf61c55 2873 netdev = alloc_etherdev(sizeof(*jme));
d7699f87 2874 if(!netdev) {
42b1055e 2875 printk(KERN_ERR PFX "Cannot allocate netdev structure.\n");
4330c2f2
GFT
2876 rc = -ENOMEM;
2877 goto err_out_release_regions;
d7699f87
GFT
2878 }
2879 netdev->open = jme_open;
2880 netdev->stop = jme_close;
2881 netdev->hard_start_xmit = jme_start_xmit;
d7699f87
GFT
2882 netdev->set_mac_address = jme_set_macaddr;
2883 netdev->set_multicast_list = jme_set_multi;
2884 netdev->change_mtu = jme_change_mtu;
2885 netdev->ethtool_ops = &jme_ethtool_ops;
8c198884
GFT
2886 netdev->tx_timeout = jme_tx_timeout;
2887 netdev->watchdog_timeo = TX_TIMEOUT;
42b1055e 2888 netdev->vlan_rx_register = jme_vlan_rx_register;
3bf61c55 2889 NETDEV_GET_STATS(netdev, &jme_get_stats);
42b1055e 2890 netdev->features = NETIF_F_HW_CSUM |
b3821cc5
GFT
2891 NETIF_F_SG |
2892 NETIF_F_TSO |
2893 NETIF_F_TSO6 |
42b1055e
GFT
2894 NETIF_F_HW_VLAN_TX |
2895 NETIF_F_HW_VLAN_RX;
3bf61c55 2896 if(using_dac)
8c198884 2897 netdev->features |= NETIF_F_HIGHDMA;
d7699f87
GFT
2898
2899 SET_NETDEV_DEV(netdev, &pdev->dev);
2900 pci_set_drvdata(pdev, netdev);
2901
2902 /*
2903 * init adapter info
2904 */
2905 jme = netdev_priv(netdev);
2906 jme->pdev = pdev;
2907 jme->dev = netdev;
cdcdc9eb
GFT
2908 jme->jme_rx = netif_rx;
2909 jme->jme_vlan_rx = vlan_hwaccel_rx;
29bdd921 2910 jme->old_mtu = netdev->mtu = 1500;
fcf45b4c 2911 jme->phylink = 0;
b3821cc5
GFT
2912 jme->tx_ring_size = 1 << 10;
2913 jme->tx_ring_mask = jme->tx_ring_size - 1;
2914 jme->tx_wake_threshold = 1 << 9;
2915 jme->rx_ring_size = 1 << 9;
2916 jme->rx_ring_mask = jme->rx_ring_size - 1;
d7699f87
GFT
2917 jme->regs = ioremap(pci_resource_start(pdev, 0),
2918 pci_resource_len(pdev, 0));
4330c2f2 2919 if (!(jme->regs)) {
42b1055e 2920 printk(KERN_ERR PFX "Failed to map PCI resource region.\n");
d7699f87
GFT
2921 rc = -ENOMEM;
2922 goto err_out_free_netdev;
2923 }
4330c2f2
GFT
2924 jme->shadow_regs = pci_alloc_consistent(pdev,
2925 sizeof(__u32) * SHADOW_REG_NR,
2926 &(jme->shadow_dma));
2927 if (!(jme->shadow_regs)) {
42b1055e 2928 printk(KERN_ERR PFX "Failed to allocate shadow register mapping.\n");
4330c2f2
GFT
2929 rc = -ENOMEM;
2930 goto err_out_unmap;
2931 }
2932
cdcdc9eb 2933 NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)
192570e0 2934
d7699f87 2935 spin_lock_init(&jme->phy_lock);
fcf45b4c 2936 spin_lock_init(&jme->macaddr_lock);
8c198884 2937 spin_lock_init(&jme->rxmcs_lock);
fcf45b4c 2938
fcf45b4c
GFT
2939 atomic_set(&jme->link_changing, 1);
2940 atomic_set(&jme->rx_cleaning, 1);
2941 atomic_set(&jme->tx_cleaning, 1);
192570e0 2942 atomic_set(&jme->rx_empty, 1);
fcf45b4c 2943
79ce639c
GFT
2944 tasklet_init(&jme->pcc_task,
2945 &jme_pcc_tasklet,
2946 (unsigned long) jme);
4330c2f2
GFT
2947 tasklet_init(&jme->linkch_task,
2948 &jme_link_change_tasklet,
2949 (unsigned long) jme);
2950 tasklet_init(&jme->txclean_task,
2951 &jme_tx_clean_tasklet,
2952 (unsigned long) jme);
2953 tasklet_init(&jme->rxclean_task,
2954 &jme_rx_clean_tasklet,
2955 (unsigned long) jme);
fcf45b4c
GFT
2956 tasklet_init(&jme->rxempty_task,
2957 &jme_rx_empty_tasklet,
2958 (unsigned long) jme);
8c198884
GFT
2959 jme->dpi.cur = PCC_P1;
2960
8d27293f
GFT
2961 if(pdev->device == JME_GE_DEVICE)
2962 jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
2963 else
2964 jme->reg_ghc = GHC_DPX | GHC_SPEED_100M;
79ce639c 2965 jme->reg_rxcs = RXCS_DEFAULT;
8c198884
GFT
2966 jme->reg_rxmcs = RXMCS_DEFAULT;
2967 jme->reg_txpfc = 0;
b3821cc5 2968 jme->reg_pmcs = PMCS_LFEN | PMCS_LREN | PMCS_MFEN;
cdcdc9eb 2969 jme->flags = JME_FLAG_TXCSUM | JME_FLAG_TSO;
192570e0 2970
fcf45b4c
GFT
2971 /*
2972 * Get Max Read Req Size from PCI Config Space
2973 */
2974 pci_read_config_byte(pdev, PCI_CONF_DCSR_MRRS, &jme->mrrs);
2975 switch(jme->mrrs) {
2976 case MRRS_128B:
2977 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
2978 break;
2979 case MRRS_256B:
2980 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
2981 break;
2982 default:
2983 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
2984 break;
2985 }
2986
2987
d7699f87 2988 /*
cdcdc9eb 2989 * Must check the hardware version before reset_mac_processor
d7699f87 2990 */
cdcdc9eb
GFT
2991 jme_check_hw_ver(jme);
2992 jme->mii_if.dev = netdev;
2993 if(jme->fpgaver) {
2994 jme->mii_if.phy_id = 0;
2995 for(i = 1 ; i < 32 ; ++i) {
2996 bmcr = jme_mdio_read(netdev, i, MII_BMCR);
2997 bmsr = jme_mdio_read(netdev, i, MII_BMSR);
2998 if(bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
2999 jme->mii_if.phy_id = i;
3000 break;
3001 }
3002 }
3003
3004 if(!jme->mii_if.phy_id) {
3005 rc = -EIO;
3006 printk(KERN_ERR PFX "Cannot find phy_id.\n");
3007 goto err_out_free_shadow;
3008 }
3009
3010 jme->reg_ghc |= GHC_LINK_POLL;
3011 }
3012 else {
3013 jme->mii_if.phy_id = 1;
3014 }
8d27293f
GFT
3015 if(pdev->device == JME_GE_DEVICE)
3016 jme->mii_if.supports_gmii = true;
3017 else
3018 jme->mii_if.supports_gmii = false;
cdcdc9eb
GFT
3019 jme->mii_if.mdio_read = jme_mdio_read;
3020 jme->mii_if.mdio_write = jme_mdio_write;
3021
d7699f87 3022 jme_clear_pm(jme);
cdcdc9eb
GFT
3023 if(jme->fpgaver)
3024 jme_set_gmii(jme);
3025 else
3026 jme_phy_init(jme);
42b1055e 3027 jme_phy_off(jme);
cdcdc9eb
GFT
3028
3029 /*
3030 * Reset MAC processor and reload EEPROM for MAC Address
3031 */
d7699f87 3032 jme_reset_mac_processor(jme);
4330c2f2
GFT
3033 rc = jme_reload_eeprom(jme);
3034 if(rc) {
3bf61c55 3035 printk(KERN_ERR PFX
b3821cc5 3036 "Failed to reload EEPROM for reading MAC address.\n");
4330c2f2
GFT
3037 goto err_out_free_shadow;
3038 }
d7699f87
GFT
3039 jme_load_macaddr(netdev);
3040
3041
3042 /*
3043 * Tell stack that we are not ready to work until open()
3044 */
3045 netif_carrier_off(netdev);
3046 netif_stop_queue(netdev);
3047
3048 /*
3049 * Register netdev
3050 */
4330c2f2
GFT
3051 rc = register_netdev(netdev);
3052 if(rc) {
3053 printk(KERN_ERR PFX "Cannot register net device.\n");
3054 goto err_out_free_shadow;
3055 }
d7699f87 3056
4330c2f2 3057 jprintk(netdev->name,
cdcdc9eb
GFT
3058 "JMC250 gigabit%s ver:%u eth %02x:%02x:%02x:%02x:%02x:%02x\n",
3059 (jme->fpgaver != 0)?" (FPGA)":"",
3060 (jme->fpgaver != 0)?jme->fpgaver:jme->chipver,
4330c2f2
GFT
3061 netdev->dev_addr[0],
3062 netdev->dev_addr[1],
3063 netdev->dev_addr[2],
3064 netdev->dev_addr[3],
3065 netdev->dev_addr[4],
8c198884 3066 netdev->dev_addr[5]);
d7699f87
GFT
3067
3068 return 0;
3069
4330c2f2
GFT
3070err_out_free_shadow:
3071 pci_free_consistent(pdev,
3072 sizeof(__u32) * SHADOW_REG_NR,
3073 jme->shadow_regs,
3074 jme->shadow_dma);
d7699f87
GFT
3075err_out_unmap:
3076 iounmap(jme->regs);
3077err_out_free_netdev:
3078 pci_set_drvdata(pdev, NULL);
3079 free_netdev(netdev);
4330c2f2
GFT
3080err_out_release_regions:
3081 pci_release_regions(pdev);
d7699f87
GFT
3082err_out_disable_pdev:
3083 pci_disable_device(pdev);
d7699f87 3084err_out:
4330c2f2 3085 return rc;
d7699f87
GFT
3086}
3087
3bf61c55
GFT
3088static void __devexit
3089jme_remove_one(struct pci_dev *pdev)
3090{
d7699f87
GFT
3091 struct net_device *netdev = pci_get_drvdata(pdev);
3092 struct jme_adapter *jme = netdev_priv(netdev);
3093
3094 unregister_netdev(netdev);
4330c2f2
GFT
3095 pci_free_consistent(pdev,
3096 sizeof(__u32) * SHADOW_REG_NR,
3097 jme->shadow_regs,
3098 jme->shadow_dma);
d7699f87
GFT
3099 iounmap(jme->regs);
3100 pci_set_drvdata(pdev, NULL);
3101 free_netdev(netdev);
3102 pci_release_regions(pdev);
3103 pci_disable_device(pdev);
3104
3105}
3106
29bdd921
GFT
3107static int
3108jme_suspend(struct pci_dev *pdev, pm_message_t state)
3109{
3110 struct net_device *netdev = pci_get_drvdata(pdev);
3111 struct jme_adapter *jme = netdev_priv(netdev);
3112 int timeout = 100;
3113
3114 atomic_dec(&jme->link_changing);
3115
3116 netif_device_detach(netdev);
3117 netif_stop_queue(netdev);
3118 jme_stop_irq(jme);
3119 jme_free_irq(jme);
3120
3121 while(--timeout > 0 &&
3122 (
3123 atomic_read(&jme->rx_cleaning) != 1 ||
3124 atomic_read(&jme->tx_cleaning) != 1
3125 )) {
3126 mdelay(1);
3127 }
3128 if(!timeout) {
3129 jeprintk(netdev->name, "Timed out waiting for tasklets.\n");
3130 return -EBUSY;
3131 }
3132 jme_disable_shadow(jme);
3133
3134 if(netif_carrier_ok(netdev)) {
3135 jme_stop_pcc_timer(jme);
3136 jme_reset_mac_processor(jme);
3137 jme_free_rx_resources(jme);
3138 jme_free_tx_resources(jme);
3139 netif_carrier_off(netdev);
3140 jme->phylink = 0;
192570e0 3141
cdcdc9eb 3142 if(jme->flags & JME_FLAG_POLL)
192570e0 3143 jme_polling_mode(jme);
29bdd921
GFT
3144 }
3145
29bdd921
GFT
3146
3147 pci_save_state(pdev);
3148 if(jme->reg_pmcs) {
42b1055e 3149 jme_set_100m_half(jme);
29bdd921 3150 jwrite32(jme, JME_PMCS, jme->reg_pmcs);
42b1055e
GFT
3151 pci_enable_wake(pdev, PCI_D3hot, true);
3152 pci_enable_wake(pdev, PCI_D3cold, true);
29bdd921
GFT
3153 }
3154 else {
42b1055e
GFT
3155 jme_phy_off(jme);
3156 pci_enable_wake(pdev, PCI_D3hot, false);
3157 pci_enable_wake(pdev, PCI_D3cold, false);
29bdd921
GFT
3158 }
3159 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3160
3161 return 0;
3162}
3163
3164static int
3165jme_resume(struct pci_dev *pdev)
3166{
3167 struct net_device *netdev = pci_get_drvdata(pdev);
3168 struct jme_adapter *jme = netdev_priv(netdev);
3169
3170 jme_clear_pm(jme);
3171 pci_restore_state(pdev);
3172
3173 if(jme->flags & JME_FLAG_SSET)
3174 jme_set_settings(netdev, &jme->old_ecmd);
3175 else
3176 jme_reset_phy_processor(jme);
3177
3178 jme_reset_mac_processor(jme);
3179 jme_enable_shadow(jme);
3180 jme_request_irq(jme);
3181 jme_start_irq(jme);
3182 netif_device_attach(netdev);
3183
3184 atomic_inc(&jme->link_changing);
3185
3186 jme_reset_link(jme);
3187
3188 return 0;
3189}
3190
d7699f87 3191static struct pci_device_id jme_pci_tbl[] = {
8d27293f
GFT
3192 { PCI_VDEVICE(JMICRON, JME_GE_DEVICE) },
3193 { PCI_VDEVICE(JMICRON, JME_FE_DEVICE) },
d7699f87
GFT
3194 { }
3195};
3196
3197static struct pci_driver jme_driver = {
3198 .name = DRV_NAME,
3199 .id_table = jme_pci_tbl,
3200 .probe = jme_init_one,
3201 .remove = __devexit_p(jme_remove_one),
d7699f87
GFT
3202#ifdef CONFIG_PM
3203 .suspend = jme_suspend,
3204 .resume = jme_resume,
3205#endif /* CONFIG_PM */
d7699f87
GFT
3206};
3207
3bf61c55
GFT
3208static int __init
3209jme_init_module(void)
d7699f87 3210{
4330c2f2
GFT
3211 printk(KERN_INFO PFX "JMicron JMC250 gigabit ethernet "
3212 "driver version %s\n", DRV_VERSION);
d7699f87
GFT
3213 return pci_register_driver(&jme_driver);
3214}
3215
3bf61c55
GFT
3216static void __exit
3217jme_cleanup_module(void)
d7699f87
GFT
3218{
3219 pci_unregister_driver(&jme_driver);
3220}
3221
3222module_init(jme_init_module);
3223module_exit(jme_cleanup_module);
3224
3bf61c55 3225MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
d7699f87
GFT
3226MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
3227MODULE_LICENSE("GPL");
3228MODULE_VERSION(DRV_VERSION);
3229MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
3230