/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * http://www.jmicron.com/
 *
 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

/*
 * TODO:
 *      -  Decode register dump for ethtool.
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>
#include "jme.h"

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
static struct net_device_stats *
jme_get_stats(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        return &jme->stats;
}
#endif

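/*
 * MDIO access goes through the SMI register: a request is posted with
 * the PHY and register addresses encoded, then SMI_OP_REQ is polled
 * until the hardware clears it (up to JME_PHY_TIMEOUT * 50 iterations
 * of 20us each).
 */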
static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int i, val;

        jwrite32(jme, JME_SMI, SMI_OP_REQ |
                                smi_phy_addr(phy) |
                                smi_reg_addr(reg));

        wmb();
        for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
                udelay(20);
                val = jread32(jme, JME_SMI);
                if ((val & SMI_OP_REQ) == 0)
                        break;
        }

        if (i == 0) {
                jeprintk("jme", "phy(%d) read timeout : %d\n", phy, reg);
                return 0;
        }

        return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

static void
jme_mdio_write(struct net_device *netdev,
                                int phy, int reg, int val)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int i;

        jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
                ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
                smi_phy_addr(phy) | smi_reg_addr(reg));

        wmb();
        for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
                udelay(20);
                val = jread32(jme, JME_SMI);
                if ((val & SMI_OP_REQ) == 0)
                        break;
        }

        if (i == 0)
                jeprintk("jme", "phy(%d) write timeout : %d\n", phy, reg);

        return;
}

__always_inline static void
jme_reset_phy_processor(struct jme_adapter *jme)
{
        __u32 val;

        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
                        MII_ADVERTISE, ADVERTISE_ALL |
                        ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
                        MII_CTRL1000,
                        ADVERTISE_1000FULL | ADVERTISE_1000HALF);

        val = jme_mdio_read(jme->dev,
                                jme->mii_if.phy_id,
                                MII_BMCR);

        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
                        MII_BMCR, val | BMCR_RESET);

        return;
}

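/*
 * Wake-on-LAN frames are programmed indirectly: JME_WFOI selects the
 * frame number and whether the following JME_WFODP write carries the
 * CRC pattern or one of the WAKEUP_FRAME_MASK_DWNR mask dwords.
 */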
static void
jme_setup_wakeup_frame(struct jme_adapter *jme,
                __u32 *mask, __u32 crc, int fnr)
{
        int i;

        /*
         * Setup CRC pattern
         */
        jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
        wmb();
        jwrite32(jme, JME_WFODP, crc);
        wmb();

        /*
         * Setup Mask
         */
        for(i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
                jwrite32(jme, JME_WFOI,
                                ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
                                (fnr & WFOI_FRAME_SEL));
                wmb();
                jwrite32(jme, JME_WFODP, mask[i]);
                wmb();
        }
}

__always_inline static void
jme_reset_mac_processor(struct jme_adapter *jme)
{
        __u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0,0,0,0};
        __u32 crc = 0xCDCDCDCD;
        __u32 gpreg0;
        int i;

        jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
        udelay(2);
        jwrite32(jme, JME_GHC, jme->reg_ghc);
        jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
        jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
        for(i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
                jme_setup_wakeup_frame(jme, mask, crc, i);
        if(jme->fpgaver)
                gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL;
        else
                gpreg0 = GPREG0_DEFAULT;
        jwrite32(jme, JME_GPREG0, gpreg0);
        jwrite32(jme, JME_GPREG1, 0);
}

__always_inline static void
jme_clear_pm(struct jme_adapter *jme)
{
        jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
        pci_set_power_state(jme->pdev, PCI_D0);
        pci_enable_wake(jme->pdev, PCI_D0, false);
}

static int
jme_reload_eeprom(struct jme_adapter *jme)
{
        __u32 val;
        int i;

        val = jread32(jme, JME_SMBCSR);

        if(val & SMBCSR_EEPROMD)
        {
                val |= SMBCSR_CNACK;
                jwrite32(jme, JME_SMBCSR, val);
                val |= SMBCSR_RELOAD;
                jwrite32(jme, JME_SMBCSR, val);
                mdelay(12);

                for (i = JME_SMB_TIMEOUT; i > 0; --i)
                {
                        mdelay(1);
                        if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
                                break;
                }

                if(i == 0) {
                        jeprintk(jme->dev->name, "eeprom reload timeout\n");
                        return -EIO;
                }
        }
        else
                return -EIO;

        return 0;
}

static void
jme_load_macaddr(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        unsigned char macaddr[6];
        __u32 val;

        spin_lock(&jme->macaddr_lock);
        val = jread32(jme, JME_RXUMA_LO);
        macaddr[0] = (val >>  0) & 0xFF;
        macaddr[1] = (val >>  8) & 0xFF;
        macaddr[2] = (val >> 16) & 0xFF;
        macaddr[3] = (val >> 24) & 0xFF;
        val = jread32(jme, JME_RXUMA_HI);
        macaddr[4] = (val >>  0) & 0xFF;
        macaddr[5] = (val >>  8) & 0xFF;
        memcpy(netdev->dev_addr, macaddr, 6);
        spin_unlock(&jme->macaddr_lock);
}

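/*
 * Rx packet completion coalescing (PCC): each level programs a
 * timeout/packet-count pair into JME_PCCRX0, so an interrupt fires
 * when either PCC_Px_CNT packets have arrived or PCC_Px_TO expires.
 * PCC_OFF is the setting used while in NAPI polling mode.
 */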
__always_inline static void
jme_set_rx_pcc(struct jme_adapter *jme, int p)
{
        switch(p) {
        case PCC_OFF:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        case PCC_P1:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        case PCC_P2:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        case PCC_P3:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        default:
                break;
        }
        wmb();

        if(!(jme->flags & JME_FLAG_POLL))
                dprintk(jme->dev->name, "Switched to PCC_P%d\n", p);
}

static void
jme_start_irq(struct jme_adapter *jme)
{
        register struct dynpcc_info *dpi = &(jme->dpi);

        jme_set_rx_pcc(jme, PCC_P1);
        dpi->cur                = PCC_P1;
        dpi->attempt            = PCC_P1;
        dpi->cnt                = 0;

        jwrite32(jme, JME_PCCTX,
                        ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
                        ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
                        PCCTXQ0_EN
                );

        /*
         * Enable Interrupts
         */
        jwrite32(jme, JME_IENS, INTR_ENABLE);
}

__always_inline static void
jme_stop_irq(struct jme_adapter *jme)
{
        /*
         * Disable Interrupts
         */
        jwrite32(jme, JME_IENC, INTR_ENABLE);
}

__always_inline static void
jme_enable_shadow(struct jme_adapter *jme)
{
        jwrite32(jme,
                 JME_SHBA_LO,
                 ((__u32)jme->shadow_dma & ~((__u32)0x1F)) | SHBA_POSTEN);
}

__always_inline static void
jme_disable_shadow(struct jme_adapter *jme)
{
        jwrite32(jme, JME_SHBA_LO, 0x0);
}

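/*
 * On FPGA-based hardware (jme->fpgaver set), link status is assembled
 * from the PHY rather than the JME_PHY_LINK register: vendor register
 * 17 carries the speed/duplex bits and BMSR supplies the
 * autonegotiation-complete flag.
 */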
static __u32
jme_linkstat_from_phy(struct jme_adapter *jme)
{
        __u32 phylink, bmsr;

        phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17);
        bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
        if(bmsr & BMSR_ANEGCOMPLETE)
                phylink |= PHY_LINK_AUTONEG_COMPLETE;

        return phylink;
}

static int
jme_check_link(struct net_device *netdev, int testonly)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        __u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr;
        char linkmsg[64];
        int rc = 0;

        linkmsg[0] = '\0';

        if(jme->fpgaver)
                phylink = jme_linkstat_from_phy(jme);
        else
                phylink = jread32(jme, JME_PHY_LINK);

        if (phylink & PHY_LINK_UP) {
                if(!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
                        /*
                         * If autonegotiation was not enabled,
                         * speed/duplex info must be obtained from
                         * the PHY through SMI.
                         */
                        phylink = PHY_LINK_UP;

                        bmcr = jme_mdio_read(jme->dev,
                                                jme->mii_if.phy_id,
                                                MII_BMCR);

                        phylink |= ((bmcr & BMCR_SPEED1000) &&
                                        (bmcr & BMCR_SPEED100) == 0) ?
                                        PHY_LINK_SPEED_1000M :
                                        (bmcr & BMCR_SPEED100) ?
                                        PHY_LINK_SPEED_100M :
                                        PHY_LINK_SPEED_10M;

                        phylink |= (bmcr & BMCR_FULLDPLX) ?
                                         PHY_LINK_DUPLEX : 0;

                        strcat(linkmsg, "Forced: ");
                }
                else {
                        /*
                         * Keep polling until speed/duplex resolution
                         * completes
                         */
                        while(!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
                                --cnt) {

                                udelay(1);

                                if(jme->fpgaver)
                                        phylink = jme_linkstat_from_phy(jme);
                                else
                                        phylink = jread32(jme, JME_PHY_LINK);
                        }

                        if(!cnt)
                                jeprintk(netdev->name,
                                        "Waiting for speed resolve timed out.\n");

                        strcat(linkmsg, "ANed: ");
                }

                if(jme->phylink == phylink) {
                        rc = 1;
                        goto out;
                }
                if(testonly)
                        goto out;

                jme->phylink = phylink;

                ghc = jme->reg_ghc & ~(GHC_SPEED_10M |
                                        GHC_SPEED_100M |
                                        GHC_SPEED_1000M |
                                        GHC_DPX);
                switch(phylink & PHY_LINK_SPEED_MASK) {
                        case PHY_LINK_SPEED_10M:
                                ghc |= GHC_SPEED_10M;
                                strcat(linkmsg, "10 Mbps, ");
                                break;
                        case PHY_LINK_SPEED_100M:
                                ghc |= GHC_SPEED_100M;
                                strcat(linkmsg, "100 Mbps, ");
                                break;
                        case PHY_LINK_SPEED_1000M:
                                ghc |= GHC_SPEED_1000M;
                                strcat(linkmsg, "1000 Mbps, ");
                                break;
                        default:
                                break;
                }
                ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;

                strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
                                        "Full-Duplex, " :
                                        "Half-Duplex, ");

                if(phylink & PHY_LINK_MDI_STAT)
                        strcat(linkmsg, "MDI-X");
                else
                        strcat(linkmsg, "MDI");

                if(phylink & PHY_LINK_DUPLEX)
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
                else {
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
                                                TXMCS_BACKOFF |
                                                TXMCS_CARRIERSENSE |
                                                TXMCS_COLLISION);
                        jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
                                ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
                                TXTRHD_TXREN |
                                ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
                }

                jme->reg_ghc = ghc;
                jwrite32(jme, JME_GHC, ghc);

                jprintk(netdev->name, "Link is up at %s.\n", linkmsg);
                netif_carrier_on(netdev);
        }
        else {
                if(testonly)
                        goto out;

                jprintk(netdev->name, "Link is down.\n");
                jme->phylink = 0;
                netif_carrier_off(netdev);
        }

out:
        return rc;
}

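/*
 * Ring buffers are allocated RING_DESC_ALIGN bytes oversized so that
 * both the CPU pointer (txring->desc) and the DMA address
 * (txring->dma) can be rounded up to the 16-byte descriptor alignment
 * the hardware requires.
 */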
static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
        struct jme_ring *txring = &(jme->txring[0]);

        txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                                   TX_RING_ALLOC_SIZE(jme->tx_ring_size),
                                   &(txring->dmaalloc),
                                   GFP_ATOMIC);

        if(!txring->alloc) {
                txring->desc = NULL;
                txring->dmaalloc = 0;
                txring->dma = 0;
                return -ENOMEM;
        }

        /*
         * 16-byte alignment
         */
        txring->desc            = (void*)ALIGN((unsigned long)(txring->alloc),
                                                RING_DESC_ALIGN);
        txring->dma             = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
        txring->next_to_use     = 0;
        atomic_set(&txring->next_to_clean, 0);
        atomic_set(&txring->nr_free, jme->tx_ring_size);

        /*
         * Initialize Transmit Descriptors
         */
        memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
        memset(txring->bufinf, 0,
                sizeof(struct jme_buffer_info) * jme->tx_ring_size);

        return 0;
}

static void
jme_free_tx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *txring = &(jme->txring[0]);
        struct jme_buffer_info *txbi = txring->bufinf;

        if(txring->alloc) {
                for(i = 0 ; i < jme->tx_ring_size ; ++i) {
                        txbi = txring->bufinf + i;
                        if(txbi->skb) {
                                dev_kfree_skb(txbi->skb);
                                txbi->skb = NULL;
                        }
                        txbi->mapping   = 0;
                        txbi->len       = 0;
                        txbi->nr_desc   = 0;
                }

                dma_free_coherent(&(jme->pdev->dev),
                                  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
                                  txring->alloc,
                                  txring->dmaalloc);

                txring->alloc           = NULL;
                txring->desc            = NULL;
                txring->dmaalloc        = 0;
                txring->dma             = 0;
        }
        txring->next_to_use     = 0;
        atomic_set(&txring->next_to_clean, 0);
        atomic_set(&txring->nr_free, 0);
}

__always_inline static void
jme_enable_tx_engine(struct jme_adapter *jme)
{
        /*
         * Select Queue 0
         */
        jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);

        /*
         * Setup TX Queue 0 DMA Base Address
         */
        jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
        jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
        jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

        /*
         * Setup TX Descriptor Count
         */
        jwrite32(jme, JME_TXQDC, jme->tx_ring_size);

        /*
         * Enable TX Engine
         */
        wmb();
        jwrite32(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
                                TXCS_ENABLE);
}

__always_inline static void
jme_restart_tx_engine(struct jme_adapter *jme)
{
        /*
         * Restart TX Engine
         */
        jwrite32(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
                                TXCS_ENABLE);
}

__always_inline static void
jme_disable_tx_engine(struct jme_adapter *jme)
{
        int i;
        __u32 val;

        /*
         * Disable TX Engine
         */
        jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);

        val = jread32(jme, JME_TXCS);
        for(i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i)
        {
                mdelay(1);
                val = jread32(jme, JME_TXCS);
        }

        if(!i) {
                jeprintk(jme->dev->name, "Disable TX engine timeout.\n");
                jme_reset_mac_processor(jme);
        }
}

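/*
 * Hand a descriptor back to the hardware: all fields are written
 * first, then a wmb() orders them before RXFLAG_OWN is set, so the
 * NIC never sees a half-initialized descriptor.
 */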
static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
        struct jme_ring *rxring = jme->rxring;
        register volatile struct rxdesc* rxdesc = rxring->desc;
        struct jme_buffer_info *rxbi = rxring->bufinf;
        rxdesc += i;
        rxbi += i;

        rxdesc->dw[0] = 0;
        rxdesc->dw[1] = 0;
        rxdesc->desc1.bufaddrh  = cpu_to_le32((__u64)rxbi->mapping >> 32);
        rxdesc->desc1.bufaddrl  = cpu_to_le32(
                                        (__u64)rxbi->mapping & 0xFFFFFFFFUL);
        rxdesc->desc1.datalen   = cpu_to_le16(rxbi->len);
        if(jme->dev->features & NETIF_F_HIGHDMA)
                rxdesc->desc1.flags = RXFLAG_64BIT;
        wmb();
        rxdesc->desc1.flags     |= RXFLAG_OWN | RXFLAG_INT;
}

static int
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        struct jme_buffer_info *rxbi = rxring->bufinf + i;
        unsigned long offset;
        struct sk_buff* skb;

        skb = netdev_alloc_skb(jme->dev,
                jme->dev->mtu + RX_EXTRA_LEN);
        if(unlikely(!skb))
                return -ENOMEM;

        if(unlikely(offset =
                        (unsigned long)(skb->data)
                        & ((unsigned long)RX_BUF_DMA_ALIGN - 1)))
                skb_reserve(skb, RX_BUF_DMA_ALIGN - offset);

        rxbi->skb = skb;
        rxbi->len = skb_tailroom(skb);
        rxbi->mapping = pci_map_page(jme->pdev,
                                        virt_to_page(skb->data),
                                        offset_in_page(skb->data),
                                        rxbi->len,
                                        PCI_DMA_FROMDEVICE);

        return 0;
}

static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        struct jme_buffer_info *rxbi = rxring->bufinf;
        rxbi += i;

        if(rxbi->skb) {
                pci_unmap_page(jme->pdev,
                                 rxbi->mapping,
                                 rxbi->len,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb(rxbi->skb);
                rxbi->skb = NULL;
                rxbi->mapping = 0;
                rxbi->len = 0;
        }
}

static void
jme_free_rx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *rxring = &(jme->rxring[0]);

        if(rxring->alloc) {
                for(i = 0 ; i < jme->rx_ring_size ; ++i)
                        jme_free_rx_buf(jme, i);

                dma_free_coherent(&(jme->pdev->dev),
                                  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
                                  rxring->alloc,
                                  rxring->dmaalloc);
                rxring->alloc    = NULL;
                rxring->desc     = NULL;
                rxring->dmaalloc = 0;
                rxring->dma      = 0;
        }
        rxring->next_to_use   = 0;
        atomic_set(&rxring->next_to_clean, 0);
}

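/*
 * RX ring setup mirrors the TX path: one coherent allocation for the
 * descriptors, then every slot gets a freshly mapped skb and is
 * handed to the hardware via jme_set_clean_rxdesc().
 */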
static int
jme_setup_rx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *rxring = &(jme->rxring[0]);

        rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                                   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
                                   &(rxring->dmaalloc),
                                   GFP_ATOMIC);
        if(!rxring->alloc) {
                rxring->desc = NULL;
                rxring->dmaalloc = 0;
                rxring->dma = 0;
                return -ENOMEM;
        }

        /*
         * 16-byte alignment
         */
        rxring->desc            = (void*)ALIGN((unsigned long)(rxring->alloc),
                                                RING_DESC_ALIGN);
        rxring->dma             = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
        rxring->next_to_use     = 0;
        atomic_set(&rxring->next_to_clean, 0);

        /*
         * Initialize Receive Descriptors
         */
        for(i = 0 ; i < jme->rx_ring_size ; ++i) {
                if(unlikely(jme_make_new_rx_buf(jme, i))) {
                        jme_free_rx_resources(jme);
                        return -ENOMEM;
                }

                jme_set_clean_rxdesc(jme, i);
        }

        return 0;
}

__always_inline static void
jme_enable_rx_engine(struct jme_adapter *jme)
{
        /*
         * Setup RX DMA Base Address
         */
        jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
        jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
        jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);

        /*
         * Setup RX Descriptor Count
         */
        jwrite32(jme, JME_RXQDC, jme->rx_ring_size);

        /*
         * Setup Unicast Filter
         */
        jme_set_multi(jme->dev);

        /*
         * Enable RX Engine
         */
        wmb();
        jwrite32(jme, JME_RXCS, jme->reg_rxcs |
                                RXCS_QUEUESEL_Q0 |
                                RXCS_ENABLE |
                                RXCS_QST);
}

__always_inline static void
jme_restart_rx_engine(struct jme_adapter *jme)
{
        /*
         * Start RX Engine
         */
        jwrite32(jme, JME_RXCS, jme->reg_rxcs |
                                RXCS_QUEUESEL_Q0 |
                                RXCS_ENABLE |
                                RXCS_QST);
}

__always_inline static void
jme_disable_rx_engine(struct jme_adapter *jme)
{
        int i;
        __u32 val;

        /*
         * Disable RX Engine
         */
        jwrite32(jme, JME_RXCS, jme->reg_rxcs);

        val = jread32(jme, JME_RXCS);
        for(i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i)
        {
                mdelay(1);
                val = jread32(jme, JME_RXCS);
        }

        if(!i)
                jeprintk(jme->dev->name, "Disable RX engine timeout.\n");
}

static int
jme_rxsum_ok(struct jme_adapter *jme, __u16 flags)
{
        if(!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
                return false;

        if(unlikely((flags & RXWBFLAG_TCPON) &&
        !(flags & RXWBFLAG_TCPCS))) {
                csum_dbg(jme->dev->name, "TCP Checksum error.\n");
                goto out_sumerr;
        }

        if(unlikely((flags & RXWBFLAG_UDPON) &&
        !(flags & RXWBFLAG_UDPCS))) {
                csum_dbg(jme->dev->name, "UDP Checksum error.\n");
                goto out_sumerr;
        }

        if(unlikely((flags & RXWBFLAG_IPV4) &&
        !(flags & RXWBFLAG_IPCS))) {
                csum_dbg(jme->dev->name, "IPv4 Checksum error.\n");
                goto out_sumerr;
        }

        return true;

out_sumerr:
        csum_dbg(jme->dev->name, "%s%s%s%s\n",
                (flags & RXWBFLAG_IPV4)?"IPv4 ":"",
                (flags & RXWBFLAG_IPV6)?"IPv6 ":"",
                (flags & RXWBFLAG_UDPON)?"UDP ":"",
                (flags & RXWBFLAG_TCPON)?"TCP":"");
        return false;
}

static void
jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        volatile struct rxdesc *rxdesc = rxring->desc;
        struct jme_buffer_info *rxbi = rxring->bufinf;
        struct sk_buff *skb;
        int framesize;

        rxdesc += idx;
        rxbi += idx;

        skb = rxbi->skb;
        pci_dma_sync_single_for_cpu(jme->pdev,
                                        rxbi->mapping,
                                        rxbi->len,
                                        PCI_DMA_FROMDEVICE);

        if(unlikely(jme_make_new_rx_buf(jme, idx))) {
                pci_dma_sync_single_for_device(jme->pdev,
                                                rxbi->mapping,
                                                rxbi->len,
                                                PCI_DMA_FROMDEVICE);

                ++(NET_STAT(jme).rx_dropped);
        }
        else {
                framesize = le16_to_cpu(rxdesc->descwb.framesize)
                                - RX_PREPAD_SIZE;

                skb_reserve(skb, RX_PREPAD_SIZE);
                skb_put(skb, framesize);
                skb->protocol = eth_type_trans(skb, jme->dev);

                if(jme_rxsum_ok(jme, rxdesc->descwb.flags))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb->ip_summed = CHECKSUM_NONE;

                if(rxdesc->descwb.flags & RXWBFLAG_TAGON) {
                        vlan_dbg(jme->dev->name, "VLAN: %04x\n",
                                        rxdesc->descwb.vlan);
                        if(jme->vlgrp) {
                                vlan_dbg(jme->dev->name,
                                        "VLAN Passed to kernel.\n");
                                jme->jme_vlan_rx(skb, jme->vlgrp,
                                        le32_to_cpu(rxdesc->descwb.vlan));
                                NET_STAT(jme).rx_bytes += 4;
                        }
                }
                else {
                        jme->jme_rx(skb);
                }

                if((le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST) ==
                                RXWBFLAG_DEST_MUL)
                        ++(NET_STAT(jme).multicast);

                jme->dev->last_rx = jiffies;
                NET_STAT(jme).rx_bytes += framesize;
                ++(NET_STAT(jme).rx_packets);
        }

        jme_set_clean_rxdesc(jme, idx);
}

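/*
 * Walk the RX ring from next_to_clean: stop at the first descriptor
 * still owned by hardware (RXWBFLAG_OWN) or not yet written back.
 * Indices wrap with the power-of-two ring mask, e.g.
 *      next = (cur + desccnt) & jme->rx_ring_mask;
 * Errored or multi-descriptor frames are recycled in place.
 */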
static int
jme_process_receive(struct jme_adapter *jme, int limit)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        volatile struct rxdesc *rxdesc = rxring->desc;
        int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;

        if(unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
                goto out_inc;

        if(unlikely(atomic_read(&jme->link_changing) != 1))
                goto out_inc;

        if(unlikely(!netif_carrier_ok(jme->dev)))
                goto out_inc;

        i = atomic_read(&rxring->next_to_clean);
        while( limit-- > 0 )
        {
                rxdesc = rxring->desc;
                rxdesc += i;

                if((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
                !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
                        goto out;

                desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

                rx_dbg(jme->dev->name, "RX: Cleaning %d\n", i);

                if(unlikely(desccnt > 1 ||
                rxdesc->descwb.errstat & RXWBERR_ALLERR)) {

                        if(rxdesc->descwb.errstat & RXWBERR_CRCERR)
                                ++(NET_STAT(jme).rx_crc_errors);
                        else if(rxdesc->descwb.errstat & RXWBERR_OVERUN)
                                ++(NET_STAT(jme).rx_fifo_errors);
                        else
                                ++(NET_STAT(jme).rx_errors);

                        if(desccnt > 1) {
                                rx_dbg(jme->dev->name,
                                        "RX: More than one(%d) descriptor, "
                                        "framelen=%d\n",
                                        desccnt, le16_to_cpu(rxdesc->descwb.framesize));
                                limit -= desccnt - 1;
                        }

                        for(j = i, ccnt = desccnt ; ccnt-- ; ) {
                                jme_set_clean_rxdesc(jme, j);
                                j = (j + 1) & (mask);
                        }
                }
                else {
                        jme_alloc_and_feed_skb(jme, i);
                }

                i = (i + desccnt) & (mask);
        }

out:
        rx_dbg(jme->dev->name, "RX: Stop at %d\n", i);
        rx_dbg(jme->dev->name, "RX: RXNDA offset %d\n",
                (jread32(jme, JME_RXNDA) - jread32(jme, JME_RXDBA_LO))
                        >> 4);

        atomic_set(&rxring->next_to_clean, i);

out_inc:
        atomic_inc(&jme->rx_cleaning);

        return limit > 0 ? limit : 0;
}

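/*
 * Dynamic PCC tuning: sample RX byte/packet deltas each timer tick
 * and vote for a coalescing level (P3 for bulk transfers, P2 for
 * packet storms, P1 otherwise). A level is only committed after it
 * wins more than five consecutive votes, which keeps the setting
 * from flapping.
 */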
static void
jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
{
        if(likely(atmp == dpi->cur)) {
                dpi->cnt = 0;
                return;
        }

        if(dpi->attempt == atmp) {
                ++(dpi->cnt);
        }
        else {
                dpi->attempt = atmp;
                dpi->cnt = 0;
        }
}

static void
jme_dynamic_pcc(struct jme_adapter *jme)
{
        register struct dynpcc_info *dpi = &(jme->dpi);

        if((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
                jme_attempt_pcc(dpi, PCC_P3);
        else if((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD
        || dpi->intr_cnt > PCC_INTR_THRESHOLD)
                jme_attempt_pcc(dpi, PCC_P2);
        else
                jme_attempt_pcc(dpi, PCC_P1);

        if(unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
                jme_set_rx_pcc(jme, dpi->attempt);
                dpi->cur = dpi->attempt;
                dpi->cnt = 0;
        }
}

static void
jme_start_pcc_timer(struct jme_adapter *jme)
{
        struct dynpcc_info *dpi = &(jme->dpi);
        dpi->last_bytes         = NET_STAT(jme).rx_bytes;
        dpi->last_pkts          = NET_STAT(jme).rx_packets;
        dpi->intr_cnt           = 0;
        jwrite32(jme, JME_TMCSR,
                TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
}

__always_inline static void
jme_stop_pcc_timer(struct jme_adapter *jme)
{
        jwrite32(jme, JME_TMCSR, 0);
}

static void
jme_pcc_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter*)arg;
        struct net_device *netdev = jme->dev;

        if(unlikely(!netif_carrier_ok(netdev) ||
                (atomic_read(&jme->link_changing) != 1)
        )) {
                jme_stop_pcc_timer(jme);
                return;
        }

        if(!(jme->flags & JME_FLAG_POLL))
                jme_dynamic_pcc(jme);

        jme_start_pcc_timer(jme);
}

__always_inline static void
jme_polling_mode(struct jme_adapter *jme)
{
        jme_set_rx_pcc(jme, PCC_OFF);
}

__always_inline static void
jme_interrupt_mode(struct jme_adapter *jme)
{
        jme_set_rx_pcc(jme, PCC_P1);
}

static void
jme_link_change_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter*)arg;
        struct net_device *netdev = jme->dev;
        int timeout = WAIT_TASKLET_TIMEOUT;
        int rc;

        if(!atomic_dec_and_test(&jme->link_changing))
                goto out;

        if(jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
                goto out;

        jme->old_mtu = netdev->mtu;
        netif_stop_queue(netdev);

        while(--timeout > 0 &&
                (
                atomic_read(&jme->rx_cleaning) != 1 ||
                atomic_read(&jme->tx_cleaning) != 1
                )) {

                mdelay(1);
        }

        if(netif_carrier_ok(netdev)) {
                jme_stop_pcc_timer(jme);
                jme_reset_mac_processor(jme);
                jme_free_rx_resources(jme);
                jme_free_tx_resources(jme);

                if(jme->flags & JME_FLAG_POLL)
                        jme_polling_mode(jme);
        }

        jme_check_link(netdev, 0);
        if(netif_carrier_ok(netdev)) {
                rc = jme_setup_rx_resources(jme);
                if(rc) {
                        jeprintk(netdev->name,
                                "Error allocating RX resources"
                                ", device stopped!\n");
                        goto out;
                }

                rc = jme_setup_tx_resources(jme);
                if(rc) {
                        jeprintk(netdev->name,
                                "Error allocating TX resources"
                                ", device stopped!\n");
                        goto err_out_free_rx_resources;
                }

                jme_enable_rx_engine(jme);
                jme_enable_tx_engine(jme);

                netif_start_queue(netdev);

                if(jme->flags & JME_FLAG_POLL)
                        jme_interrupt_mode(jme);

                jme_start_pcc_timer(jme);
        }

        goto out;

err_out_free_rx_resources:
        jme_free_rx_resources(jme);
out:
        atomic_inc(&jme->link_changing);
}

static void
jme_rx_clean_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter*)arg;
        struct dynpcc_info *dpi = &(jme->dpi);

        jme_process_receive(jme, jme->rx_ring_size);
        ++(dpi->intr_cnt);
}

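/*
 * NAPI poll handler. If the RX0EMP interrupt marked the ring empty,
 * restart the RX engine after refilling; a budget that is not fully
 * consumed means the ring drained, so polling is complete and the
 * device drops back to interrupt (PCC_P1) mode.
 */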
static int
jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
{
        struct jme_adapter *jme = jme_napi_priv(holder);
        struct net_device *netdev = jme->dev;
        int rest;

        rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));

        while(atomic_read(&jme->rx_empty) > 0) {
                atomic_dec(&jme->rx_empty);
                ++(NET_STAT(jme).rx_dropped);
                jme_restart_rx_engine(jme);
        }
        atomic_inc(&jme->rx_empty);

        if(rest) {
                JME_RX_COMPLETE(netdev, holder);
                jme_interrupt_mode(jme);
        }

        JME_NAPI_WEIGHT_SET(budget, rest);
        return JME_NAPI_WEIGHT_VAL(budget) - rest;
}

static void
jme_rx_empty_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter*)arg;

        if(unlikely(atomic_read(&jme->link_changing) != 1))
                return;

        if(unlikely(!netif_carrier_ok(jme->dev)))
                return;

        queue_dbg(jme->dev->name, "RX Queue Full!\n");

        jme_rx_clean_tasklet(arg);

        while(atomic_read(&jme->rx_empty) > 0) {
                atomic_dec(&jme->rx_empty);
                ++(NET_STAT(jme).rx_dropped);
                jme_restart_rx_engine(jme);
        }
        atomic_inc(&jme->rx_empty);
}

static void
jme_wake_queue_if_stopped(struct jme_adapter *jme)
{
        struct jme_ring *txring = jme->txring;

        smp_wmb();
        if(unlikely(netif_queue_stopped(jme->dev) &&
        atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
                queue_dbg(jme->dev->name, "TX Queue Waked.\n");
                netif_wake_queue(jme->dev);
        }
}

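/*
 * TX completion: walk from next_to_clean, skip descriptors still
 * owned by hardware, unmap every fragment page of a finished skb,
 * free it, and return the descriptors to nr_free so a stopped queue
 * can be woken.
 */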
static void
jme_tx_clean_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter*)arg;
        struct jme_ring *txring = &(jme->txring[0]);
        volatile struct txdesc *txdesc = txring->desc;
        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
        int i, j, cnt = 0, max, err, mask;

        if(unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
                goto out;

        if(unlikely(atomic_read(&jme->link_changing) != 1))
                goto out;

        if(unlikely(!netif_carrier_ok(jme->dev)))
                goto out;

        max = jme->tx_ring_size - atomic_read(&txring->nr_free);
        mask = jme->tx_ring_mask;

        tx_dbg(jme->dev->name, "Tx Tasklet: In\n");

        for(i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {

                ctxbi = txbi + i;

                if(likely(ctxbi->skb &&
                !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {

                        err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;

                        tx_dbg(jme->dev->name,
                                "Tx Tasklet: Clean %d+%d\n",
                                i, ctxbi->nr_desc);

                        for(j = 1 ; j < ctxbi->nr_desc ; ++j) {
                                ttxbi = txbi + ((i + j) & (mask));
                                txdesc[(i + j) & (mask)].dw[0] = 0;

                                pci_unmap_page(jme->pdev,
                                                 ttxbi->mapping,
                                                 ttxbi->len,
                                                 PCI_DMA_TODEVICE);

                                ttxbi->mapping = 0;
                                ttxbi->len = 0;
                        }

                        dev_kfree_skb(ctxbi->skb);

                        cnt += ctxbi->nr_desc;

                        if(unlikely(err))
                                ++(NET_STAT(jme).tx_carrier_errors);
                        else {
                                ++(NET_STAT(jme).tx_packets);
                                NET_STAT(jme).tx_bytes += ctxbi->len;
                        }

                        ctxbi->skb = NULL;
                        ctxbi->len = 0;
                        ctxbi->start_xmit = 0;
                }
                else {
                        if(!ctxbi->skb)
                                tx_dbg(jme->dev->name,
                                        "Tx Tasklet:"
                                        " Stopped due to no skb.\n");
                        else
                                tx_dbg(jme->dev->name,
                                        "Tx Tasklet:"
                                        " Stopped due to not done.\n");
                        break;
                }

                i = (i + ctxbi->nr_desc) & mask;

                ctxbi->nr_desc = 0;
        }

        tx_dbg(jme->dev->name,
                "Tx Tasklet: Stop %d Jiffies %lu\n",
                i, jiffies);

        atomic_set(&txring->next_to_clean, i);
        atomic_add(cnt, &txring->nr_free);

        jme_wake_queue_if_stopped(jme);

out:
        atomic_inc(&jme->tx_cleaning);
}

static void
jme_intr_msi(struct jme_adapter *jme, __u32 intrstat)
{
        /*
         * Disable interrupt
         */
        jwrite32f(jme, JME_IENC, INTR_ENABLE);

        /*
         * Write 1 clear interrupt status
         */
        jwrite32f(jme, JME_IEVE, intrstat);

        if(intrstat & (INTR_LINKCH | INTR_SWINTR)) {
                tasklet_schedule(&jme->linkch_task);
                goto out_reenable;
        }

        if(intrstat & INTR_TMINTR)
                tasklet_schedule(&jme->pcc_task);

        if(intrstat & (INTR_PCCTXTO | INTR_PCCTX))
                tasklet_schedule(&jme->txclean_task);

        if(jme->flags & JME_FLAG_POLL) {
                if(intrstat & INTR_RX0EMP)
                        atomic_inc(&jme->rx_empty);

                if((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
                        if(likely(JME_RX_SCHEDULE_PREP(jme))) {
                                jme_polling_mode(jme);
                                JME_RX_SCHEDULE(jme);
                        }
                }
        }
        else {
                if(intrstat & INTR_RX0EMP) {
                        atomic_inc(&jme->rx_empty);
                        tasklet_schedule(&jme->rxempty_task);
                }

                if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
                        tasklet_schedule(&jme->rxclean_task);
        }

out_reenable:
        /*
         * Re-enable interrupt
         */
        jwrite32f(jme, JME_IENS, INTR_ENABLE);
}

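/*
 * Shared-INTx entry point: intrstat == 0 means the interrupt was not
 * ours, all-ones means the device is gone (e.g. hot-unplugged), so
 * both return IRQ_NONE. The MSI handler instead reads the interrupt
 * status from the DMA-posted shadow register block, avoiding an MMIO
 * read in the hot path.
 */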
static irqreturn_t
jme_intr(int irq, void *dev_id)
{
        struct net_device *netdev = dev_id;
        struct jme_adapter *jme = netdev_priv(netdev);
        __u32 intrstat;

        intrstat = jread32(jme, JME_IEVE);

        /*
         * Check if it's really an interrupt for us
         */
        if(unlikely(intrstat == 0))
                return IRQ_NONE;

        /*
         * Check if the device still exists
         */
        if(unlikely(intrstat == ~((typeof(intrstat))0)))
                return IRQ_NONE;

        jme_intr_msi(jme, intrstat);

        return IRQ_HANDLED;
}

static irqreturn_t
jme_msi(int irq, void *dev_id)
{
        struct net_device *netdev = dev_id;
        struct jme_adapter *jme = netdev_priv(netdev);
        __u32 intrstat;

        pci_dma_sync_single_for_cpu(jme->pdev,
                                    jme->shadow_dma,
                                    sizeof(__u32) * SHADOW_REG_NR,
                                    PCI_DMA_FROMDEVICE);
        intrstat = jme->shadow_regs[SHADOW_IEVE];
        jme->shadow_regs[SHADOW_IEVE] = 0;

        jme_intr_msi(jme, intrstat);

        return IRQ_HANDLED;
}

static void
jme_reset_link(struct jme_adapter *jme)
{
        jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
}

static void
jme_restart_an(struct jme_adapter *jme)
{
        __u32 bmcr;
        unsigned long flags;

        spin_lock_irqsave(&jme->phy_lock, flags);
        bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
        bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
        spin_unlock_irqrestore(&jme->phy_lock, flags);
}

static int
jme_request_irq(struct jme_adapter *jme)
{
        int rc;
        struct net_device *netdev = jme->dev;
        irq_handler_t handler = jme_intr;
        int irq_flags = IRQF_SHARED;

        if (!pci_enable_msi(jme->pdev)) {
                jme->flags |= JME_FLAG_MSI;
                handler = jme_msi;
                irq_flags = 0;
        }

        rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
                          netdev);
        if(rc) {
                jeprintk(netdev->name,
                        "Unable to request %s interrupt (return: %d)\n",
                        jme->flags & JME_FLAG_MSI ? "MSI":"INTx", rc);

                if(jme->flags & JME_FLAG_MSI) {
                        pci_disable_msi(jme->pdev);
                        jme->flags &= ~JME_FLAG_MSI;
                }
        }
        else {
                netdev->irq = jme->pdev->irq;
        }

        return rc;
}

static void
jme_free_irq(struct jme_adapter *jme)
{
        free_irq(jme->pdev->irq, jme->dev);
        if (jme->flags & JME_FLAG_MSI) {
                pci_disable_msi(jme->pdev);
                jme->flags &= ~JME_FLAG_MSI;
                jme->dev->irq = jme->pdev->irq;
        }
}

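/*
 * Open waits for any in-flight link-change/RX/TX tasklets to settle
 * (each atomic sits at 1 when idle) before resetting the MAC,
 * requesting the IRQ, and kicking off link detection.
 */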
static int
jme_open(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int rc, timeout = 10;

        while(
                --timeout > 0 &&
                (
                atomic_read(&jme->link_changing) != 1 ||
                atomic_read(&jme->rx_cleaning) != 1 ||
                atomic_read(&jme->tx_cleaning) != 1
                )
        )
                msleep(1);

        if(!timeout) {
                rc = -EBUSY;
                goto err_out;
        }

        jme_clear_pm(jme);
        jme_reset_mac_processor(jme);
        JME_NAPI_ENABLE(jme);

        rc = jme_request_irq(jme);
        if(rc)
                goto err_out;

        jme_enable_shadow(jme);
        jme_start_irq(jme);

        if(jme->flags & JME_FLAG_SSET)
                jme_set_settings(netdev, &jme->old_ecmd);
        else
                jme_reset_phy_processor(jme);

        jme_reset_link(jme);

        return 0;

err_out:
        netif_stop_queue(netdev);
        netif_carrier_off(netdev);
        return rc;
}

static void
jme_set_100m_half(struct jme_adapter *jme)
{
        __u32 bmcr, tmp;

        bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
        tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
                       BMCR_SPEED1000 | BMCR_FULLDPLX);
        tmp |= BMCR_SPEED100;

        if (bmcr != tmp)
                jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);

        if(jme->fpgaver)
                jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL);
        else
                jwrite32(jme, JME_GHC, GHC_SPEED_100M);
}

static void
jme_phy_off(struct jme_adapter *jme)
{
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
}

static int
jme_close(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);

        netif_stop_queue(netdev);
        netif_carrier_off(netdev);

        jme_stop_irq(jme);
        jme_disable_shadow(jme);
        jme_free_irq(jme);

        JME_NAPI_DISABLE(jme);

        tasklet_kill(&jme->linkch_task);
        tasklet_kill(&jme->txclean_task);
        tasklet_kill(&jme->rxclean_task);
        tasklet_kill(&jme->rxempty_task);

        jme_reset_mac_processor(jme);
        jme_free_rx_resources(jme);
        jme_free_tx_resources(jme);
        jme->phylink = 0;
        jme_phy_off(jme);

        return 0;
}

1586 static int
1587 jme_alloc_txdesc(struct jme_adapter *jme,
1588                         struct sk_buff *skb)
1589 {
1590         struct jme_ring *txring = jme->txring;
1591         int idx, nr_alloc, mask = jme->tx_ring_mask;
1592
1593         idx = txring->next_to_use;
1594         nr_alloc = skb_shinfo(skb)->nr_frags + 2;
1595
1596         if(unlikely(atomic_read(&txring->nr_free) < nr_alloc))
1597                 return -1;
1598
1599         atomic_sub(nr_alloc, &txring->nr_free);
1600
1601         txring->next_to_use = (txring->next_to_use + nr_alloc) & mask;
1602
1603         return idx;
1604 }
1605
1606 static void
1607 jme_fill_tx_map(struct pci_dev *pdev,
1608                 volatile struct txdesc *txdesc,
1609                 struct jme_buffer_info *txbi,
1610                 struct page *page,
1611                 __u32 page_offset,
1612                 __u32 len,
1613                 __u8 hidma)
1614 {
1615         dma_addr_t dmaaddr;
1616
1617         dmaaddr = pci_map_page(pdev,
1618                                 page,
1619                                 page_offset,
1620                                 len,
1621                                 PCI_DMA_TODEVICE);
1622
1623         pci_dma_sync_single_for_device(pdev,
1624                                        dmaaddr,
1625                                        len,
1626                                        PCI_DMA_TODEVICE);
1627
1628         txdesc->dw[0] = 0;
1629         txdesc->dw[1] = 0;
1630         txdesc->desc2.flags     = TXFLAG_OWN;
1631         txdesc->desc2.flags     |= (hidma)?TXFLAG_64BIT:0;
1632         txdesc->desc2.datalen   = cpu_to_le16(len);
1633         txdesc->desc2.bufaddrh  = cpu_to_le32((__u64)dmaaddr >> 32);
1634         txdesc->desc2.bufaddrl  = cpu_to_le32(
1635                                         (__u64)dmaaddr & 0xFFFFFFFFUL);
1636
1637         txbi->mapping = dmaaddr;
1638         txbi->len = len;
1639 }
1640
1641 static void
1642 jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
1643 {
1644         struct jme_ring *txring = jme->txring;
1645         volatile struct txdesc *txdesc = txring->desc, *ctxdesc;
1646         struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
1647         __u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
1648         int i, nr_frags = skb_shinfo(skb)->nr_frags;
1649         int mask = jme->tx_ring_mask;
1650         struct skb_frag_struct *frag;
1651         __u32 len;
1652
1653         for(i = 0 ; i < nr_frags ; ++i) {
1654                 frag = &skb_shinfo(skb)->frags[i];
1655                 ctxdesc = txdesc + ((idx + i + 2) & (mask));
1656                 ctxbi = txbi + ((idx + i + 2) & (mask));
1657
1658                 jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page,
1659                                  frag->page_offset, frag->size, hidma);
1660         }
1661
1662         len = skb_is_nonlinear(skb)?skb_headlen(skb):skb->len;
1663         ctxdesc = txdesc + ((idx + 1) & (mask));
1664         ctxbi = txbi + ((idx + 1) & (mask));
1665         jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
1666                         offset_in_page(skb->data), len, hidma);
1667
1668 }
1669
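     /*
      * TSO rewrites the IP/TCP checksum fields below, so a GSO skb
      * whose header is shared with a clone must get a private copy
      * of its header first.
      */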
1670 static int
1671 jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
1672 {
1673         if(unlikely(skb_shinfo(skb)->gso_size &&
1674                         skb_header_cloned(skb) &&
1675                         pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
1676                 dev_kfree_skb(skb);
1677                 return -1;
1678         }
1679
1680         return 0;
1681 }
1682
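     /*
      * Set up TSO when gso_size is set: store the MSS in the header
      * descriptor and seed the TCP pseudo-header checksum so the
      * hardware can fill in the rest.  Returns 0 when TSO is used,
      * 1 to let the caller fall back to plain checksum offload.
      */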
1683 static int
1684 jme_tx_tso(struct sk_buff *skb,
1685                 volatile __u16 *mss, __u8 *flags)
1686 {
1687         if((*mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT))) {
1688                 *flags |= TXFLAG_LSEN;
1689
1690                 if(skb->protocol == __constant_htons(ETH_P_IP)) {
1691                         struct iphdr *iph = ip_hdr(skb);
1692
1693                         iph->check = 0;
1694                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1695                                                                 iph->daddr, 0,
1696                                                                 IPPROTO_TCP,
1697                                                                 0);
1698                 }
1699                 else {
1700                         struct ipv6hdr *ip6h = ipv6_hdr(skb);
1701
1702                         tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
1703                                                                 &ip6h->daddr, 0,
1704                                                                 IPPROTO_TCP,
1705                                                                 0);
1706                 }
1707
1708                 return 0;
1709         }
1710
1711         return 1;
1712 }
1713
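     /*
      * For CHECKSUM_PARTIAL skbs, tell the hardware which checksum
      * engine (TCP or UDP) to run over the packet.
      */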
1714 static void
1715 jme_tx_csum(struct sk_buff *skb, __u8 *flags)
1716 {
1717         if(skb->ip_summed == CHECKSUM_PARTIAL) {
1718                 __u8 ip_proto;
1719
1720                 switch (skb->protocol) {
1721                 case __constant_htons(ETH_P_IP):
1722                         ip_proto = ip_hdr(skb)->protocol;
1723                         break;
1724                 case __constant_htons(ETH_P_IPV6):
1725                         ip_proto = ipv6_hdr(skb)->nexthdr;
1726                         break;
1727                 default:
1728                         ip_proto = 0;
1729                         break;
1730                 }
1731
1732                 switch(ip_proto) {
1733                 case IPPROTO_TCP:
1734                         *flags |= TXFLAG_TCPCS;
1735                         break;
1736                 case IPPROTO_UDP:
1737                         *flags |= TXFLAG_UDPCS;
1738                         break;
1739                 default:
1740                         jeprintk("jme", "Unsupported upper layer protocol.\n");
1741                         break;
1742                 }
1743         }
1744 }
1745
1746 __always_inline static void
1747 jme_tx_vlan(struct sk_buff *skb, volatile __u16 *vlan, __u8 *flags)
1748 {
1749         if(vlan_tx_tag_present(skb)) {
1750                 vlan_dbg("jme", "Tag found!(%04x)\n", vlan_tx_tag_get(skb));
1751                 *flags |= TXFLAG_TAGON;
1752                 *vlan = cpu_to_le16(vlan_tx_tag_get(skb));
1753         }
1754 }
1755
1756 static int
1757 jme_fill_first_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
1758 {
1759         struct jme_ring *txring = jme->txring;
1760         volatile struct txdesc *txdesc;
1761         struct jme_buffer_info *txbi;
1762         __u8 flags;
1763
1764         txdesc = (volatile struct txdesc*)txring->desc + idx;
1765         txbi = txring->bufinf + idx;
1766
1767         txdesc->dw[0] = 0;
1768         txdesc->dw[1] = 0;
1769         txdesc->dw[2] = 0;
1770         txdesc->dw[3] = 0;
1771         txdesc->desc1.pktsize = cpu_to_le16(skb->len);
1772         /*
1773          * Set the OWN bit last: if the kernel fills descriptors
1774          * faster than the NIC drains them, the NIC could otherwise
1775          * pick up this descriptor before we tell it to start
1776          * sending on this TX queue.
1777          * All other fields are already filled in correctly by then.
1778          */
1779         wmb();
1780         flags = TXFLAG_OWN | TXFLAG_INT;
1781         /* Set checksum flags only when not doing TSO */
1782         if(jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
1783                 jme_tx_csum(skb, &flags);
1784         jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
1785         txdesc->desc1.flags = flags;
1786         /*
1787          * Fill in the tx buffer info only after handing the
1788          * descriptor to the NIC, for better tx-clean timing.
1789          */
1790         wmb();
1791         txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
1792         txbi->skb = skb;
1793         txbi->len = skb->len;
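             /*
              * jiffies can legitimately be 0; substitute 1 so that a
              * start_xmit of 0 keeps meaning "not started".
              */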
1794         if(!(txbi->start_xmit = jiffies))
1795                 txbi->start_xmit = 1;
1796
1797         return 0;
1798 }
1799
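     /*
      * Stop the queue once the free descriptors could no longer hold
      * a worst-case skb (MAX_SKB_FRAGS + 2), then re-check after the
      * barrier so a concurrent tx-clean can wake it right back up.
      * The queue is also stopped when the oldest in-flight skb has
      * been pending longer than TX_TIMEOUT.
      */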
1800 static void
1801 jme_stop_queue_if_full(struct jme_adapter *jme)
1802 {
1803         struct jme_ring *txring = jme->txring;
1804         struct jme_buffer_info *txbi = txring->bufinf;
1805
1806         txbi += atomic_read(&txring->next_to_clean);
1807
1808         smp_wmb();
1809         if(unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
1810                 netif_stop_queue(jme->dev);
1811                 queue_dbg(jme->dev->name, "TX Queue Paused.\n");
1812                 smp_wmb();
1813                 if (atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold)) {
1814                         netif_wake_queue(jme->dev);
1815                         queue_dbg(jme->dev->name, "TX Queue Fast Woken.\n");
1816                 }
1817         }
1818
1819         if(unlikely(    txbi->start_xmit &&
1820                         (jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
1821                         txbi->skb)) {
1822                 netif_stop_queue(jme->dev);
1823         }
1824 }
1825
1826 /*
1827  * This function is already protected by netif_tx_lock()
1828  */
1829 static int
1830 jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1831 {
1832         struct jme_adapter *jme = netdev_priv(netdev);
1833         int idx;
1834
1835         if(skb_shinfo(skb)->nr_frags) {
1836                 tx_dbg(netdev->name, "Frags: %d Headlen: %d Len: %d MSS: %d Sum: %d\n",
1837                         skb_shinfo(skb)->nr_frags,
1838                         skb_headlen(skb),
1839                         skb->len,
1840                         skb_shinfo(skb)->gso_size,
1841                         skb->ip_summed);
1842         }
1843
1844         if(unlikely(jme_expand_header(jme, skb))) {
1845                 ++(NET_STAT(jme).tx_dropped);
1846                 return NETDEV_TX_OK;
1847         }
1848
1849         idx = jme_alloc_txdesc(jme, skb);
1850
1851         if(unlikely(idx < 0)) {
1852                 netif_stop_queue(netdev);
1853                 jeprintk(netdev->name,
1854                                 "BUG! Tx ring full when queue awake!\n");
1855
1856                 return NETDEV_TX_BUSY;
1857         }
1858
1859         jme_map_tx_skb(jme, skb, idx);
1860         jme_fill_first_tx_desc(jme, skb, idx);
1861
1862         tx_dbg(jme->dev->name, "Xmit: %d+%d\n", idx, skb_shinfo(skb)->nr_frags + 2);
1863
1864         jwrite32(jme, JME_TXCS, jme->reg_txcs |
1865                                 TXCS_SELECT_QUEUE0 |
1866                                 TXCS_QUEUE0S |
1867                                 TXCS_ENABLE);
1868         netdev->trans_start = jiffies;
1869
1870         jme_stop_queue_if_full(jme);
1871
1872         return NETDEV_TX_OK;
1873 }
1874
1875 static int
1876 jme_set_macaddr(struct net_device *netdev, void *p)
1877 {
1878         struct jme_adapter *jme = netdev_priv(netdev);
1879         struct sockaddr *addr = p;
1880         __u32 val;
1881
1882         if(netif_running(netdev))
1883                 return -EBUSY;
1884
1885         spin_lock(&jme->macaddr_lock);
1886         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1887
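             /*
              * Program the station address into the unicast address
              * registers; mask each byte since sa_data is plain char
              * and could otherwise sign-extend.
              */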
1888         val = (addr->sa_data[3] & 0xff) << 24 |
1889               (addr->sa_data[2] & 0xff) << 16 |
1890               (addr->sa_data[1] & 0xff) <<  8 |
1891               (addr->sa_data[0] & 0xff);
1892         jwrite32(jme, JME_RXUMA_LO, val);
1893         val = (addr->sa_data[5] & 0xff) << 8 |
1894               (addr->sa_data[4] & 0xff);
1895         jwrite32(jme, JME_RXUMA_HI, val);
1896         spin_unlock(&jme->macaddr_lock);
1897
1898         return 0;
1899 }
1900
1901 static void
1902 jme_set_multi(struct net_device *netdev)
1903 {
1904         struct jme_adapter *jme = netdev_priv(netdev);
1905         u32 mc_hash[2] = {};
1906         int i;
1907         unsigned long flags;
1908
1909         spin_lock_irqsave(&jme->rxmcs_lock, flags);
1910
1911         jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;
1912
1913         if (netdev->flags & IFF_PROMISC) {
1914                 jme->reg_rxmcs |= RXMCS_ALLFRAME;
1915         }
1916         else if (netdev->flags & IFF_ALLMULTI) {
1917                 jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
1918         }
1919         else if(netdev->flags & IFF_MULTICAST) {
1920                 struct dev_mc_list *mclist;
1921                 int bit_nr;
1922
1923                 jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
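                     /*
                      * Build the 64-bit multicast hash table: the low
                      * six bits of each address's CRC pick one bit.
                      */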
1924                 for (i = 0, mclist = netdev->mc_list;
1925                         mclist && i < netdev->mc_count;
1926                         ++i, mclist = mclist->next) {
1927
1928                         bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
1929                         mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
1930                 }
1931
1932                 jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
1933                 jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
1934         }
1935
1936         wmb();
1937         jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
1938
1939         spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
1940 }
1941
1942 static int
1943 jme_change_mtu(struct net_device *netdev, int new_mtu)
1944 {
1945         struct jme_adapter *jme = netdev_priv(netdev);
1946
1947         if(new_mtu == jme->old_mtu)
1948                 return 0;
1949
1950         if (new_mtu + ETH_HLEN > MAX_ETHERNET_JUMBO_PACKET_SIZE ||
1951             new_mtu < IPV6_MIN_MTU)
1952                 return -EINVAL;
1953
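             /*
              * Pick the RX FIFO threshold for the new frame size;
              * jumbo frames use the lower 64QW setting, presumably so
              * RX DMA starts draining the FIFO sooner.
              */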
1954         jme->reg_rxcs &= ~RXCS_FIFOTHNP;
1955         if (new_mtu > 4000)
1956                 jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
1957         else
1958                 jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
1959         jme_restart_rx_engine(jme);
1964
1965         if(new_mtu > 1900) {
1966                 netdev->features &= ~(NETIF_F_HW_CSUM |
1967                                 NETIF_F_TSO |
1968                                 NETIF_F_TSO6);
1969         }
1970         else {
1971                 if(jme->flags & JME_FLAG_TXCSUM)
1972                         netdev->features |= NETIF_F_HW_CSUM;
1973                 if(jme->flags & JME_FLAG_TSO)
1974                         netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
1975         }
1976
1977         netdev->mtu = new_mtu;
1978         jme_reset_link(jme);
1979
1980         return 0;
1981 }
1982
1983 static void
1984 jme_tx_timeout(struct net_device *netdev)
1985 {
1986         struct jme_adapter *jme = netdev_priv(netdev);
1987
1988         jme->phylink = 0;
1989         jme_reset_phy_processor(jme);
1990         if(jme->flags & JME_FLAG_SSET)
1991                 jme_set_settings(netdev, &jme->old_ecmd);
1992
1993         /*
1994          * Force the link to be reset and re-negotiated
1995          */
1996         jme_reset_link(jme);
1997 }
1998
1999 static void
2000 jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2001 {
2002         struct jme_adapter *jme = netdev_priv(netdev);
2003
2004         jme->vlgrp = grp;
2005 }
2006
2007 static void
2008 jme_get_drvinfo(struct net_device *netdev,
2009                      struct ethtool_drvinfo *info)
2010 {
2011         struct jme_adapter *jme = netdev_priv(netdev);
2012
2013         strcpy(info->driver, DRV_NAME);
2014         strcpy(info->version, DRV_VERSION);
2015         strcpy(info->bus_info, pci_name(jme->pdev));
2016 }
2017
2018 static int
2019 jme_get_regs_len(struct net_device *netdev)
2020 {
2021         return 0x400;
2022 }
2023
2024 static void
2025 mmapio_memcpy(struct jme_adapter *jme, __u32 *p, __u32 reg, int len)
2026 {
2027         int i;
2028
2029         for(i = 0 ; i < len ; i += 4)
2030                 p[i >> 2] = jread32(jme, reg + i);
2031
2032 }
2033
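     /*
      * Dump the four 0x100-byte register banks (MAC, PHY, MISC, RSS)
      * back to back into the 0x400-byte ethtool buffer.
      */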
2034 static void
2035 jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
2036 {
2037         struct jme_adapter *jme = netdev_priv(netdev);
2038         __u32 *p32 = (__u32*)p;
2039
2040         memset(p, 0, 0x400);
2041
2042         regs->version = 1;
2043         mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);
2044
2045         p32 += 0x100 >> 2;
2046         mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);
2047
2048         p32 += 0x100 >> 2;
2049         mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);
2050
2051         p32 += 0x100 >> 2;
2052         mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);
2053
2054 }
2055
2056 static int
2057 jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
2058 {
2059         struct jme_adapter *jme = netdev_priv(netdev);
2060
2061         ecmd->tx_coalesce_usecs = PCC_TX_TO;
2062         ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
2063
2064         if(jme->flags & JME_FLAG_POLL) {
2065                 ecmd->use_adaptive_rx_coalesce = false;
2066                 ecmd->rx_coalesce_usecs = 0;
2067                 ecmd->rx_max_coalesced_frames = 0;
2068                 return 0;
2069         }
2070
2071         ecmd->use_adaptive_rx_coalesce = true;
2072
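             /*
              * Report the timeout/frame-count pair of whichever PCC
              * level the adaptive coalescing algorithm is currently
              * using.
              */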
2073         switch(jme->dpi.cur) {
2074         case PCC_P1:
2075                 ecmd->rx_coalesce_usecs = PCC_P1_TO;
2076                 ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
2077                 break;
2078         case PCC_P2:
2079                 ecmd->rx_coalesce_usecs = PCC_P2_TO;
2080                 ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
2081                 break;
2082         case PCC_P3:
2083                 ecmd->rx_coalesce_usecs = PCC_P3_TO;
2084                 ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
2085                 break;
2086         default:
2087                 break;
2088         }
2089
2090         return 0;
2091 }
2092
2093 static int
2094 jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
2095 {
2096         struct jme_adapter *jme = netdev_priv(netdev);
2097         struct dynpcc_info *dpi = &(jme->dpi);
2098
2099         if(netif_running(netdev))
2100                 return -EBUSY;
2101
2102         if(ecmd->use_adaptive_rx_coalesce
2103         && (jme->flags & JME_FLAG_POLL)) {
2104                 jme->flags &= ~JME_FLAG_POLL;
2105                 jme->jme_rx = netif_rx;
2106                 jme->jme_vlan_rx = vlan_hwaccel_rx;
2107                 dpi->cur                = PCC_P1;
2108                 dpi->attempt            = PCC_P1;
2109                 dpi->cnt                = 0;
2110                 jme_set_rx_pcc(jme, PCC_P1);
2111                 jme_interrupt_mode(jme);
2112         }
2113         else if(!(ecmd->use_adaptive_rx_coalesce)
2114         && !(jme->flags & JME_FLAG_POLL)) {
2115                 jme->flags |= JME_FLAG_POLL;
2116                 jme->jme_rx = netif_receive_skb;
2117                 jme->jme_vlan_rx = vlan_hwaccel_receive_skb;
2118                 jme_interrupt_mode(jme);
2119         }
2120
2121         return 0;
2122 }
2123
2124 static void
2125 jme_get_pauseparam(struct net_device *netdev,
2126                         struct ethtool_pauseparam *ecmd)
2127 {
2128         struct jme_adapter *jme = netdev_priv(netdev);
2129         unsigned long flags;
2130         __u32 val;
2131
2132         ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
2133         ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;
2134
2135         spin_lock_irqsave(&jme->phy_lock, flags);
2136         val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2137         spin_unlock_irqrestore(&jme->phy_lock, flags);
2138
2139         ecmd->autoneg =
2140                 (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
2141 }
2142
2143 static int
2144 jme_set_pauseparam(struct net_device *netdev,
2145                         struct ethtool_pauseparam *ecmd)
2146 {
2147         struct jme_adapter *jme = netdev_priv(netdev);
2148         unsigned long flags;
2149         __u32 val;
2150
2151         if( ((jme->reg_txpfc & TXPFC_PF_EN) != 0) !=
2152                 (ecmd->tx_pause != 0)) {
2153
2154                 if(ecmd->tx_pause)
2155                         jme->reg_txpfc |= TXPFC_PF_EN;
2156                 else
2157                         jme->reg_txpfc &= ~TXPFC_PF_EN;
2158
2159                 jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
2160         }
2161
2162         spin_lock_irqsave(&jme->rxmcs_lock, flags);
2163         if( ((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) !=
2164                 (ecmd->rx_pause != 0)) {
2165
2166                 if(ecmd->rx_pause)
2167                         jme->reg_rxmcs |= RXMCS_FLOWCTRL;
2168                 else
2169                         jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;
2170
2171                 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2172         }
2173         spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
2174
2175         spin_lock_irqsave(&jme->phy_lock, flags);
2176         val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2177         if( ((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) !=
2178                 (ecmd->autoneg != 0)) {
2179
2180                 if(ecmd->autoneg)
2181                         val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2182                 else
2183                         val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2184
2185                 jme_mdio_write(jme->dev, jme->mii_if.phy_id,
2186                                 MII_ADVERTISE, val);
2187         }
2188         spin_unlock_irqrestore(&jme->phy_lock, flags);
2189
2190         return 0;
2191 }
2192
2193 static void
2194 jme_get_wol(struct net_device *netdev,
2195                 struct ethtool_wolinfo *wol)
2196 {
2197         struct jme_adapter *jme = netdev_priv(netdev);
2198
2199         wol->supported = WAKE_MAGIC | WAKE_PHY;
2200
2201         wol->wolopts = 0;
2202
2203         if(jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
2204                 wol->wolopts |= WAKE_PHY;
2205
2206         if(jme->reg_pmcs & PMCS_MFEN)
2207                 wol->wolopts |= WAKE_MAGIC;
2208
2209 }
2210
2211 static int
2212 jme_set_wol(struct net_device *netdev,
2213                 struct ethtool_wolinfo *wol)
2214 {
2215         struct jme_adapter *jme = netdev_priv(netdev);
2216
2217         if(wol->wolopts & (WAKE_MAGICSECURE |
2218                                 WAKE_UCAST |
2219                                 WAKE_MCAST |
2220                                 WAKE_BCAST |
2221                                 WAKE_ARP))
2222                 return -EOPNOTSUPP;
2223
2224         jme->reg_pmcs = 0;
2225
2226         if(wol->wolopts & WAKE_PHY)
2227                 jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;
2228
2229         if(wol->wolopts & WAKE_MAGIC)
2230                 jme->reg_pmcs |= PMCS_MFEN;
2231
2232
2233         return 0;
2234 }
2235
2236 static int
2237 jme_get_settings(struct net_device *netdev,
2238                      struct ethtool_cmd *ecmd)
2239 {
2240         struct jme_adapter *jme = netdev_priv(netdev);
2241         int rc;
2242         unsigned long flags;
2243
2244         spin_lock_irqsave(&jme->phy_lock, flags);
2245         rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
2246         spin_unlock_irqrestore(&jme->phy_lock, flags);
2247         return rc;
2248 }
2249
2250 static int
2251 jme_set_settings(struct net_device *netdev,
2252                      struct ethtool_cmd *ecmd)
2253 {
2254         struct jme_adapter *jme = netdev_priv(netdev);
2255         int rc, fdc=0;
2256         unsigned long flags;
2257
2258         if(ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
2259                 return -EINVAL;
2260
2261         if(jme->mii_if.force_media &&
2262         ecmd->autoneg != AUTONEG_ENABLE &&
2263         (jme->mii_if.full_duplex != ecmd->duplex))
2264                 fdc = 1;
2265
2266         spin_lock_irqsave(&jme->phy_lock, flags);
2267         rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
2268         spin_unlock_irqrestore(&jme->phy_lock, flags);
2269
2270         if(!rc && fdc)
2271                 jme_reset_link(jme);
2272
2273         if(!rc) {
2274                 jme->flags |= JME_FLAG_SSET;
2275                 jme->old_ecmd = *ecmd;
2276         }
2277
2278         return rc;
2279 }
2280
2281 static __u32
2282 jme_get_link(struct net_device *netdev)
2283 {
2284         struct jme_adapter *jme = netdev_priv(netdev);
2285         return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
2286 }
2287
2288 static u32
2289 jme_get_rx_csum(struct net_device *netdev)
2290 {
2291         struct jme_adapter *jme = netdev_priv(netdev);
2292
2293         return jme->reg_rxmcs & RXMCS_CHECKSUM;
2294 }
2295
2296 static int
2297 jme_set_rx_csum(struct net_device *netdev, u32 on)
2298 {
2299         struct jme_adapter *jme = netdev_priv(netdev);
2300         unsigned long flags;
2301
2302         spin_lock_irqsave(&jme->rxmcs_lock, flags);
2303         if(on)
2304                 jme->reg_rxmcs |= RXMCS_CHECKSUM;
2305         else
2306                 jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
2307         jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2308         spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
2309
2310         return 0;
2311 }
2312
2313 static int
2314 jme_set_tx_csum(struct net_device *netdev, u32 on)
2315 {
2316         struct jme_adapter *jme = netdev_priv(netdev);
2317
2318         if(on) {
2319                 jme->flags |= JME_FLAG_TXCSUM;
2320                 if(netdev->mtu <= 1900)
2321                         netdev->features |= NETIF_F_HW_CSUM;
2322         }
2323         else {
2324                 jme->flags &= ~JME_FLAG_TXCSUM;
2325                 netdev->features &= ~NETIF_F_HW_CSUM;
2326         }
2327
2328         return 0;
2329 }
2330
2331 static int
2332 jme_set_tso(struct net_device *netdev, u32 on)
2333 {
2334         struct jme_adapter *jme = netdev_priv(netdev);
2335
2336         if (on) {
2337                 jme->flags |= JME_FLAG_TSO;
2338                 if(netdev->mtu <= 1900)
2339                         netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
2340         }
2341         else {
2342                 jme->flags &= ~JME_FLAG_TSO;
2343                 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
2344         }
2345
2346         return 0;
2347 }
2348
2349 static int
2350 jme_nway_reset(struct net_device *netdev)
2351 {
2352         struct jme_adapter *jme = netdev_priv(netdev);
2353         jme_restart_an(jme);
2354         return 0;
2355 }
2356
2357 static const struct ethtool_ops jme_ethtool_ops = {
2358         .get_drvinfo            = jme_get_drvinfo,
2359         .get_regs_len           = jme_get_regs_len,
2360         .get_regs               = jme_get_regs,
2361         .get_coalesce           = jme_get_coalesce,
2362         .set_coalesce           = jme_set_coalesce,
2363         .get_pauseparam         = jme_get_pauseparam,
2364         .set_pauseparam         = jme_set_pauseparam,
2365         .get_wol                = jme_get_wol,
2366         .set_wol                = jme_set_wol,
2367         .get_settings           = jme_get_settings,
2368         .set_settings           = jme_set_settings,
2369         .get_link               = jme_get_link,
2370         .get_rx_csum            = jme_get_rx_csum,
2371         .set_rx_csum            = jme_set_rx_csum,
2372         .set_tx_csum            = jme_set_tx_csum,
2373         .set_tso                = jme_set_tso,
2374         .set_sg                 = ethtool_op_set_sg,
2375         .nway_reset             = jme_nway_reset,
2376 };
2377
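     /*
      * Try 64-bit, 40-bit, then 32-bit DMA masks in turn.  Returns 1
      * when addressing above 4GB is usable (the caller then enables
      * NETIF_F_HIGHDMA), 0 for 32-bit only, and -1 on failure.
      */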
2378 static int
2379 jme_pci_dma64(struct pci_dev *pdev)
2380 {
2381         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
2382             !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
2383                 dprintk("jme", "64Bit DMA Selected.\n");
2384                 return 1;
2385         }
2386
2387         if (!pci_set_dma_mask(pdev, DMA_40BIT_MASK) &&
2388             !pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK)) {
2389                 dprintk("jme", "40Bit DMA Selected.\n");
2390                 return 1;
2391         }
2392
2393         if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK) &&
2394             !pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
2395                 dprintk("jme", "32Bit DMA Selected.\n");
2396                 return 0;
2397         }
2398
2399         return -1;
2400 }
2401
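     /*
      * Setting bit 12 of PHY register 26 appears to be a
      * vendor-specific PHY tweak; its exact effect is not documented
      * in this driver.
      */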
2402 __always_inline static void
2403 jme_phy_init(struct jme_adapter *jme)
2404 {
2405         __u16 reg26;
2406
2407         reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
2408         jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
2409 }
2410
2411 __always_inline static void
2412 jme_set_gmii(struct jme_adapter *jme)
2413 {
2414         jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
2415 }
2416
2417 static void
2418 jme_check_hw_ver(struct jme_adapter *jme)
2419 {
2420         __u32 chipmode;
2421
2422         chipmode = jread32(jme, JME_CHIPMODE);
2423
2424         jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
2425         jme->chipver = (chipmode & CM_CHIPVER_MASK) >> CM_CHIPVER_SHIFT;
2426 }
2427
2428 static int __devinit
2429 jme_init_one(struct pci_dev *pdev,
2430              const struct pci_device_id *ent)
2431 {
2432         int rc = 0, using_dac, i;
2433         struct net_device *netdev;
2434         struct jme_adapter *jme;
2435         __u16 bmcr, bmsr;
2436
2437         /*
2438          * set up PCI device basics
2439          */
2440         rc = pci_enable_device(pdev);
2441         if(rc) {
2442                 printk(KERN_ERR PFX "Cannot enable PCI device.\n");
2443                 goto err_out;
2444         }
2445
2446         using_dac = jme_pci_dma64(pdev);
2447         if(using_dac < 0) {
2448                 printk(KERN_ERR PFX "Cannot set PCI DMA Mask.\n");
2449                 rc = -EIO;
2450                 goto err_out_disable_pdev;
2451         }
2452
2453         if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2454                 printk(KERN_ERR PFX "No PCI resource region found.\n");
2455                 rc = -ENOMEM;
2456                 goto err_out_disable_pdev;
2457         }
2458
2459         rc = pci_request_regions(pdev, DRV_NAME);
2460         if(rc) {
2461                 printk(KERN_ERR PFX "Cannot obtain PCI resource region.\n");
2462                 goto err_out_disable_pdev;
2463         }
2464
2465         pci_set_master(pdev);
2466
2467         /*
2468          * alloc and init net device
2469          */
2470         netdev = alloc_etherdev(sizeof(*jme));
2471         if(!netdev) {
2472                 printk(KERN_ERR PFX "Cannot allocate netdev structure.\n");
2473                 rc = -ENOMEM;
2474                 goto err_out_release_regions;
2475         }
2476         netdev->open                    = jme_open;
2477         netdev->stop                    = jme_close;
2478         netdev->hard_start_xmit         = jme_start_xmit;
2479         netdev->set_mac_address         = jme_set_macaddr;
2480         netdev->set_multicast_list      = jme_set_multi;
2481         netdev->change_mtu              = jme_change_mtu;
2482         netdev->ethtool_ops             = &jme_ethtool_ops;
2483         netdev->tx_timeout              = jme_tx_timeout;
2484         netdev->watchdog_timeo          = TX_TIMEOUT;
2485         netdev->vlan_rx_register        = jme_vlan_rx_register;
2486         NETDEV_GET_STATS(netdev, &jme_get_stats);
2487         netdev->features                =       NETIF_F_HW_CSUM |
2488                                                 NETIF_F_SG |
2489                                                 NETIF_F_TSO |
2490                                                 NETIF_F_TSO6 |
2491                                                 NETIF_F_HW_VLAN_TX |
2492                                                 NETIF_F_HW_VLAN_RX;
2493         if(using_dac)
2494                 netdev->features        |=      NETIF_F_HIGHDMA;
2495
2496         SET_NETDEV_DEV(netdev, &pdev->dev);
2497         pci_set_drvdata(pdev, netdev);
2498
2499         /*
2500          * init adapter info
2501          */
2502         jme = netdev_priv(netdev);
2503         jme->pdev = pdev;
2504         jme->dev = netdev;
2505         jme->jme_rx = netif_rx;
2506         jme->jme_vlan_rx = vlan_hwaccel_rx;
2507         jme->old_mtu = netdev->mtu = 1500;
2508         jme->phylink = 0;
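             /*
              * Ring sizes must stay powers of two so the
              * (index & mask) arithmetic wraps correctly.
              */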
2509         jme->tx_ring_size = 1 << 10;
2510         jme->tx_ring_mask = jme->tx_ring_size - 1;
2511         jme->tx_wake_threshold = 1 << 9;
2512         jme->rx_ring_size = 1 << 9;
2513         jme->rx_ring_mask = jme->rx_ring_size - 1;
2514         jme->regs = ioremap(pci_resource_start(pdev, 0),
2515                              pci_resource_len(pdev, 0));
2516         if (!(jme->regs)) {
2517                 printk(KERN_ERR PFX "Error mapping PCI resource region.\n");
2518                 rc = -ENOMEM;
2519                 goto err_out_free_netdev;
2520         }
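             /*
              * Allocate a coherent DMA buffer that the NIC can mirror
              * ("shadow") status registers into, presumably so they
              * can be polled without MMIO reads.
              */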
2521         jme->shadow_regs = pci_alloc_consistent(pdev,
2522                                                 sizeof(__u32) * SHADOW_REG_NR,
2523                                                 &(jme->shadow_dma));
2524         if (!(jme->shadow_regs)) {
2525                 printk(KERN_ERR PFX "Error allocating shadow register mapping.\n");
2526                 rc = -ENOMEM;
2527                 goto err_out_unmap;
2528         }
2529
2530         NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)
2531
2532         spin_lock_init(&jme->phy_lock);
2533         spin_lock_init(&jme->macaddr_lock);
2534         spin_lock_init(&jme->rxmcs_lock);
2535
2536         atomic_set(&jme->link_changing, 1);
2537         atomic_set(&jme->rx_cleaning, 1);
2538         atomic_set(&jme->tx_cleaning, 1);
2539         atomic_set(&jme->rx_empty, 1);
2540
2541         tasklet_init(&jme->pcc_task,
2542                      &jme_pcc_tasklet,
2543                      (unsigned long) jme);
2544         tasklet_init(&jme->linkch_task,
2545                      &jme_link_change_tasklet,
2546                      (unsigned long) jme);
2547         tasklet_init(&jme->txclean_task,
2548                      &jme_tx_clean_tasklet,
2549                      (unsigned long) jme);
2550         tasklet_init(&jme->rxclean_task,
2551                      &jme_rx_clean_tasklet,
2552                      (unsigned long) jme);
2553         tasklet_init(&jme->rxempty_task,
2554                      &jme_rx_empty_tasklet,
2555                      (unsigned long) jme);
2556         jme->dpi.cur = PCC_P1;
2557
2558         jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
2559         jme->reg_rxcs = RXCS_DEFAULT;
2560         jme->reg_rxmcs = RXMCS_DEFAULT;
2561         jme->reg_txpfc = 0;
2562         jme->reg_pmcs = PMCS_LFEN | PMCS_LREN | PMCS_MFEN;
2563         jme->flags = JME_FLAG_TXCSUM | JME_FLAG_TSO;
2564
2565         /*
2566          * Size TX DMA bursts to the PCIe Max Read Request Size
2567          */
2568         pci_read_config_byte(pdev, PCI_CONF_DCSR_MRRS, &jme->mrrs);
2569         switch(jme->mrrs) {
2570         case MRRS_128B:
2571                 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
2572                 break;
2573         case MRRS_256B:
2574                 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
2575                 break;
2576         default:
2577                 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
2578                 break;
2579         }
2580
2581
2582         /*
2583          * Must check before reset_mac_processor
2584          */
2585         jme_check_hw_ver(jme);
2586         jme->mii_if.dev = netdev;
2587         if(jme->fpgaver) {
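                     /*
                      * FPGA boards: probe PHY addresses 1-31 for a
                      * PHY that responds (BMCR readable as neither
                      * all-ones nor, together with BMSR, all zero).
                      */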
2588                 jme->mii_if.phy_id = 0;
2589                 for(i = 1 ; i < 32 ; ++i) {
2590                         bmcr = jme_mdio_read(netdev, i, MII_BMCR);
2591                         bmsr = jme_mdio_read(netdev, i, MII_BMSR);
2592                         if(bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
2593                                 jme->mii_if.phy_id = i;
2594                                 break;
2595                         }
2596                 }
2597
2598                 if(!jme->mii_if.phy_id) {
2599                         rc = -EIO;
2600                         printk(KERN_ERR PFX "Cannot find phy_id.\n");
2601                         goto err_out_free_shadow;
2602                 }
2603
2604                 jme->reg_ghc |= GHC_LINK_POLL;
2605         }
2606         else {
2607                 jme->mii_if.phy_id = 1;
2608         }
2609         jme->mii_if.supports_gmii = 1;
2610         jme->mii_if.mdio_read = jme_mdio_read;
2611         jme->mii_if.mdio_write = jme_mdio_write;
2612
2613         jme_clear_pm(jme);
2614         if(jme->fpgaver)
2615                 jme_set_gmii(jme);
2616         else
2617                 jme_phy_init(jme);
2618         jme_phy_off(jme);
2619
2620         /*
2621          * Reset MAC processor and reload EEPROM for MAC Address
2622          */
2623         jme_reset_mac_processor(jme);
2624         rc = jme_reload_eeprom(jme);
2625         if(rc) {
2626                 printk(KERN_ERR PFX
2627                         "Failed to reload eeprom for reading MAC address.\n");
2628                 goto err_out_free_shadow;
2629         }
2630         jme_load_macaddr(netdev);
2631
2632
2633         /*
2634          * Tell stack that we are not ready to work until open()
2635          */
2636         netif_carrier_off(netdev);
2637         netif_stop_queue(netdev);
2638
2639         /*
2640          * Register netdev
2641          */
2642         rc = register_netdev(netdev);
2643         if(rc) {
2644                 printk(KERN_ERR PFX "Cannot register net device.\n");
2645                 goto err_out_free_shadow;
2646         }
2647
2648         jprintk(netdev->name,
2649                 "JMC250 gigabit%s ver:%u eth %02x:%02x:%02x:%02x:%02x:%02x\n",
2650                 (jme->fpgaver != 0)?" (FPGA)":"",
2651                 (jme->fpgaver != 0)?jme->fpgaver:jme->chipver,
2652                 netdev->dev_addr[0],
2653                 netdev->dev_addr[1],
2654                 netdev->dev_addr[2],
2655                 netdev->dev_addr[3],
2656                 netdev->dev_addr[4],
2657                 netdev->dev_addr[5]);
2658
2659         return 0;
2660
2661 err_out_free_shadow:
2662         pci_free_consistent(pdev,
2663                             sizeof(__u32) * SHADOW_REG_NR,
2664                             jme->shadow_regs,
2665                             jme->shadow_dma);
2666 err_out_unmap:
2667         iounmap(jme->regs);
2668 err_out_free_netdev:
2669         pci_set_drvdata(pdev, NULL);
2670         free_netdev(netdev);
2671 err_out_release_regions:
2672         pci_release_regions(pdev);
2673 err_out_disable_pdev:
2674         pci_disable_device(pdev);
2675 err_out:
2676         return rc;
2677 }
2678
2679 static void __devexit
2680 jme_remove_one(struct pci_dev *pdev)
2681 {
2682         struct net_device *netdev = pci_get_drvdata(pdev);
2683         struct jme_adapter *jme = netdev_priv(netdev);
2684
2685         unregister_netdev(netdev);
2686         pci_free_consistent(pdev,
2687                             sizeof(__u32) * SHADOW_REG_NR,
2688                             jme->shadow_regs,
2689                             jme->shadow_dma);
2690         iounmap(jme->regs);
2691         pci_set_drvdata(pdev, NULL);
2692         free_netdev(netdev);
2693         pci_release_regions(pdev);
2694         pci_disable_device(pdev);
2695
2696 }
2697
2698 static int
2699 jme_suspend(struct pci_dev *pdev, pm_message_t state)
2700 {
2701         struct net_device *netdev = pci_get_drvdata(pdev);
2702         struct jme_adapter *jme = netdev_priv(netdev);
2703         int timeout = 100;
2704
2705         atomic_dec(&jme->link_changing);
2706
2707         netif_device_detach(netdev);
2708         netif_stop_queue(netdev);
2709         jme_stop_irq(jme);
2710         jme_free_irq(jme);
2711
2712         while (--timeout > 0 &&
2713                (atomic_read(&jme->rx_cleaning) != 1 ||
2714                 atomic_read(&jme->tx_cleaning) != 1)) {
2715                 mdelay(1);
2716         }
2719         if(!timeout) {
2720                 jeprintk(netdev->name, "Timed out waiting for tasklets.\n");
2721                 return -EBUSY;
2722         }
2723         jme_disable_shadow(jme);
2724
2725         if(netif_carrier_ok(netdev)) {
2726                 jme_stop_pcc_timer(jme);
2727                 jme_reset_mac_processor(jme);
2728                 jme_free_rx_resources(jme);
2729                 jme_free_tx_resources(jme);
2730                 netif_carrier_off(netdev);
2731                 jme->phylink = 0;
2732
2733                 if(jme->flags & JME_FLAG_POLL)
2734                         jme_polling_mode(jme);
2735         }
2736
2737
2738         pci_save_state(pdev);
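             /*
              * If any wake-up event is armed, drop the link to 100M
              * half duplex (lower power) and program PMCS; otherwise
              * power the PHY off completely.
              */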
2739         if(jme->reg_pmcs) {
2740                 jme_set_100m_half(jme);
2741                 jwrite32(jme, JME_PMCS, jme->reg_pmcs);
2742                 pci_enable_wake(pdev, PCI_D3hot, true);
2743                 pci_enable_wake(pdev, PCI_D3cold, true);
2744         }
2745         else {
2746                 jme_phy_off(jme);
2747                 pci_enable_wake(pdev, PCI_D3hot, false);
2748                 pci_enable_wake(pdev, PCI_D3cold, false);
2749         }
2750         pci_set_power_state(pdev, pci_choose_state(pdev, state));
2751
2752         return 0;
2753 }
2754
2755 static int
2756 jme_resume(struct pci_dev *pdev)
2757 {
2758         struct net_device *netdev = pci_get_drvdata(pdev);
2759         struct jme_adapter *jme = netdev_priv(netdev);
2760
2761         jme_clear_pm(jme);
2762         pci_restore_state(pdev);
2763
2764         if(jme->flags & JME_FLAG_SSET)
2765                 jme_set_settings(netdev, &jme->old_ecmd);
2766         else
2767                 jme_reset_phy_processor(jme);
2768
2769         jme_reset_mac_processor(jme);
2770         jme_enable_shadow(jme);
2771         jme_request_irq(jme);
2772         jme_start_irq(jme);
2773         netif_device_attach(netdev);
2774
2775         atomic_inc(&jme->link_changing);
2776
2777         jme_reset_link(jme);
2778
2779         return 0;
2780 }
2781
2782 static struct pci_device_id jme_pci_tbl[] = {
2783         { PCI_VDEVICE(JMICRON, 0x250) },
2784         { }
2785 };
2786
2787 static struct pci_driver jme_driver = {
2788         .name           = DRV_NAME,
2789         .id_table       = jme_pci_tbl,
2790         .probe          = jme_init_one,
2791         .remove         = __devexit_p(jme_remove_one),
2792 #ifdef CONFIG_PM
2793         .suspend        = jme_suspend,
2794         .resume         = jme_resume,
2795 #endif /* CONFIG_PM */
2796 };
2797
2798 static int __init
2799 jme_init_module(void)
2800 {
2801         printk(KERN_INFO PFX "JMicron JMC250 gigabit ethernet "
2802                "driver version %s\n", DRV_VERSION);
2803         return pci_register_driver(&jme_driver);
2804 }
2805
2806 static void __exit
2807 jme_cleanup_module(void)
2808 {
2809         pci_unregister_driver(&jme_driver);
2810 }
2811
2812 module_init(jme_init_module);
2813 module_exit(jme_cleanup_module);
2814
2815 MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
2816 MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
2817 MODULE_LICENSE("GPL");
2818 MODULE_VERSION(DRV_VERSION);
2819 MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
2820