]> bbs.cooldavid.org Git - jme.git/blob - jme.c
Import jme 0.9d-msix source
[jme.git] / jme.c
1 /*
2  * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
3  *
4  * Copyright 2008 JMicron Technology Corporation
5  * http://www.jmicron.com/
6  *
7  * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  */
23
24 /*
25  * TODO:
26  *      -  Decode register dump for ethtool.
27  */
28
29 #include <linux/version.h>
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/pci.h>
33 #include <linux/irq.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/crc32.h>
39 #include <linux/delay.h>
40 #include <linux/spinlock.h>
41 #include <linux/net.h>
42 #include <linux/in.h>
43 #include <linux/ip.h>
44 #include <linux/ipv6.h>
45 #include <linux/tcp.h>
46 #include <linux/udp.h>
47 #include <linux/if_vlan.h>
48 #include "jme.h"
49
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
/*
 * get_stats hook for kernels <= 2.6.21: return the stats block kept in
 * the adapter private area.  Newer kernels do not need this shim, so it
 * is compiled out there.
 */
static struct net_device_stats *
jme_get_stats(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return &jme->stats;
}
#endif
58
/*
 * Read a PHY register through the chip's SMI (MDIO) interface.
 *
 * Posts a read request to JME_SMI, then busy-waits (up to
 * JME_PHY_TIMEOUT * 50 polls of 20us) for the controller to clear
 * SMI_OP_REQ.  Returns the 16-bit register value, or 0 on timeout.
 *
 * MII_BMSR is deliberately read twice ("again") — presumably so the
 * latched link-status bits are refreshed and the returned value
 * reflects the current state; confirm against the PHY datasheet.
 */
static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, val, again = (reg == MII_BMSR)?1:0;

read_again:
	jwrite32(jme, JME_SMI, SMI_OP_REQ |
				smi_phy_addr(phy) |
				smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
		udelay(20);
		val = jread32(jme, JME_SMI);
		if ((val & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0) {
		jeprintk("jme", "phy(%d) read timeout : %d\n", phy, reg);
		return 0;
	}

	if(again--)
		goto read_again;

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}
88
89 static void
90 jme_mdio_write(struct net_device *netdev,
91                                 int phy, int reg, int val)
92 {
93         struct jme_adapter *jme = netdev_priv(netdev);
94         int i;
95
96         jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
97                 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
98                 smi_phy_addr(phy) | smi_reg_addr(reg));
99
100         wmb();
101         for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
102                 udelay(20);
103                 if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
104                         break;
105         }
106
107         if (i == 0)
108                 jeprintk("jme", "phy(%d) write timeout : %d\n", phy, reg);
109
110         return;
111 }
112
/*
 * Re-advertise autonegotiation parameters and reset the PHY.
 *
 * Advertises all 10/100 modes plus symmetric/asymmetric pause; on
 * gigabit parts (JME_GE_DEVICE) also advertises 1000BASE-T full/half.
 * Finally sets BMCR_RESET on top of the current BMCR value so the PHY
 * restarts with the new advertisement in effect.
 */
__always_inline static void
jme_reset_phy_processor(struct jme_adapter *jme)
{
	__u32 val;

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_ADVERTISE, ADVERTISE_ALL |
			ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

	if(jme->pdev->device == JME_GE_DEVICE)
		jme_mdio_write(jme->dev,
				jme->mii_if.phy_id,
				MII_CTRL1000,
				ADVERTISE_1000FULL | ADVERTISE_1000HALF);

	/* Read-modify-write BMCR so existing control bits survive the reset */
	val = jme_mdio_read(jme->dev,
				jme->mii_if.phy_id,
				MII_BMCR);

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_BMCR, val | BMCR_RESET);

	return;
}
139
/*
 * Program one wake-on-LAN pattern slot (@fnr) with its CRC and
 * WAKEUP_FRAME_MASK_DWNR mask dwords.
 *
 * Each datum is written by first selecting the target offset through
 * JME_WFOI and then writing the value to JME_WFODP; the wmb() between
 * the two writes preserves the select/data ordering the hardware
 * expects.
 */
static void
jme_setup_wakeup_frame(struct jme_adapter *jme,
		__u32 *mask, __u32 crc, int fnr)
{
	int i;

	/*
	 * Setup CRC pattern
	 */
	jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
	wmb();
	jwrite32(jme, JME_WFODP, crc);
	wmb();

	/*
	 * Setup Mask
	 */
	for(i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
		jwrite32(jme, JME_WFOI,
				((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
				(fnr & WFOI_FRAME_SEL));
		wmb();
		jwrite32(jme, JME_WFODP, mask[i]);
		wmb();
	}
}
166
/*
 * Soft-reset the MAC and restore baseline register state.
 *
 * Pulses GHC_SWRST, clears the multicast hash, reprograms every
 * wake-on-LAN frame slot with an all-zero mask and a fixed CRC
 * (0xCDCDCDCD), and rewrites GPREG0/GPREG1.  FPGA prototype boards
 * additionally need link-interrupt polling (GPREG0_LNKINTPOLL).
 */
__always_inline static void
jme_reset_mac_processor(struct jme_adapter *jme)
{
	__u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0,0,0,0};
	__u32 crc = 0xCDCDCDCD;
	__u32 gpreg0;
	int i;

	/* Pulse the software-reset bit; 2us is the settle delay used here */
	jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
	udelay(2);
	jwrite32(jme, JME_GHC, jme->reg_ghc);
	jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
	jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
	for(i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
		jme_setup_wakeup_frame(jme, mask, crc, i);
	if(jme->fpgaver)
		gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL;
	else
		gpreg0 = GPREG0_DEFAULT;
	jwrite32(jme, JME_GPREG0, gpreg0);
	jwrite32(jme, JME_GPREG1, 0);
}
189
/*
 * Clear pending wakeup status and bring the device to full power.
 *
 * Writing the 0xFFFF0000 status bits to JME_PMCS acknowledges pending
 * wake events while preserving the enable bits cached in jme->reg_pmcs;
 * the PCI core is then asked for D0 with wake disabled.
 */
__always_inline static void
jme_clear_pm(struct jme_adapter *jme)
{
	jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
	pci_set_power_state(jme->pdev, PCI_D0);
	pci_enable_wake(jme->pdev, PCI_D0, false);
}
197
198 static int
199 jme_reload_eeprom(struct jme_adapter *jme)
200 {
201         __u32 val;
202         int i;
203
204         val = jread32(jme, JME_SMBCSR);
205
206         if(val & SMBCSR_EEPROMD)
207         {
208                 val |= SMBCSR_CNACK;
209                 jwrite32(jme, JME_SMBCSR, val);
210                 val |= SMBCSR_RELOAD;
211                 jwrite32(jme, JME_SMBCSR, val);
212                 mdelay(12);
213
214                 for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i)
215                 {
216                         mdelay(1);
217                         if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
218                                 break;
219                 }
220
221                 if(i == 0) {
222                         jeprintk("jme", "eeprom reload timeout\n");
223                         return -EIO;
224                 }
225         }
226
227         return 0;
228 }
229
230 static void
231 jme_load_macaddr(struct net_device *netdev)
232 {
233         struct jme_adapter *jme = netdev_priv(netdev);
234         unsigned char macaddr[6];
235         __u32 val;
236
237         spin_lock(&jme->macaddr_lock);
238         val = jread32(jme, JME_RXUMA_LO);
239         macaddr[0] = (val >>  0) & 0xFF;
240         macaddr[1] = (val >>  8) & 0xFF;
241         macaddr[2] = (val >> 16) & 0xFF;
242         macaddr[3] = (val >> 24) & 0xFF;
243         val = jread32(jme, JME_RXUMA_HI);
244         macaddr[4] = (val >>  0) & 0xFF;
245         macaddr[5] = (val >>  8) & 0xFF;
246         memcpy(netdev->dev_addr, macaddr, 6);
247         spin_unlock(&jme->macaddr_lock);
248 }
249
250 __always_inline static void
251 jme_set_rx_pcc(struct jme_adapter *jme, int p)
252 {
253         switch(p) {
254         case PCC_OFF:
255                 jwrite32(jme, JME_PCCRX0,
256                         ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
257                         ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
258                 break;
259         case PCC_P1:
260                 jwrite32(jme, JME_PCCRX0,
261                         ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
262                         ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
263                 break;
264         case PCC_P2:
265                 jwrite32(jme, JME_PCCRX0,
266                         ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
267                         ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
268                 break;
269         case PCC_P3:
270                 jwrite32(jme, JME_PCCRX0,
271                         ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
272                         ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
273                 break;
274         default:
275                 break;
276         }
277         wmb();
278
279         if(!(jme->flags & JME_FLAG_POLL))
280                 dprintk(jme->dev->name, "Switched to PCC_P%d\n", p);
281 }
282
283 static void
284 jme_start_irq(struct jme_adapter *jme)
285 {
286         register struct dynpcc_info *dpi = &(jme->dpi);
287
288         jme_set_rx_pcc(jme, PCC_P1);
289         dpi->cur                = PCC_P1;
290         dpi->attempt            = PCC_P1;
291         dpi->cnt                = 0;
292
293         jwrite32(jme, JME_PCCTX,
294                         ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
295                         ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
296                         PCCTXQ0_EN
297                 );
298
299         /*
300          * Enable Interrupts
301          */
302         jwrite32(jme, JME_IENS, INTR_ENABLE);
303 }
304
/*
 * Mask every interrupt source by writing the full enable mask to the
 * interrupt-enable-clear register.
 */
__always_inline static void
jme_stop_irq(struct jme_adapter *jme)
{
	/*
	 * Disable Interrupts
	 */
	jwrite32(jme, JME_IENC, INTR_ENABLE);
}
313
314
/*
 * Point the hardware at the shadow status area and enable posting
 * (SHBA_POSTEN).  The low 5 address bits are masked off — the base
 * must be 32-byte aligned.
 */
__always_inline static void
jme_enable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme,
		 JME_SHBA_LO,
		 ((__u32)jme->shadow_dma & ~((__u32)0x1F)) | SHBA_POSTEN);
}
322
/*
 * Stop shadow posting by clearing the shadow base address register
 * (which also clears its POSTEN enable bit).
 */
__always_inline static void
jme_disable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme, JME_SHBA_LO, 0x0);
}
328
/*
 * Build a PHY_LINK_* status word by querying the PHY directly — used
 * on FPGA prototypes instead of reading the JME_PHY_LINK register.
 *
 * Register 17 is a vendor-specific PHY status register; presumably its
 * layout matches the PHY_LINK_* bits — confirm with the PHY datasheet.
 * BMSR_ANCOMP is folded in as PHY_LINK_AUTONEG_COMPLETE.
 */
static __u32
jme_linkstat_from_phy(struct jme_adapter *jme)
{
	__u32 phylink, bmsr;

	phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17);
	bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
	if(bmsr & BMSR_ANCOMP)
		phylink |= PHY_LINK_AUTONEG_COMPLETE;

	return phylink;
}
341
/*
 * Evaluate PHY link state and, when it changed, reconfigure the MAC.
 *
 * @testonly: when non-zero, only probe — never touch MAC registers.
 *
 * Returns 1 when the link is up and unchanged from the cached state,
 * 0 otherwise.  On a real change this programs GHC speed/duplex bits,
 * TXMCS (plus TXTRHD retry settings for half duplex), updates
 * jme->phylink, and toggles the carrier.
 */
static int
jme_check_link(struct net_device *netdev, int testonly)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr;
	char linkmsg[64];
	int rc = 0;

	linkmsg[0] = '\0';

	/* FPGA prototypes cannot use the JME_PHY_LINK register */
	if(jme->fpgaver)
		phylink = jme_linkstat_from_phy(jme);
	else
		phylink = jread32(jme, JME_PHY_LINK);

	if (phylink & PHY_LINK_UP) {
		if(!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
			/*
			 * If we did not enable AN
			 * Speed/Duplex Info should be obtained from SMI
			 */
			phylink = PHY_LINK_UP;

			bmcr = jme_mdio_read(jme->dev,
						jme->mii_if.phy_id,
						MII_BMCR);


			/* Decode forced speed from the BMCR speed bits */
			phylink |= ((bmcr & BMCR_SPEED1000) &&
					(bmcr & BMCR_SPEED100) == 0) ?
					PHY_LINK_SPEED_1000M :
					(bmcr & BMCR_SPEED100) ?
					PHY_LINK_SPEED_100M :
					PHY_LINK_SPEED_10M;

			phylink |= (bmcr & BMCR_FULLDPLX) ?
					 PHY_LINK_DUPLEX : 0;

			strcat(linkmsg, "Forced: ");
		}
		else {
			/*
			 * Keep polling for speed/duplex resolve complete
			 */
			while(!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
				--cnt) {

				udelay(1);

				if(jme->fpgaver)
					phylink = jme_linkstat_from_phy(jme);
				else
					phylink = jread32(jme, JME_PHY_LINK);
			}

			if(!cnt)
				jeprintk(netdev->name,
					"Waiting speed resolve timeout.\n");

			strcat(linkmsg, "ANed: ");
		}

		/* Nothing changed since last check: report "up" and leave */
		if(jme->phylink == phylink) {
			rc = 1;
			goto out;
		}
		if(testonly)
			goto out;

		jme->phylink = phylink;

		/* Rebuild GHC speed/duplex field from the resolved state */
		ghc = jme->reg_ghc & ~(GHC_SPEED_10M |
					GHC_SPEED_100M |
					GHC_SPEED_1000M |
					GHC_DPX);
		switch(phylink & PHY_LINK_SPEED_MASK) {
			case PHY_LINK_SPEED_10M:
				ghc |= GHC_SPEED_10M;
				strcat(linkmsg, "10 Mbps, ");
				break;
			case PHY_LINK_SPEED_100M:
				ghc |= GHC_SPEED_100M;
				strcat(linkmsg, "100 Mbps, ");
				break;
			case PHY_LINK_SPEED_1000M:
				ghc |= GHC_SPEED_1000M;
				strcat(linkmsg, "1000 Mbps, ");
				break;
			default:
				break;
		}
		ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;

		strcat(linkmsg, (phylink &PHY_LINK_DUPLEX) ?
					"Full-Duplex, " :
					"Half-Duplex, ");

		if(phylink & PHY_LINK_MDI_STAT)
			strcat(linkmsg, "MDI-X");
		else
			strcat(linkmsg, "MDI");

		/* Half duplex additionally needs backoff/carrier-sense/retry */
		if(phylink & PHY_LINK_DUPLEX)
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
		else {
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
						TXMCS_BACKOFF |
						TXMCS_CARRIERSENSE |
						TXMCS_COLLISION);
			jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
				((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
				TXTRHD_TXREN |
				((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
		}

		jme->reg_ghc = ghc;
		jwrite32(jme, JME_GHC, ghc);

		jprintk(netdev->name, "Link is up at %s.\n", linkmsg);
		netif_carrier_on(netdev);
	}
	else {
		if(testonly)
			goto out;

		jprintk(netdev->name, "Link is down.\n");
		jme->phylink = 0;
		netif_carrier_off(netdev);
	}

out:
	return rc;
}
475
/*
 * Allocate and initialize the TX descriptor ring.
 *
 * Allocates one coherent DMA region sized for the ring plus alignment
 * slack, derives the 16-byte-aligned CPU/DMA addresses from it, zeroes
 * the descriptors and the per-slot buffer-info array, and resets the
 * ring indices.  Returns 0 or -ENOMEM.
 *
 * NOTE(review): GFP_ATOMIC is used here — presumably because a caller
 * may hold a lock; confirm, since GFP_KERNEL would be preferable from
 * sleepable context.
 */
static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);

	txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
				   TX_RING_ALLOC_SIZE(jme->tx_ring_size),
				   &(txring->dmaalloc),
				   GFP_ATOMIC);

	if(!txring->alloc) {
		txring->desc = NULL;
		txring->dmaalloc = 0;
		txring->dma = 0;
		return -ENOMEM;
	}

	/*
	 * 16 Bytes align
	 */
	txring->desc		= (void*)ALIGN((unsigned long)(txring->alloc),
						RING_DESC_ALIGN);
	txring->dma		= ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
	txring->next_to_use	= 0;
	atomic_set(&txring->next_to_clean, 0);
	atomic_set(&txring->nr_free, jme->tx_ring_size);

	/*
	 * Initialize Transmit Descriptors
	 */
	memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
	memset(txring->bufinf, 0,
		sizeof(struct jme_buffer_info) * jme->tx_ring_size);

	return 0;
}
512
/*
 * Tear down the TX descriptor ring.
 *
 * Frees any skbs still attached to ring slots, clears each slot's
 * buffer info, releases the coherent DMA region, and resets the ring
 * bookkeeping.  Safe to call when the ring was never allocated.
 */
static void
jme_free_tx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi = txring->bufinf;

	if(txring->alloc) {
		for(i = 0 ; i < jme->tx_ring_size ; ++i) {
			txbi = txring->bufinf + i;
			if(txbi->skb) {
				dev_kfree_skb(txbi->skb);
				txbi->skb = NULL;
			}
			txbi->mapping	= 0;
			txbi->len	= 0;
			txbi->nr_desc	= 0;
		}

		dma_free_coherent(&(jme->pdev->dev),
				  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
				  txring->alloc,
				  txring->dmaalloc);

		txring->alloc		= NULL;
		txring->desc		= NULL;
		txring->dmaalloc	= 0;
		txring->dma		= 0;
	}
	txring->next_to_use	= 0;
	atomic_set(&txring->next_to_clean, 0);
	atomic_set(&txring->nr_free, 0);

}
547
/*
 * Program the TX queue 0 ring into the hardware and start the engine.
 * The wmb() before the final TXCS write ensures all setup registers
 * are posted before the engine is enabled.
 */
__always_inline static void
jme_enable_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);

	/*
	 * Setup TX Queue 0 DMA Base Address (split into high/low dwords)
	 */
	jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
	jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup TX Descriptor Count
	 */
	jwrite32(jme, JME_TXQDC, jme->tx_ring_size);

	/*
	 * Enable TX Engine
	 */
	wmb();
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);

}
577
/*
 * Re-enable the TX engine on queue 0 using the cached TXCS settings
 * (ring addresses are assumed to still be programmed).
 */
__always_inline static void
jme_restart_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Restart TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);
}
588
589 __always_inline static void
590 jme_disable_tx_engine(struct jme_adapter *jme)
591 {
592         int i;
593         __u32 val;
594
595         /*
596          * Disable TX Engine
597          */
598         jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
599
600         val = jread32(jme, JME_TXCS);
601         for(i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i)
602         {
603                 mdelay(1);
604                 val = jread32(jme, JME_TXCS);
605         }
606
607         if(!i) {
608                 jeprintk(jme->dev->name, "Disable TX engine timeout.\n");
609                 jme_reset_mac_processor(jme);
610         }
611
612
613 }
614
/*
 * Rebuild RX descriptor @i and hand it back to the hardware.
 *
 * The descriptor is refilled from the slot's buffer info (DMA address
 * split into high/low dwords, buffer length); RXFLAG_64BIT is set when
 * the device supports high DMA.  Ownership (RXFLAG_OWN) is only set
 * after the wmb(), so the device never observes a half-written
 * descriptor.
 */
static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	register volatile struct rxdesc* rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxdesc += i;
	rxbi += i;

	rxdesc->dw[0] = 0;
	rxdesc->dw[1] = 0;
	rxdesc->desc1.bufaddrh	= cpu_to_le32((__u64)rxbi->mapping >> 32);
	rxdesc->desc1.bufaddrl	= cpu_to_le32(
					(__u64)rxbi->mapping & 0xFFFFFFFFUL);
	rxdesc->desc1.datalen	= cpu_to_le16(rxbi->len);
	if(jme->dev->features & NETIF_F_HIGHDMA)
		rxdesc->desc1.flags = RXFLAG_64BIT;
	wmb();
	rxdesc->desc1.flags	|= RXFLAG_OWN | RXFLAG_INT;
}
635
636 static int
637 jme_make_new_rx_buf(struct jme_adapter *jme, int i)
638 {
639         struct jme_ring *rxring = &(jme->rxring[0]);
640         struct jme_buffer_info *rxbi = rxring->bufinf + i;
641         unsigned long offset;
642         struct sk_buff* skb;
643
644         skb = netdev_alloc_skb(jme->dev,
645                 jme->dev->mtu + RX_EXTRA_LEN);
646         if(unlikely(!skb))
647                 return -ENOMEM;
648
649         if(unlikely(offset =
650                         (unsigned long)(skb->data)
651                         & ((unsigned long)RX_BUF_DMA_ALIGN - 1)))
652                 skb_reserve(skb, RX_BUF_DMA_ALIGN - offset);
653
654         rxbi->skb = skb;
655         rxbi->len = skb_tailroom(skb);
656         rxbi->mapping = pci_map_page(jme->pdev,
657                                         virt_to_page(skb->data),
658                                         offset_in_page(skb->data),
659                                         rxbi->len,
660                                         PCI_DMA_FROMDEVICE);
661
662         return 0;
663 }
664
665 static void
666 jme_free_rx_buf(struct jme_adapter *jme, int i)
667 {
668         struct jme_ring *rxring = &(jme->rxring[0]);
669         struct jme_buffer_info *rxbi = rxring->bufinf;
670         rxbi += i;
671
672         if(rxbi->skb) {
673                 pci_unmap_page(jme->pdev,
674                                  rxbi->mapping,
675                                  rxbi->len,
676                                  PCI_DMA_FROMDEVICE);
677                 dev_kfree_skb(rxbi->skb);
678                 rxbi->skb = NULL;
679                 rxbi->mapping = 0;
680                 rxbi->len = 0;
681         }
682 }
683
/*
 * Tear down the RX descriptor ring: free every slot's buffer, release
 * the coherent DMA region, and reset the ring bookkeeping.  Safe to
 * call when the ring was never allocated.
 */
static void
jme_free_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	if(rxring->alloc) {
		for(i = 0 ; i < jme->rx_ring_size ; ++i)
			jme_free_rx_buf(jme, i);

		dma_free_coherent(&(jme->pdev->dev),
				  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
				  rxring->alloc,
				  rxring->dmaalloc);
		rxring->alloc    = NULL;
		rxring->desc     = NULL;
		rxring->dmaalloc = 0;
		rxring->dma      = 0;
	}
	rxring->next_to_use   = 0;
	atomic_set(&rxring->next_to_clean, 0);
}
706
/*
 * Allocate and initialize the RX descriptor ring.
 *
 * Allocates one coherent DMA region sized for the ring plus alignment
 * slack, derives the 16-byte-aligned CPU/DMA addresses, then populates
 * every slot with a freshly mapped receive buffer and arms its
 * descriptor.  On any buffer-allocation failure the whole ring is torn
 * down again.  Returns 0 or -ENOMEM.
 */
static int
jme_setup_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
				   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
				   &(rxring->dmaalloc),
				   GFP_ATOMIC);
	if(!rxring->alloc) {
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
		return -ENOMEM;
	}

	/*
	 * 16 Bytes align
	 */
	rxring->desc		= (void*)ALIGN((unsigned long)(rxring->alloc),
						RING_DESC_ALIGN);
	rxring->dma		= ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
	rxring->next_to_use	= 0;
	atomic_set(&rxring->next_to_clean, 0);

	/*
	 * Initialize Receive Descriptors
	 */
	for(i = 0 ; i < jme->rx_ring_size ; ++i) {
		if(unlikely(jme_make_new_rx_buf(jme, i))) {
			jme_free_rx_resources(jme);
			return -ENOMEM;
		}

		jme_set_clean_rxdesc(jme, i);
	}

	return 0;
}
747
/*
 * Program the RX queue 0 ring into the hardware and start the engine.
 * The unicast/multicast filter is refreshed first; the wmb() ensures
 * all setup registers are posted before the engine is enabled.
 */
__always_inline static void
jme_enable_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Setup RX DMA Base Address (split into high/low dwords)
	 */
	jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
	jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup RX Descriptor Count
	 */
	jwrite32(jme, JME_RXQDC, jme->rx_ring_size);

	/*
	 * Setup Unicast Filter
	 */
	jme_set_multi(jme->dev);

	/*
	 * Enable RX Engine
	 */
	wmb();
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}
777
/*
 * Re-enable the RX engine on queue 0 using the cached RXCS settings
 * (ring addresses are assumed to still be programmed).
 */
__always_inline static void
jme_restart_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Start RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}
789
790
791 __always_inline static void
792 jme_disable_rx_engine(struct jme_adapter *jme)
793 {
794         int i;
795         __u32 val;
796
797         /*
798          * Disable RX Engine
799          */
800         jwrite32(jme, JME_RXCS, jme->reg_rxcs);
801
802         val = jread32(jme, JME_RXCS);
803         for(i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i)
804         {
805                 mdelay(1);
806                 val = jread32(jme, JME_RXCS);
807         }
808
809         if(!i)
810                 jeprintk(jme->dev->name, "Disable RX engine timeout.\n");
811
812 }
813
814 static int
815 jme_rxsum_ok(struct jme_adapter *jme, __u16 flags)
816 {
817         if(!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
818                 return false;
819
820         if(unlikely((flags & RXWBFLAG_TCPON) &&
821         !(flags & RXWBFLAG_TCPCS))) {
822                 csum_dbg(jme->dev->name, "TCP Checksum error.\n");
823                 goto out_sumerr;
824         }
825
826         if(unlikely((flags & RXWBFLAG_UDPON) &&
827         !(flags & RXWBFLAG_UDPCS))) {
828                 csum_dbg(jme->dev->name, "UDP Checksum error.\n");
829                 goto out_sumerr;
830         }
831
832         if(unlikely((flags & RXWBFLAG_IPV4) &&
833         !(flags & RXWBFLAG_IPCS))) {
834                 csum_dbg(jme->dev->name, "IPv4 Checksum error.\n");
835                 goto out_sumerr;
836         }
837
838         return true;
839
840 out_sumerr:
841         csum_dbg(jme->dev->name, "%s%s%s%s\n",
842                 (flags & RXWBFLAG_IPV4)?"IPv4 ":"",
843                 (flags & RXWBFLAG_IPV6)?"IPv6 ":"",
844                 (flags & RXWBFLAG_UDPON)?"UDP ":"",
845                 (flags & RXWBFLAG_TCPON)?"TCP":"");
846         return false;
847 }
848
849 static void
850 jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
851 {
852         struct jme_ring *rxring = &(jme->rxring[0]);
853         volatile struct rxdesc *rxdesc = rxring->desc;
854         struct jme_buffer_info *rxbi = rxring->bufinf;
855         struct sk_buff *skb;
856         int framesize;
857
858         rxdesc += idx;
859         rxbi += idx;
860
861         skb = rxbi->skb;
862         pci_dma_sync_single_for_cpu(jme->pdev,
863                                         rxbi->mapping,
864                                         rxbi->len,
865                                         PCI_DMA_FROMDEVICE);
866
867         if(unlikely(jme_make_new_rx_buf(jme, idx))) {
868                 pci_dma_sync_single_for_device(jme->pdev,
869                                                 rxbi->mapping,
870                                                 rxbi->len,
871                                                 PCI_DMA_FROMDEVICE);
872
873                 ++(NET_STAT(jme).rx_dropped);
874         }
875         else {
876                 framesize = le16_to_cpu(rxdesc->descwb.framesize)
877                                 - RX_PREPAD_SIZE;
878
879                 skb_reserve(skb, RX_PREPAD_SIZE);
880                 skb_put(skb, framesize);
881                 skb->protocol = eth_type_trans(skb, jme->dev);
882
883                 if(jme_rxsum_ok(jme, rxdesc->descwb.flags))
884                         skb->ip_summed = CHECKSUM_UNNECESSARY;
885                 else
886                         skb->ip_summed = CHECKSUM_NONE;
887
888
889                 if(rxdesc->descwb.flags & RXWBFLAG_TAGON) {
890                         vlan_dbg(jme->dev->name, "VLAN: %04x\n",
891                                         rxdesc->descwb.vlan);
892                         if(jme->vlgrp) {
893                                 vlan_dbg(jme->dev->name,
894                                         "VLAN Passed to kernel.\n");
895                                 jme->jme_vlan_rx(skb, jme->vlgrp,
896                                         le32_to_cpu(rxdesc->descwb.vlan));
897                                 NET_STAT(jme).rx_bytes += 4;
898                         }
899                 }
900                 else {
901                         jme->jme_rx(skb);
902                 }
903
904                 if((le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST) ==
905                                 RXWBFLAG_DEST_MUL)
906                         ++(NET_STAT(jme).multicast);
907
908                 jme->dev->last_rx = jiffies;
909                 NET_STAT(jme).rx_bytes += framesize;
910                 ++(NET_STAT(jme).rx_packets);
911         }
912
913         jme_set_clean_rxdesc(jme, idx);
914
915 }
916
917
918
/*
 * Reclaim completed RX descriptors, feeding good frames up the stack.
 *
 * @jme:   adapter instance
 * @limit: maximum number of descriptors to process in this call
 *
 * Returns the unused remainder of @limit (0 if it was exhausted).
 * Serialized against itself through the rx_cleaning atomic; bails out
 * while the link is changing or the carrier is down.
 */
static int
jme_process_receive(struct jme_adapter *jme, int limit)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	volatile struct rxdesc *rxdesc = rxring->desc;
	int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;

	/*
	 * rx_cleaning doubles as a re-entry guard: only the caller that
	 * brings it to zero may proceed; everyone else restores it below.
	 */
	if(unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
		goto out_inc;

	if(unlikely(atomic_read(&jme->link_changing) != 1))
		goto out_inc;

	if(unlikely(!netif_carrier_ok(jme->dev)))
		goto out_inc;

	i = atomic_read(&rxring->next_to_clean);
	while( limit-- > 0 )
	{
		rxdesc = rxring->desc;
		rxdesc += i;

		/*
		 * Stop at the first descriptor the hardware still owns or
		 * has not finished writing back.
		 */
		if((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
		!(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
			goto out;

		desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

		rx_dbg(jme->dev->name, "RX: Cleaning %d\n", i);

		/*
		 * Frames spanning several descriptors and frames flagged
		 * with any receive error are dropped: bump the matching
		 * error counter and recycle every descriptor they used.
		 */
		if(unlikely(desccnt > 1 ||
		rxdesc->descwb.errstat & RXWBERR_ALLERR)) {

			if(rxdesc->descwb.errstat & RXWBERR_CRCERR)
				++(NET_STAT(jme).rx_crc_errors);
			else if(rxdesc->descwb.errstat & RXWBERR_OVERUN)
				++(NET_STAT(jme).rx_fifo_errors);
			else
				++(NET_STAT(jme).rx_errors);

			if(desccnt > 1) {
				rx_dbg(jme->dev->name,
					"RX: More than one(%d) descriptor, "
					"framelen=%d\n",
					desccnt, le16_to_cpu(rxdesc->descwb.framesize));
				/* The extra descriptors consume budget too. */
				limit -= desccnt - 1;
			}

			for(j = i, ccnt = desccnt ; ccnt-- ; ) {
				jme_set_clean_rxdesc(jme, j);
				j = (j + 1) & (mask);
			}

		}
		else {
			/* Good frame: hand it to the stack, re-arm slot. */
			jme_alloc_and_feed_skb(jme, i);
		}

		i = (i + desccnt) & (mask);
	}


out:
	rx_dbg(jme->dev->name, "RX: Stop at %d\n", i);
	rx_dbg(jme->dev->name, "RX: RXNDA offset %d\n",
		(jread32(jme, JME_RXNDA) - jread32(jme, JME_RXDBA_LO))
			>> 4);

	atomic_set(&rxring->next_to_clean, i);

out_inc:
	atomic_inc(&jme->rx_cleaning);

	return limit > 0 ? limit : 0;

}
995
996 static void
997 jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
998 {
999         if(likely(atmp == dpi->cur)) {
1000                 dpi->cnt = 0;
1001                 return;
1002         }
1003
1004         if(dpi->attempt == atmp) {
1005                 ++(dpi->cnt);
1006         }
1007         else {
1008                 dpi->attempt = atmp;
1009                 dpi->cnt = 0;
1010         }
1011
1012 }
1013
1014 static void
1015 jme_dynamic_pcc(struct jme_adapter *jme)
1016 {
1017         register struct dynpcc_info *dpi = &(jme->dpi);
1018
1019         if((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
1020                 jme_attempt_pcc(dpi, PCC_P3);
1021         else if((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD
1022         || dpi->intr_cnt > PCC_INTR_THRESHOLD)
1023                 jme_attempt_pcc(dpi, PCC_P2);
1024         else
1025                 jme_attempt_pcc(dpi, PCC_P1);
1026
1027         if(unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
1028                 jme_set_rx_pcc(jme, dpi->attempt);
1029                 dpi->cur = dpi->attempt;
1030                 dpi->cnt = 0;
1031         }
1032 }
1033
1034 static void
1035 jme_start_pcc_timer(struct jme_adapter *jme)
1036 {
1037         struct dynpcc_info *dpi = &(jme->dpi);
1038         dpi->last_bytes         = NET_STAT(jme).rx_bytes;
1039         dpi->last_pkts          = NET_STAT(jme).rx_packets;
1040         dpi->intr_cnt           = 0;
1041         jwrite32(jme, JME_TMCSR,
1042                 TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
1043 }
1044
1045 __always_inline static void
1046 jme_stop_pcc_timer(struct jme_adapter *jme)
1047 {
1048         jwrite32(jme, JME_TMCSR, 0);
1049 }
1050
1051 static void
1052 jme_pcc_tasklet(unsigned long arg)
1053 {
1054         struct jme_adapter *jme = (struct jme_adapter*)arg;
1055         struct net_device *netdev = jme->dev;
1056
1057         if(unlikely(!netif_carrier_ok(netdev) ||
1058                 (atomic_read(&jme->link_changing) != 1)
1059         )) {
1060                 jme_stop_pcc_timer(jme);
1061                 return;
1062         }
1063
1064         if(!(jme->flags & JME_FLAG_POLL))
1065                 jme_dynamic_pcc(jme);
1066
1067         jme_start_pcc_timer(jme);
1068 }
1069
1070 __always_inline static void
1071 jme_polling_mode(struct jme_adapter *jme)
1072 {
1073         jme_set_rx_pcc(jme, PCC_OFF);
1074 }
1075
1076 __always_inline static void
1077 jme_interrupt_mode(struct jme_adapter *jme)
1078 {
1079         jme_set_rx_pcc(jme, PCC_P1);
1080 }
1081
/*
 * Link-change tasklet: tear down and rebuild the RX/TX data path when
 * the link state (or the MTU) changes.
 *
 * Uses link_changing as a single-entry gate; while it is held, the RX
 * and TX cleaning paths refuse to run, which is what the busy-wait
 * below synchronizes with.  On allocation failure the device is left
 * stopped deliberately (see the error messages).
 */
static void
jme_link_change_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter*)arg;
	struct net_device *netdev = jme->dev;
	int timeout = WAIT_TASKLET_TIMEOUT;
	int rc;

	if(!atomic_dec_and_test(&jme->link_changing))
		goto out;

	/* Nothing to do if the link is unchanged and the MTU is stable. */
	if(jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
		goto out;

	jme->old_mtu = netdev->mtu;
	netif_stop_queue(netdev);

	/*
	 * Wait (bounded) for in-flight RX/TX cleaning tasklets to drain
	 * before touching the rings underneath them.
	 */
	while(--timeout > 0 &&
		(
		atomic_read(&jme->rx_cleaning) != 1 ||
		atomic_read(&jme->tx_cleaning) != 1
		)) {

		mdelay(1);
	}

	/* Old link was up: quiesce the MAC and release the old rings. */
	if(netif_carrier_ok(netdev)) {
		jme_stop_pcc_timer(jme);
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);

		if(jme->flags & JME_FLAG_POLL)
			jme_polling_mode(jme);
	}

	/* Re-read the link; if it is up, bring the data path back. */
	jme_check_link(netdev, 0);
	if(netif_carrier_ok(netdev)) {
		rc = jme_setup_rx_resources(jme);
		if(rc) {
			jeprintk(netdev->name,
				"Allocating resources for RX error"
				", Device STOPPED!\n");
			goto out;
		}


		rc = jme_setup_tx_resources(jme);
		if(rc) {
			jeprintk(netdev->name,
				"Allocating resources for TX error"
				", Device STOPPED!\n");
			goto err_out_free_rx_resources;
		}

		jme_enable_rx_engine(jme);
		jme_enable_tx_engine(jme);

		netif_start_queue(netdev);

		if(jme->flags & JME_FLAG_POLL)
			jme_interrupt_mode(jme);

		jme_start_pcc_timer(jme);
	}

	goto out;

err_out_free_rx_resources:
	jme_free_rx_resources(jme);
out:
	atomic_inc(&jme->link_changing);
}
1155
1156 static void
1157 jme_rx_clean_tasklet(unsigned long arg)
1158 {
1159         struct jme_adapter *jme = (struct jme_adapter*)arg;
1160         struct dynpcc_info *dpi = &(jme->dpi);
1161
1162         jme_process_receive(jme, jme->rx_ring_size);
1163         ++(dpi->intr_cnt);
1164
1165 }
1166
/*
 * NAPI poll callback (signature abstracted by the JME_NAPI_* macros to
 * span old and new NAPI APIs).
 *
 * Processes up to the NAPI budget of RX descriptors, restarts the RX
 * engine once per pending ring-empty event (counting each as a drop),
 * and completes NAPI / re-enables coalesced interrupts when the budget
 * was not exhausted.  Returns the number of descriptors processed.
 */
static int
jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
{
	struct jme_adapter *jme = jme_napi_priv(holder);
	struct net_device *netdev = jme->dev;
	int rest;

	rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));

	/* One restart (and one accounted drop) per queued empty event. */
	while(atomic_read(&jme->rx_empty) > 0) {
		atomic_dec(&jme->rx_empty);
		++(NET_STAT(jme).rx_dropped);
		jme_restart_rx_engine(jme);
	}
	atomic_inc(&jme->rx_empty);

	/* Budget left over: all work done, leave polling mode. */
	if(rest) {
		JME_RX_COMPLETE(netdev, holder);
		jme_interrupt_mode(jme);
	}

	JME_NAPI_WEIGHT_SET(budget, rest);
	return JME_NAPI_WEIGHT_VAL(budget) - rest;
}
1191
1192 static void
1193 jme_rx_empty_tasklet(unsigned long arg)
1194 {
1195         struct jme_adapter *jme = (struct jme_adapter*)arg;
1196
1197         if(unlikely(atomic_read(&jme->link_changing) != 1))
1198                 return;
1199
1200         if(unlikely(!netif_carrier_ok(jme->dev)))
1201                 return;
1202
1203         queue_dbg(jme->dev->name, "RX Queue Full!\n");
1204
1205         jme_rx_clean_tasklet(arg);
1206
1207         while(atomic_read(&jme->rx_empty) > 0) {
1208                 atomic_dec(&jme->rx_empty);
1209                 ++(NET_STAT(jme).rx_dropped);
1210                 jme_restart_rx_engine(jme);
1211         }
1212         atomic_inc(&jme->rx_empty);
1213 }
1214
1215 static void
1216 jme_wake_queue_if_stopped(struct jme_adapter *jme)
1217 {
1218         struct jme_ring *txring = jme->txring;
1219
1220         smp_wmb();
1221         if(unlikely(netif_queue_stopped(jme->dev) &&
1222         atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
1223
1224                 queue_dbg(jme->dev->name, "TX Queue Waked.\n");
1225                 netif_wake_queue(jme->dev);
1226
1227         }
1228
1229 }
1230
/*
 * TX cleaning tasklet: walk the TX ring from next_to_clean, releasing
 * every transmitted skb, unmapping its fragment DMA buffers, and
 * returning its descriptors to the free pool.
 *
 * Serialized against itself via the tx_cleaning atomic, and skipped
 * entirely while the link is changing or the carrier is down.
 */
static void
jme_tx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter*)arg;
	struct jme_ring *txring = &(jme->txring[0]);
	volatile struct txdesc *txdesc = txring->desc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
	int i, j, cnt = 0, max, err, mask;

	if(unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
		goto out;

	if(unlikely(atomic_read(&jme->link_changing) != 1))
		goto out;

	if(unlikely(!netif_carrier_ok(jme->dev)))
		goto out;

	/* At most every in-use descriptor can be reclaimed this pass. */
	max = jme->tx_ring_size - atomic_read(&txring->nr_free);
	mask = jme->tx_ring_mask;

	tx_dbg(jme->dev->name, "Tx Tasklet: In\n");

	for(i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {

		ctxbi = txbi + i;

		/*
		 * A slot is reclaimable when it holds an skb and the
		 * hardware has released ownership of its first descriptor.
		 */
		if(likely(ctxbi->skb &&
		!(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {

			err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;

			tx_dbg(jme->dev->name,
				"Tx Tasklet: Clean %d+%d\n",
				i, ctxbi->nr_desc);

			/*
			 * Unmap the follow-on descriptors (fragments and
			 * linear part); slot i itself carries no mapping.
			 */
			for(j = 1 ; j < ctxbi->nr_desc ; ++j) {
				ttxbi = txbi + ((i + j) & (mask));
				txdesc[(i + j) & (mask)].dw[0] = 0;

				pci_unmap_page(jme->pdev,
						 ttxbi->mapping,
						 ttxbi->len,
						 PCI_DMA_TODEVICE);

				ttxbi->mapping = 0;
				ttxbi->len = 0;
			}

			dev_kfree_skb(ctxbi->skb);

			cnt += ctxbi->nr_desc;

			if(unlikely(err))
				++(NET_STAT(jme).tx_carrier_errors);
			else {
				++(NET_STAT(jme).tx_packets);
				NET_STAT(jme).tx_bytes += ctxbi->len;
			}

			ctxbi->skb = NULL;
			ctxbi->len = 0;
			ctxbi->start_xmit = 0;
		}
		else {
			if(!ctxbi->skb)
				tx_dbg(jme->dev->name,
					"Tx Tasklet:"
					" Stopped due to no skb.\n");
			else
				tx_dbg(jme->dev->name,
					"Tx Tasklet:"
					"Stopped due to not done.\n");
			break;
		}

		i = (i + ctxbi->nr_desc) & mask;

		ctxbi->nr_desc = 0;
	}

	tx_dbg(jme->dev->name,
		"Tx Tasklet: Stop %d Jiffies %lu\n",
		i, jiffies);

	atomic_set(&txring->next_to_clean, i);
	atomic_add(cnt, &txring->nr_free);

	/* Reclaimed space may allow a stopped queue to resume. */
	jme_wake_queue_if_stopped(jme);

out:
	atomic_inc(&jme->tx_cleaning);
}
1324
/*
 * Common interrupt service core shared by the INTx and MSI paths.
 * Dispatches @intrstat to the matching tasklets (or NAPI), then acks
 * and re-enables interrupts.  A link change preempts all other work.
 */
static void
jme_intr_msi(struct jme_adapter *jme, __u32 intrstat)
{
	/*
	 * Disable interrupt
	 */
	jwrite32f(jme, JME_IENC, INTR_ENABLE);

	/* Link/SW events invalidate everything else pending. */
	if(intrstat & (INTR_LINKCH | INTR_SWINTR)) {
		tasklet_schedule(&jme->linkch_task);
		goto out_reenable;
	}

	if(intrstat & INTR_TMINTR)
		tasklet_schedule(&jme->pcc_task);

	if(intrstat & (INTR_PCCTXTO | INTR_PCCTX))
		tasklet_schedule(&jme->txclean_task);

	if(jme->flags & JME_FLAG_POLL) {
		/* NAPI mode: count empty events, schedule the poller. */
		if(intrstat & INTR_RX0EMP)
			atomic_inc(&jme->rx_empty);

		if((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
			if(likely(JME_RX_SCHEDULE_PREP(jme))) {
				jme_polling_mode(jme);
				JME_RX_SCHEDULE(jme);
			}
		}
	}
	else {
		/* Tasklet mode: empty ring takes the heavier path. */
		if(intrstat & INTR_RX0EMP) {
			atomic_inc(&jme->rx_empty);
			tasklet_schedule(&jme->rxempty_task);
		}
		else if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
			tasklet_schedule(&jme->rxclean_task);
	}

out_reenable:
	/*
	 * Write 1 clear interrupt status
	 */
	jwrite32f(jme, JME_IEVE, intrstat);

	/*
	 * Re-enable interrupt
	 */
	jwrite32f(jme, JME_IENS, INTR_ENABLE);


}
1377
1378 static irqreturn_t
1379 jme_intr(int irq, void *dev_id)
1380 {
1381         struct net_device *netdev = dev_id;
1382         struct jme_adapter *jme = netdev_priv(netdev);
1383         __u32 intrstat;
1384
1385         intrstat = jread32(jme, JME_IEVE);
1386
1387         /*
1388          * Check if it's really an interrupt for us
1389          */
1390         if(unlikely(intrstat == 0))
1391                 return IRQ_NONE;
1392
1393         /*
1394          * Check if the device still exist
1395          */
1396         if(unlikely(intrstat == ~((typeof(intrstat))0)))
1397                 return IRQ_NONE;
1398
1399         jme_intr_msi(jme, intrstat);
1400
1401         return IRQ_HANDLED;
1402 }
1403
1404 static irqreturn_t
1405 jme_msi(int irq, void *dev_id)
1406 {
1407         struct net_device *netdev = dev_id;
1408         struct jme_adapter *jme = netdev_priv(netdev);
1409         __u32 intrstat;
1410
1411         pci_dma_sync_single_for_cpu(jme->pdev,
1412                                     jme->shadow_dma,
1413                                     sizeof(__u32) * SHADOW_REG_NR,
1414                                     PCI_DMA_FROMDEVICE);
1415         intrstat = jme->shadow_regs[SHADOW_IEVE];
1416         jme->shadow_regs[SHADOW_IEVE] = 0;
1417
1418         jme_intr_msi(jme, intrstat);
1419
1420         return IRQ_HANDLED;
1421 }
1422
/*
 * MSI-X vector 0 handler: miscellaneous events (link change, software
 * interrupt, PCC timer).  Reads the status from the shadow register
 * block, clears only the misc bits there, and masks/acks/unmasks only
 * the misc interrupt sources so the TX/RX vectors are unaffected.
 */
static irqreturn_t
jme_msix_misc(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 intrstat;

	pci_dma_sync_single_for_cpu(jme->pdev,
				    jme->shadow_dma,
				    sizeof(__u32) * SHADOW_REG_NR,
				    PCI_DMA_FROMDEVICE);
	intrstat = jme->shadow_regs[SHADOW_IEVE];
	jme->shadow_regs[SHADOW_IEVE] &= ~INTR_EN_MISC;

	/*
	 * Disable interrupt
	 */
	jwrite32f(jme, JME_IENC, INTR_EN_MISC);

	/* Link/SW events preempt the PCC timer. */
	if(intrstat & (INTR_LINKCH | INTR_SWINTR)) {
		tasklet_schedule(&jme->linkch_task);
		goto out_reenable;
	}

	if(intrstat & INTR_TMINTR)
		tasklet_schedule(&jme->pcc_task);

out_reenable:
	/*
	 * Write 1 clear interrupt status
	 */
	jwrite32f(jme, JME_IEVE, INTR_EN_MISC);

	/*
	 * Re-enable interrupt
	 */
	jwrite32f(jme, JME_IENS, INTR_EN_MISC);

	return IRQ_HANDLED;
}
1463
1464 static irqreturn_t
1465 jme_msix_tx(int irq, void *dev_id)
1466 {
1467         struct net_device *netdev = dev_id;
1468         struct jme_adapter *jme = netdev_priv(netdev);
1469
1470         /*
1471          * Disable interrupt
1472          */
1473         jwrite32f(jme, JME_IENC, INTR_EN_TX);
1474
1475         if(unlikely(atomic_read(&jme->link_changing) != 1))
1476                 goto out_reenable;
1477
1478         tasklet_schedule(&jme->txclean_task);
1479
1480 out_reenable:
1481         /*
1482          * Write 1 clear interrupt status
1483          */
1484         jwrite32f(jme, JME_IEVE, INTR_EN_TX | INTR_TX0);
1485
1486         /*
1487          * Re-enable interrupt
1488          */
1489         jwrite32f(jme, JME_IENS, INTR_EN_TX);
1490
1491         return IRQ_HANDLED;
1492 }
1493
/*
 * MSI-X RX vector handler: reads RX status from the shadow register
 * block, clears only the RX bits there, and dispatches to either the
 * NAPI poller or the RX tasklets depending on JME_FLAG_POLL.  Only the
 * RX interrupt sources are masked/acked/unmasked here.
 */
static irqreturn_t
jme_msix_rx(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 intrstat;

	pci_dma_sync_single_for_cpu(jme->pdev,
				    jme->shadow_dma,
				    sizeof(__u32) * SHADOW_REG_NR,
				    PCI_DMA_FROMDEVICE);
	intrstat = jme->shadow_regs[SHADOW_IEVE];
	jme->shadow_regs[SHADOW_IEVE] &= ~INTR_EN_RX0;

	/*
	 * Disable interrupt
	 */
	jwrite32f(jme, JME_IENC, INTR_EN_RX0);

	/* Leave RX work pending while the link is being reconfigured. */
	if(unlikely(atomic_read(&jme->link_changing) != 1))
		goto out_reenable;

	if(jme->flags & JME_FLAG_POLL) {
		/* NAPI mode: record empty events, schedule the poller. */
		if(intrstat & INTR_RX0EMP)
			atomic_inc(&jme->rx_empty);

		if(likely(JME_RX_SCHEDULE_PREP(jme))) {
			jme_polling_mode(jme);
			JME_RX_SCHEDULE(jme);
		}
	}
	else {
		/* Tasklet mode: empty ring takes the heavier path. */
		if(intrstat & INTR_RX0EMP) {
			atomic_inc(&jme->rx_empty);
			tasklet_schedule(&jme->rxempty_task);
		}
		else if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
			tasklet_schedule(&jme->rxclean_task);
	}

out_reenable:
	/*
	 * Write 1 clear interrupt status
	 */
	jwrite32f(jme, JME_IEVE, INTR_EN_RX0 | INTR_RX0);

	/*
	 * Re-enable interrupt
	 */
	jwrite32f(jme, JME_IENS, INTR_EN_RX0);

	return IRQ_HANDLED;
}
1547
1548 static void
1549 jme_reset_link(struct jme_adapter *jme)
1550 {
1551         jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
1552 }
1553
1554 static void
1555 jme_restart_an(struct jme_adapter *jme)
1556 {
1557         __u32 bmcr;
1558         unsigned long flags;
1559
1560         spin_lock_irqsave(&jme->phy_lock, flags);
1561         bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1562         bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
1563         jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1564         spin_unlock_irqrestore(&jme->phy_lock, flags);
1565 }
1566
1567 static void
1568 jme_setup_msix_info(struct jme_adapter *jme, struct msix_entry *msix_ent)
1569 {
1570         int i;
1571
1572         for (i = 0; i < JME_MSIX_VEC_NR; i++) {
1573                 jme->msix[i].requested = false;
1574                 jme->msix[i].vector = msix_ent[i].vector;
1575                 strcpy(jme->msix[i].name, jme->dev->name);
1576         }
1577
1578         jme->msix[0].handler    = jme_msix_misc;
1579         jme->msix[1].handler    = jme_msix_tx;
1580         jme->msix[2].handler    = jme_msix_rx;
1581
1582         strcat(jme->msix[0].name, "-misc");
1583         strcat(jme->msix[1].name, "-tx");
1584         strcat(jme->msix[2].name, "-rx");
1585 }
1586
/*
 * Program the hardware interrupt-source-to-MSI-X-vector mapping table.
 *
 * Walks all 32 interrupt source bits: TX sources map to vector 1, RX0
 * sources to vector 2, everything else to vector 0.  Entries are packed
 * 4 bits per source, 8 sources per 32-bit register, and each register
 * is written once its 8 entries are accumulated.
 */
static void
jme_fill_msix_regs(struct jme_adapter *jme)
{
	__u32 mask = 1, reg_msix = 0;
	int i, vec;

	for(i = 0 ; i < 32 ; ++i) {
		/* Choose the vector for interrupt source bit i. */
		if(mask & INTR_EN_TX)
			vec = 1;
		else if(mask & INTR_EN_RX0)
			vec = 2;
		else
			vec = 0;

		/* Start a fresh register every 8 sources. */
		if(!(i & 7))
			reg_msix = 0;
		reg_msix |= (vec & 7) << ((i & 7) << 2);
		/* Flush the register once its 8 nibbles are filled. */
		if((i & 7) == 7)
			jwrite32(jme,
				JME_MSIX_ENT + ((i >> 3) << 2),
				reg_msix);

		mask <<= 1;
	}
}
1612
1613 static int
1614 jme_request_msix_irq(struct jme_adapter *jme)
1615 {
1616         int i, rc;
1617         struct jme_msix_info *msix_info;
1618
1619         for (i = 0; i < JME_MSIX_VEC_NR; i++) {
1620                 msix_info = jme->msix + i;
1621                 rc = request_irq(msix_info->vector,
1622                                 msix_info->handler,
1623                                 0,
1624                                 msix_info->name,
1625                                 jme->dev);
1626                 if(rc)
1627                         break;
1628 #if 0
1629 #ifdef CONFIG_SMP
1630                 /*
1631                  * Try to set different cpumask for each irq,
1632                  * ignoring assign fail since it has no critical
1633                  * effect to the working function.
1634                  */
1635                 if(irq_can_set_affinity(msix_info->vector))
1636                         irq_set_affinity(msix_info->vector,
1637                                         cpumask_of_cpu(i % num_online_cpus()));
1638 #endif
1639 #endif
1640                 msix_info->requested = true;
1641         }
1642
1643         return rc;
1644 }
1645
1646 static void
1647 jme_free_msix(struct jme_adapter *jme)
1648 {
1649         int i;
1650         struct jme_msix_info *msix_info;
1651
1652         for (i = 0; i < JME_MSIX_VEC_NR; i++) {
1653                 msix_info = jme->msix + i;
1654                 if(msix_info->requested)
1655                         free_irq(msix_info->vector, jme->dev);
1656                 else
1657                         break;
1658                 msix_info->requested = false;
1659         }
1660         pci_disable_msix(jme->pdev);
1661 }
1662
1663 static int
1664 jme_request_msix(struct jme_adapter *jme)
1665 {
1666         int i, rc;
1667         struct msix_entry msix_ent[JME_MSIX_VEC_NR];
1668
1669         for (i = 0; i < JME_MSIX_VEC_NR; i++) {
1670                 msix_ent[i].entry = i;
1671                 msix_ent[i].vector = 0;
1672         }
1673
1674         rc = pci_enable_msix(jme->pdev, msix_ent, JME_MSIX_VEC_NR);
1675         if (rc)
1676                 goto out;
1677
1678         jme_setup_msix_info(jme, msix_ent);
1679         jme_fill_msix_regs(jme);
1680
1681         rc = jme_request_msix_irq(jme);
1682         if(rc)
1683                 goto out_free_msix;
1684
1685         return 0;
1686
1687 out_free_msix:
1688         jme_free_msix(jme);
1689 out:
1690         return rc;
1691 }
1692
1693 static int
1694 jme_request_irq(struct jme_adapter *jme)
1695 {
1696         int rc;
1697         struct net_device *netdev = jme->dev;
1698         irq_handler_t handler = jme_intr;
1699         int irq_flags = IRQF_SHARED;
1700
1701
1702         if(!jme_request_msix(jme)) {
1703                 jme->flags |= JME_FLAG_MSIX;
1704                 return 0;
1705         }
1706
1707         if(!pci_enable_msi(jme->pdev)) {
1708                 jme->flags |= JME_FLAG_MSI;
1709                 handler = jme_msi;
1710                 irq_flags = 0;
1711         }
1712
1713         rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
1714                           netdev);
1715         if(rc) {
1716                 jeprintk(netdev->name,
1717                         "Unable to request %s interrupt (return: %d)\n",
1718                         jme->flags & JME_FLAG_MSI ? "MSI":"INTx", rc);
1719
1720                 if(jme->flags & JME_FLAG_MSI) {
1721                         pci_disable_msi(jme->pdev);
1722                         jme->flags &= ~JME_FLAG_MSI;
1723                 }
1724         }
1725         else {
1726                 netdev->irq = jme->pdev->irq;
1727         }
1728
1729         return rc;
1730 }
1731
1732 static void
1733 jme_free_irq(struct jme_adapter *jme)
1734 {
1735         if(jme->flags & JME_FLAG_MSIX) {
1736                 jme_free_msix(jme);
1737                 jme->flags &= ~JME_FLAG_MSIX;
1738         }
1739         else {
1740                 free_irq(jme->pdev->irq, jme->dev);
1741                 if (jme->flags & JME_FLAG_MSI) {
1742                         pci_disable_msi(jme->pdev);
1743                         jme->flags &= ~JME_FLAG_MSI;
1744                         jme->dev->irq = jme->pdev->irq;
1745                 }
1746         }
1747 }
1748
1749 static int
1750 jme_open(struct net_device *netdev)
1751 {
1752         struct jme_adapter *jme = netdev_priv(netdev);
1753         int rc, timeout = 10;
1754
1755         while(
1756                 --timeout > 0 &&
1757                 (
1758                 atomic_read(&jme->link_changing) != 1 ||
1759                 atomic_read(&jme->rx_cleaning) != 1 ||
1760                 atomic_read(&jme->tx_cleaning) != 1
1761                 )
1762         )
1763                 msleep(1);
1764
1765         if(!timeout) {
1766                 rc = -EBUSY;
1767                 goto err_out;
1768         }
1769
1770         jme_clear_pm(jme);
1771         jme_reset_mac_processor(jme);
1772         JME_NAPI_ENABLE(jme);
1773
1774         rc = jme_request_irq(jme);
1775         if(rc)
1776                 goto err_out;
1777
1778         jme_enable_shadow(jme);
1779         jme_start_irq(jme);
1780
1781         if(jme->flags & JME_FLAG_SSET)
1782                 jme_set_settings(netdev, &jme->old_ecmd);
1783         else
1784                 jme_reset_phy_processor(jme);
1785
1786         jme_reset_link(jme);
1787
1788         return 0;
1789
1790 err_out:
1791         netif_stop_queue(netdev);
1792         netif_carrier_off(netdev);
1793         return rc;
1794 }
1795
1796 static void
1797 jme_set_100m_half(struct jme_adapter *jme)
1798 {
1799         __u32 bmcr, tmp;
1800
1801         bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1802         tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
1803                        BMCR_SPEED1000 | BMCR_FULLDPLX);
1804         tmp |= BMCR_SPEED100;
1805
1806         if (bmcr != tmp)
1807                 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);
1808
1809         if(jme->fpgaver)
1810                 jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL);
1811         else
1812                 jwrite32(jme, JME_GHC, GHC_SPEED_100M);
1813 }
1814
1815 static void
1816 jme_phy_off(struct jme_adapter *jme)
1817 {
1818         jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
1819 }
1820
1821
/*
 * net_device stop callback: quiesce the queue, tear down interrupts
 * and shadow registers, stop NAPI and all tasklets, then release the
 * rings and power down the PHY.  Always returns 0.
 */
static int
jme_close(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);

	/* Silence interrupt sources before freeing their handlers. */
	jme_stop_irq(jme);
	jme_disable_shadow(jme);
	jme_free_irq(jme);

	JME_NAPI_DISABLE(jme);

	/* With interrupts gone, no tasklet can be re-scheduled. */
	tasklet_kill(&jme->linkch_task);
	tasklet_kill(&jme->txclean_task);
	tasklet_kill(&jme->rxclean_task);
	tasklet_kill(&jme->rxempty_task);

	/* Reset the MAC before ripping the rings out from under it. */
	jme_reset_mac_processor(jme);
	jme_free_rx_resources(jme);
	jme_free_tx_resources(jme);
	jme->phylink = 0;
	jme_phy_off(jme);

	return 0;
}
1849
1850 static int
1851 jme_alloc_txdesc(struct jme_adapter *jme,
1852                         struct sk_buff *skb)
1853 {
1854         struct jme_ring *txring = jme->txring;
1855         int idx, nr_alloc, mask = jme->tx_ring_mask;
1856
1857         idx = txring->next_to_use;
1858         nr_alloc = skb_shinfo(skb)->nr_frags + 2;
1859
1860         if(unlikely(atomic_read(&txring->nr_free) < nr_alloc))
1861                 return -1;
1862
1863         atomic_sub(nr_alloc, &txring->nr_free);
1864
1865         txring->next_to_use = (txring->next_to_use + nr_alloc) & mask;
1866
1867         return idx;
1868 }
1869
/*
 * DMA-map one buffer (a page region) and fill a TX data descriptor
 * with its address, length, and ownership flag.
 *
 * @hidma: non-zero when the device may use 64-bit addressing; sets
 *         TXFLAG_64BIT so the high address word is honored.
 *
 * The mapping is also recorded in @txbi so the TX cleanup path can
 * unmap it later.
 *
 * NOTE(review): the pci_dma_sync_single_for_device() immediately after
 * pci_map_page() appears redundant — a fresh TODEVICE mapping is
 * already device-visible — but is kept as-is; confirm before removing.
 */
static void
jme_fill_tx_map(struct pci_dev *pdev,
		volatile struct txdesc *txdesc,
		struct jme_buffer_info *txbi,
		struct page *page,
		__u32 page_offset,
		__u32 len,
		__u8 hidma)
{
	dma_addr_t dmaaddr;

	dmaaddr = pci_map_page(pdev,
				page,
				page_offset,
				len,
				PCI_DMA_TODEVICE);

	pci_dma_sync_single_for_device(pdev,
				       dmaaddr,
				       len,
				       PCI_DMA_TODEVICE);

	/* Clear the descriptor, then fill it field by field. */
	txdesc->dw[0] = 0;
	txdesc->dw[1] = 0;
	txdesc->desc2.flags	= TXFLAG_OWN;
	txdesc->desc2.flags	|= (hidma)?TXFLAG_64BIT:0;
	txdesc->desc2.datalen	= cpu_to_le16(len);
	txdesc->desc2.bufaddrh	= cpu_to_le32((__u64)dmaaddr >> 32);
	txdesc->desc2.bufaddrl	= cpu_to_le32(
					(__u64)dmaaddr & 0xFFFFFFFFUL);

	/* Remember the mapping for TX-completion unmapping. */
	txbi->mapping = dmaaddr;
	txbi->len = len;
}
1904
static void
jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
        /*
         * DMA-map all data of skb into ring descriptors: page
         * fragments occupy slots idx+2 onward, the linear head goes
         * into slot idx+1.  Slot idx itself is the header descriptor
         * filled later by jme_fill_first_tx_desc().
         */
        struct jme_ring *txring = jme->txring;
        volatile struct txdesc *txdesc = txring->desc, *ctxdesc;
        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
        __u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
        int i, nr_frags = skb_shinfo(skb)->nr_frags;
        int mask = jme->tx_ring_mask;
        struct skb_frag_struct *frag;
        __u32 len;

        for(i = 0 ; i < nr_frags ; ++i) {
                frag = &skb_shinfo(skb)->frags[i];
                /* The ring wraps, hence & mask on every slot index. */
                ctxdesc = txdesc + ((idx + i + 2) & (mask));
                ctxbi = txbi + ((idx + i + 2) & (mask));

                jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page,
                                 frag->page_offset, frag->size, hidma);
        }

        /* Linear part: headlen when paged data follows, else the whole skb. */
        len = skb_is_nonlinear(skb)?skb_headlen(skb):skb->len;
        ctxdesc = txdesc + ((idx + 1) & (mask));
        ctxbi = txbi + ((idx + 1) & (mask));
        jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
                        offset_in_page(skb->data), len, hidma);

}
1933
1934 static int
1935 jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
1936 {
1937         if(unlikely(skb_shinfo(skb)->gso_size &&
1938                         skb_header_cloned(skb) &&
1939                         pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
1940                 dev_kfree_skb(skb);
1941                 return -1;
1942         }
1943
1944         return 0;
1945 }
1946
1947 static int
1948 jme_tx_tso(struct sk_buff *skb,
1949                 volatile __u16 *mss, __u8 *flags)
1950 {
1951         if((*mss = (skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT))) {
1952                 *flags |= TXFLAG_LSEN;
1953
1954                 if(skb->protocol == __constant_htons(ETH_P_IP)) {
1955                         struct iphdr *iph = ip_hdr(skb);
1956
1957                         iph->check = 0;
1958                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1959                                                                 iph->daddr, 0,
1960                                                                 IPPROTO_TCP,
1961                                                                 0);
1962                 }
1963                 else {
1964                         struct ipv6hdr *ip6h = ipv6_hdr(skb);
1965
1966                         tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
1967                                                                 &ip6h->daddr, 0,
1968                                                                 IPPROTO_TCP,
1969                                                                 0);
1970                 }
1971
1972                 return 0;
1973         }
1974
1975         return 1;
1976 }
1977
1978 static void
1979 jme_tx_csum(struct sk_buff *skb, __u8 *flags)
1980 {
1981         if(skb->ip_summed == CHECKSUM_PARTIAL) {
1982                 __u8 ip_proto;
1983
1984                 switch (skb->protocol) {
1985                 case __constant_htons(ETH_P_IP):
1986                         ip_proto = ip_hdr(skb)->protocol;
1987                         break;
1988                 case __constant_htons(ETH_P_IPV6):
1989                         ip_proto = ipv6_hdr(skb)->nexthdr;
1990                         break;
1991                 default:
1992                         ip_proto = 0;
1993                         break;
1994                 }
1995
1996                 switch(ip_proto) {
1997                 case IPPROTO_TCP:
1998                         *flags |= TXFLAG_TCPCS;
1999                         break;
2000                 case IPPROTO_UDP:
2001                         *flags |= TXFLAG_UDPCS;
2002                         break;
2003                 default:
2004                         jeprintk("jme", "Error upper layer protocol.\n");
2005                         break;
2006                 }
2007         }
2008 }
2009
2010 __always_inline static void
2011 jme_tx_vlan(struct sk_buff *skb, volatile __u16 *vlan, __u8 *flags)
2012 {
2013         if(vlan_tx_tag_present(skb)) {
2014                 vlan_dbg("jme", "Tag found!(%04x)\n", vlan_tx_tag_get(skb));
2015                 *flags |= TXFLAG_TAGON;
2016                 *vlan = vlan_tx_tag_get(skb);
2017         }
2018 }
2019
static int
jme_fill_first_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
        /*
         * Fill the header (type-1) descriptor at slot idx after all
         * data descriptors are in place, then publish buffer info
         * for the TX-clean path.  Always returns 0.
         */
        struct jme_ring *txring = jme->txring;
        volatile struct txdesc *txdesc;
        struct jme_buffer_info *txbi;
        __u8 flags;

        txdesc = (volatile struct txdesc*)txring->desc + idx;
        txbi = txring->bufinf + idx;

        txdesc->dw[0] = 0;
        txdesc->dw[1] = 0;
        txdesc->dw[2] = 0;
        txdesc->dw[3] = 0;
        txdesc->desc1.pktsize = cpu_to_le16(skb->len);
        /*
         * Set OWN bit at final.
         * When kernel transmit faster than NIC.
         * And NIC trying to send this descriptor before we tell
         * it to start sending this TX queue.
         * Other fields are already filled correctly.
         */
        wmb();
        flags = TXFLAG_OWN | TXFLAG_INT;
        /* TSO takes precedence; fall back to plain checksum offload. */
        if(jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
                jme_tx_csum(skb, &flags);
        jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
        txdesc->desc1.flags = flags;
        /*
         * Set tx buffer info after telling NIC to send
         * For better tx_clean timing
         */
        wmb();
        txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
        txbi->skb = skb;
        txbi->len = skb->len;
        /* jiffies may legitimately be 0; substitute all-ones so a
         * non-zero start_xmit always means "in flight". */
        if(!(txbi->start_xmit = jiffies))
                txbi->start_xmit = (0UL-1);

        return 0;
}
2063
static void
jme_stop_queue_if_full(struct jme_adapter *jme)
{
        /*
         * Pause the TX queue when fewer than one worst-case skb's
         * worth of descriptors (MAX_SKB_FRAGS + 2) remain, and
         * immediately re-wake it if the completion path freed enough
         * in the meantime.  Also stops the queue when the oldest
         * in-flight skb has been pending for TX_TIMEOUT or longer.
         */
        struct jme_ring *txring = jme->txring;
        struct jme_buffer_info *txbi = txring->bufinf;

        /* Oldest in-flight buffer sits at next_to_clean. */
        txbi += atomic_read(&txring->next_to_clean);

        smp_wmb();
        if(unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
                netif_stop_queue(jme->dev);
                queue_dbg(jme->dev->name, "TX Queue Paused.\n");
                smp_wmb();
                /* Re-check after stopping to close the lost-wakeup
                 * window against the TX completion path. */
                if (atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold)) {
                        netif_wake_queue(jme->dev);
                        queue_dbg(jme->dev->name, "TX Queue Fast Waked.\n");
                }
        }

        if(unlikely(    txbi->start_xmit &&
                        (jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
                        txbi->skb)) {
                netif_stop_queue(jme->dev);
        }
}
2089
/*
 * This function is already protected by netif_tx_lock()
 */
static int
jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        /*
         * hard_start_xmit hook: reserve ring space, DMA-map the skb,
         * fill the header descriptor, then kick TX queue 0.  Returns
         * NETDEV_TX_OK (dropping the skb on header-expansion
         * failure) or NETDEV_TX_BUSY when the ring is unexpectedly
         * full.
         */
        struct jme_adapter *jme = netdev_priv(netdev);
        int idx;

        if(skb_shinfo(skb)->nr_frags) {
                tx_dbg(netdev->name, "Frags: %d Headlen: %d Len: %d MSS: %d Sum:%d\n",
                        skb_shinfo(skb)->nr_frags,
                        skb_headlen(skb),
                        skb->len,
                        skb_shinfo(skb)->gso_size,
                        skb->ip_summed);
        }

        /* TSO needs a private, writable header copy. */
        if(unlikely(jme_expand_header(jme, skb))) {
                ++(NET_STAT(jme).tx_dropped);
                return NETDEV_TX_OK;
        }

        idx = jme_alloc_txdesc(jme, skb);

        /* Should not happen: the queue is stopped before the ring fills. */
        if(unlikely(idx<0)) {
                netif_stop_queue(netdev);
                jeprintk(netdev->name,
                                "BUG! Tx ring full when queue awake!\n");

                return NETDEV_TX_BUSY;
        }

        jme_map_tx_skb(jme, skb, idx);
        jme_fill_first_tx_desc(jme, skb, idx);

        tx_dbg(jme->dev->name, "Xmit: %d+%d\n", idx, skb_shinfo(skb)->nr_frags + 2);

        /* Doorbell: tell the NIC queue 0 has new descriptors. */
        jwrite32(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
                                TXCS_QUEUE0S |
                                TXCS_ENABLE);
        netdev->trans_start = jiffies;

        jme_stop_queue_if_full(jme);

        return NETDEV_TX_OK;
}
2138
2139 static int
2140 jme_set_macaddr(struct net_device *netdev, void *p)
2141 {
2142         struct jme_adapter *jme = netdev_priv(netdev);
2143         struct sockaddr *addr = p;
2144         __u32 val;
2145
2146         if(netif_running(netdev))
2147                 return -EBUSY;
2148
2149         spin_lock(&jme->macaddr_lock);
2150         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2151
2152         val = (addr->sa_data[3] & 0xff) << 24 |
2153               (addr->sa_data[2] & 0xff) << 16 |
2154               (addr->sa_data[1] & 0xff) <<  8 |
2155               (addr->sa_data[0] & 0xff);
2156         jwrite32(jme, JME_RXUMA_LO, val);
2157         val = (addr->sa_data[5] & 0xff) << 8 |
2158               (addr->sa_data[4] & 0xff);
2159         jwrite32(jme, JME_RXUMA_HI, val);
2160         spin_unlock(&jme->macaddr_lock);
2161
2162         return 0;
2163 }
2164
static void
jme_set_multi(struct net_device *netdev)
{
        /*
         * Rebuild the RX filter from netdev->flags: promiscuous,
         * all-multicast, or a 64-bit multicast hash table computed
         * from the device's mc_list.  Runs under rxmcs_lock with
         * IRQs off since reg_rxmcs is shared with other paths.
         */
        struct jme_adapter *jme = netdev_priv(netdev);
        u32 mc_hash[2] = {};
        int i;
        unsigned long flags;

        spin_lock_irqsave(&jme->rxmcs_lock, flags);

        /* Always accept broadcast and our own unicast frames. */
        jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;

        if (netdev->flags & IFF_PROMISC) {
                jme->reg_rxmcs |= RXMCS_ALLFRAME;
        }
        else if (netdev->flags & IFF_ALLMULTI) {
                jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
        }
        else if(netdev->flags & IFF_MULTICAST) {
                struct dev_mc_list *mclist;
                int bit_nr;

                jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
                for (i = 0, mclist = netdev->mc_list;
                        mclist && i < netdev->mc_count;
                        ++i, mclist = mclist->next) {

                        /* CRC of the MAC address selects one of 64 hash bits. */
                        bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
                        mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
                }

                jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
                jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
        }

        /* Hash-table writes must land before enabling the filter mode. */
        wmb();
        jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);

        spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
}
2205
static int
jme_change_mtu(struct net_device *netdev, int new_mtu)
{
        /*
         * Validate and apply a new MTU: retune the RX FIFO
         * threshold, disable hardware offloads the chip cannot do
         * on large frames (> 1900 bytes), then reset the link so the
         * new configuration takes effect.
         */
        struct jme_adapter *jme = netdev_priv(netdev);

        if(new_mtu == jme->old_mtu)
                return 0;

        if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
                ((new_mtu) < IPV6_MIN_MTU))
                return -EINVAL;

        /* Larger frames get a deeper RX FIFO no-pause threshold. */
        if(new_mtu > 4000) {
                jme->reg_rxcs &= ~RXCS_FIFOTHNP;
                jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
                jme_restart_rx_engine(jme);
        }
        else {
                jme->reg_rxcs &= ~RXCS_FIFOTHNP;
                jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
                jme_restart_rx_engine(jme);
        }

        /* Offloads are limited to MTU <= 1900; restore them from the
         * user-selected JME_FLAG_* bits when shrinking back. */
        if(new_mtu > 1900) {
                netdev->features &= ~(NETIF_F_HW_CSUM |
                                NETIF_F_TSO |
                                NETIF_F_TSO6);
        }
        else {
                if(jme->flags & JME_FLAG_TXCSUM)
                        netdev->features |= NETIF_F_HW_CSUM;
                if(jme->flags & JME_FLAG_TSO)
                        netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
        }

        netdev->mtu = new_mtu;
        jme_reset_link(jme);

        return 0;
}
2246
static void
jme_tx_timeout(struct net_device *netdev)
{
        /*
         * TX watchdog handler: reset the PHY, reapply any
         * user-forced link settings cached by jme_set_settings(),
         * and force link renegotiation.
         */
        struct jme_adapter *jme = netdev_priv(netdev);

        jme->phylink = 0;
        jme_reset_phy_processor(jme);
        if(jme->flags & JME_FLAG_SSET)
                jme_set_settings(netdev, &jme->old_ecmd);

        /*
         * Force to Reset the link again
         */
        jme_reset_link(jme);
}
2262
2263 static void
2264 jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2265 {
2266         struct jme_adapter *jme = netdev_priv(netdev);
2267
2268         jme->vlgrp = grp;
2269 }
2270
2271 static void
2272 jme_get_drvinfo(struct net_device *netdev,
2273                      struct ethtool_drvinfo *info)
2274 {
2275         struct jme_adapter *jme = netdev_priv(netdev);
2276
2277         strcpy(info->driver, DRV_NAME);
2278         strcpy(info->version, DRV_VERSION);
2279         strcpy(info->bus_info, pci_name(jme->pdev));
2280 }
2281
2282 static int
2283 jme_get_regs_len(struct net_device *netdev)
2284 {
2285         return JME_REG_LEN; 
2286 }
2287
2288 static void
2289 mmapio_memcpy(struct jme_adapter *jme, __u32 *p, __u32 reg, int len)
2290 {
2291         int i;
2292
2293         for(i = 0 ; i < len ; i += 4)
2294                 p[i >> 2] = jread32(jme, reg + i);
2295 }
2296
2297 static void
2298 mdio_memcpy(struct jme_adapter *jme, __u32 *p, int reg_nr)
2299 {
2300         int i;
2301         __u16 *p16 = (__u16*)p;
2302
2303         for(i = 0 ; i < reg_nr ; ++i)
2304                 p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i);
2305 }
2306
static void
jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
        /*
         * ethtool register dump: the MAC, PHY, MISC and RSS MMIO
         * blocks at fixed 0x100-byte offsets within the buffer,
         * followed by the raw MDIO PHY registers.  Unused space
         * keeps the 0xFF fill.
         */
        struct jme_adapter *jme = netdev_priv(netdev);
        __u32 *p32 = (__u32*)p;

        memset(p, 0xFF, JME_REG_LEN);

        regs->version = 1;
        mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);

        p32 += 0x100 >> 2;
        mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);

        p32 += 0x100 >> 2;
        mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);

        p32 += 0x100 >> 2;
        mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);

        p32 += 0x100 >> 2;
        mdio_memcpy(jme, p32, JME_PHY_REG_NR);
}
2330
2331 static int
2332 jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
2333 {
2334         struct jme_adapter *jme = netdev_priv(netdev);
2335
2336         ecmd->tx_coalesce_usecs = PCC_TX_TO;
2337         ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
2338
2339         if(jme->flags & JME_FLAG_POLL) {
2340                 ecmd->use_adaptive_rx_coalesce = false;
2341                 ecmd->rx_coalesce_usecs = 0;
2342                 ecmd->rx_max_coalesced_frames = 0;
2343                 return 0;
2344         }
2345
2346         ecmd->use_adaptive_rx_coalesce = true;
2347
2348         switch(jme->dpi.cur) {
2349         case PCC_P1:
2350                 ecmd->rx_coalesce_usecs = PCC_P1_TO;
2351                 ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
2352                 break;
2353         case PCC_P2:
2354                 ecmd->rx_coalesce_usecs = PCC_P2_TO;
2355                 ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
2356                 break;
2357         case PCC_P3:
2358                 ecmd->rx_coalesce_usecs = PCC_P3_TO;
2359                 ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
2360                 break;
2361         default:
2362                 break;
2363         }
2364
2365         return 0;
2366 }
2367
static int
jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
{
        /*
         * Toggle between interrupt-coalesced RX (adaptive, dynamic
         * PCC) and polling mode.  Only use_adaptive_rx_coalesce is
         * honoured; the individual values are fixed by the hardware
         * PCC profiles.  Refused while the interface is running.
         */
        struct jme_adapter *jme = netdev_priv(netdev);
        struct dynpcc_info *dpi = &(jme->dpi);

        if(netif_running(netdev))
                return -EBUSY;

        if(ecmd->use_adaptive_rx_coalesce
        && (jme->flags & JME_FLAG_POLL)) {
                /* Polling -> adaptive: restart dynamic PCC at profile 1. */
                jme->flags &= ~JME_FLAG_POLL;
                jme->jme_rx = netif_rx;
                jme->jme_vlan_rx = vlan_hwaccel_rx;
                dpi->cur                = PCC_P1;
                dpi->attempt            = PCC_P1;
                dpi->cnt                = 0;
                jme_set_rx_pcc(jme, PCC_P1);
                jme_interrupt_mode(jme);
        }
        else if(!(ecmd->use_adaptive_rx_coalesce)
        && !(jme->flags & JME_FLAG_POLL)) {
                /* Adaptive -> polling: deliver via the NAPI receive path. */
                jme->flags |= JME_FLAG_POLL;
                jme->jme_rx = netif_receive_skb;
                jme->jme_vlan_rx = vlan_hwaccel_receive_skb;
                jme_interrupt_mode(jme);
        }

        return 0;
}
2398
2399 static void
2400 jme_get_pauseparam(struct net_device *netdev,
2401                         struct ethtool_pauseparam *ecmd)
2402 {
2403         struct jme_adapter *jme = netdev_priv(netdev);
2404         unsigned long flags;
2405         __u32 val;
2406
2407         ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
2408         ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;
2409
2410         spin_lock_irqsave(&jme->phy_lock, flags);
2411         val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2412         spin_unlock_irqrestore(&jme->phy_lock, flags);
2413
2414         ecmd->autoneg =
2415                 (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
2416 }
2417
static int
jme_set_pauseparam(struct net_device *netdev,
                        struct ethtool_pauseparam *ecmd)
{
        /*
         * Apply flow-control settings: TX pause via TXPFC, RX pause
         * via RXMCS (under rxmcs_lock), and pause autonegotiation by
         * rewriting the PHY advertisement register (under phy_lock).
         * Each register is only touched when the requested state
         * actually differs from the current one.
         */
        struct jme_adapter *jme = netdev_priv(netdev);
        unsigned long flags;
        __u32 val;

        if( ((jme->reg_txpfc & TXPFC_PF_EN) != 0) !=
                (ecmd->tx_pause != 0)) {

                if(ecmd->tx_pause)
                        jme->reg_txpfc |= TXPFC_PF_EN;
                else
                        jme->reg_txpfc &= ~TXPFC_PF_EN;

                jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
        }

        spin_lock_irqsave(&jme->rxmcs_lock, flags);
        if( ((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) !=
                (ecmd->rx_pause != 0)) {

                if(ecmd->rx_pause)
                        jme->reg_rxmcs |= RXMCS_FLOWCTRL;
                else
                        jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;

                jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
        }
        spin_unlock_irqrestore(&jme->rxmcs_lock, flags);

        spin_lock_irqsave(&jme->phy_lock, flags);
        val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
        if( ((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) !=
                (ecmd->autoneg != 0)) {

                if(ecmd->autoneg)
                        val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
                else
                        val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

                jme_mdio_write(jme->dev, jme->mii_if.phy_id,
                                MII_ADVERTISE, val);
        }
        spin_unlock_irqrestore(&jme->phy_lock, flags);

        return 0;
}
2467
2468 static void
2469 jme_get_wol(struct net_device *netdev,
2470                 struct ethtool_wolinfo *wol)
2471 {
2472         struct jme_adapter *jme = netdev_priv(netdev);
2473
2474         wol->supported = WAKE_MAGIC | WAKE_PHY;
2475
2476         wol->wolopts = 0;
2477
2478         if(jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
2479                 wol->wolopts |= WAKE_PHY;
2480
2481         if(jme->reg_pmcs & PMCS_MFEN)
2482                 wol->wolopts |= WAKE_MAGIC;
2483
2484 }
2485
2486 static int
2487 jme_set_wol(struct net_device *netdev,
2488                 struct ethtool_wolinfo *wol)
2489 {
2490         struct jme_adapter *jme = netdev_priv(netdev);
2491
2492         if(wol->wolopts & (WAKE_MAGICSECURE |
2493                                 WAKE_UCAST |
2494                                 WAKE_MCAST |
2495                                 WAKE_BCAST |
2496                                 WAKE_ARP))
2497                 return -EOPNOTSUPP;
2498
2499         jme->reg_pmcs = 0;
2500
2501         if(wol->wolopts & WAKE_PHY)
2502                 jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;
2503
2504         if(wol->wolopts & WAKE_MAGIC)
2505                 jme->reg_pmcs |= PMCS_MFEN;
2506
2507
2508         return 0;
2509 }
2510
2511 static int
2512 jme_get_settings(struct net_device *netdev,
2513                      struct ethtool_cmd *ecmd)
2514 {
2515         struct jme_adapter *jme = netdev_priv(netdev);
2516         int rc;
2517         unsigned long flags;
2518
2519         spin_lock_irqsave(&jme->phy_lock, flags);
2520         rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
2521         spin_unlock_irqrestore(&jme->phy_lock, flags);
2522         return rc;
2523 }
2524
static int
jme_set_settings(struct net_device *netdev,
                     struct ethtool_cmd *ecmd)
{
        /*
         * Apply ethtool link settings via the generic MII helper.
         * 1000Mbps requires autonegotiation.  On success the
         * settings are cached (JME_FLAG_SSET / old_ecmd) so
         * jme_tx_timeout() can reapply them after a PHY reset.
         */
        struct jme_adapter *jme = netdev_priv(netdev);
        int rc, fdc=0;
        unsigned long flags;

        if(ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
                return -EINVAL;

        /* A forced duplex change needs a full link reset afterwards. */
        if(jme->mii_if.force_media &&
        ecmd->autoneg != AUTONEG_ENABLE &&
        (jme->mii_if.full_duplex != ecmd->duplex))
                fdc = 1;

        spin_lock_irqsave(&jme->phy_lock, flags);
        rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
        spin_unlock_irqrestore(&jme->phy_lock, flags);

        if(!rc && fdc)
                jme_reset_link(jme);

        if(!rc) {
                jme->flags |= JME_FLAG_SSET;
                jme->old_ecmd = *ecmd;
        }

        return rc;
}
2555
2556 static __u32
2557 jme_get_link(struct net_device *netdev)
2558 {
2559         struct jme_adapter *jme = netdev_priv(netdev);
2560         return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
2561 }
2562
2563 static u32
2564 jme_get_rx_csum(struct net_device *netdev)
2565 {
2566         struct jme_adapter *jme = netdev_priv(netdev);
2567
2568         return jme->reg_rxmcs & RXMCS_CHECKSUM;
2569 }
2570
2571 static int
2572 jme_set_rx_csum(struct net_device *netdev, u32 on)
2573 {
2574         struct jme_adapter *jme = netdev_priv(netdev);
2575         unsigned long flags;
2576
2577         spin_lock_irqsave(&jme->rxmcs_lock, flags);
2578         if(on)
2579                 jme->reg_rxmcs |= RXMCS_CHECKSUM;
2580         else
2581                 jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
2582         jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2583         spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
2584
2585         return 0;
2586 }
2587
2588 static int
2589 jme_set_tx_csum(struct net_device *netdev, u32 on)
2590 {
2591         struct jme_adapter *jme = netdev_priv(netdev);
2592
2593         if(on) {
2594                 jme->flags |= JME_FLAG_TXCSUM;
2595                 if(netdev->mtu <= 1900)
2596                         netdev->features |= NETIF_F_HW_CSUM;
2597         }
2598         else {
2599                 jme->flags &= ~JME_FLAG_TXCSUM;
2600                 netdev->features &= ~NETIF_F_HW_CSUM;
2601         }
2602
2603         return 0;
2604 }
2605
2606 static int
2607 jme_set_tso(struct net_device *netdev, u32 on)
2608 {
2609         struct jme_adapter *jme = netdev_priv(netdev);
2610
2611         if (on) {
2612                 jme->flags |= JME_FLAG_TSO;
2613                 if(netdev->mtu <= 1900)
2614                         netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
2615         }
2616         else {
2617                 jme->flags &= ~JME_FLAG_TSO;
2618                 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
2619         }
2620
2621         return 0;
2622 }
2623
static int
jme_nway_reset(struct net_device *netdev)
{
        /* Kick the PHY into restarting autonegotiation. */
        jme_restart_an(netdev_priv(netdev));

        return 0;
}
2631
static __u8
jme_smb_read(struct jme_adapter *jme, unsigned int addr)
{
        /*
         * Read one byte from the SMBus-attached EEPROM.  Polls at
         * 1ms granularity (up to JME_SMB_BUSY_TIMEOUT tries) first
         * for the bus to go idle, then for the issued read command
         * to complete.  Returns 0xFF on timeout — callers cannot
         * distinguish this from a genuine 0xFF data byte.
         */
        __u32 val;
        int to;

        val = jread32(jme, JME_SMBCSR);
        to = JME_SMB_BUSY_TIMEOUT;
        while((val & SMBCSR_BUSY) && --to) {
                msleep(1);
                val = jread32(jme, JME_SMBCSR);
        }
        if(!to) {
                jeprintk(jme->dev->name, "SMB Bus Busy.\n");
                return 0xFF;
        }

        /* Issue the read command for the given EEPROM address. */
        jwrite32(jme, JME_SMBINTF,
                ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
                SMBINTF_HWRWN_READ |
                SMBINTF_HWCMD);

        /* HWCMD clears when the controller has finished the transfer. */
        val = jread32(jme, JME_SMBINTF);
        to = JME_SMB_BUSY_TIMEOUT;
        while((val & SMBINTF_HWCMD) && --to) {
                msleep(1);
                val = jread32(jme, JME_SMBINTF);
        }
        if(!to) {
                jeprintk(jme->dev->name, "SMB Bus Busy.\n");
                return 0xFF;
        }

        return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
}
2667
static void
jme_smb_write(struct jme_adapter *jme, unsigned int addr, __u8 data)
{
        /*
         * Write one byte to the SMBus-attached EEPROM.  Same polling
         * scheme as jme_smb_read(); failures are only logged since
         * the return type is void.  The trailing mdelay(2) allows
         * the EEPROM's internal write cycle to finish — presumably
         * matched to the part's spec; confirm before shortening.
         */
        __u32 val;
        int to;

        val = jread32(jme, JME_SMBCSR);
        to = JME_SMB_BUSY_TIMEOUT;
        while((val & SMBCSR_BUSY) && --to) {
                msleep(1);
                val = jread32(jme, JME_SMBCSR);
        }
        if(!to) {
                jeprintk(jme->dev->name, "SMB Bus Busy.\n");
                return;
        }

        /* Issue the write command: data byte plus EEPROM address. */
        jwrite32(jme, JME_SMBINTF,
                ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
                ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
                SMBINTF_HWRWN_WRITE |
                SMBINTF_HWCMD);

        /* HWCMD clears when the controller has finished the transfer. */
        val = jread32(jme, JME_SMBINTF);
        to = JME_SMB_BUSY_TIMEOUT;
        while((val & SMBINTF_HWCMD) && --to) {
                msleep(1);
                val = jread32(jme, JME_SMBINTF);
        }
        if(!to) {
                jeprintk(jme->dev->name, "SMB Bus Busy.\n");
                return;
        }

        mdelay(2);
}
2704
2705 static int
2706 jme_get_eeprom_len(struct net_device *netdev)
2707 {
2708         struct jme_adapter *jme = netdev_priv(netdev);
2709         __u32 val;
2710         val = jread32(jme, JME_SMBCSR);
2711         return (val & SMBCSR_EEPROMD)?JME_SMB_LEN:0;
2712 }
2713
static int
jme_get_eeprom(struct net_device *netdev,
                struct ethtool_eeprom *eeprom, u8 *data)
{
        /*
         * Read eeprom->len bytes starting at eeprom->offset over
         * SMBus.  Reading stops early at the first 0xFF byte; the
         * remaining bytes keep the 0xFF fill.  The in-loop check
         * also trims len just past the end of the current EEPROM
         * descriptor.  NOTE(review): the 3-byte descriptor stride
         * and the end-marker bit (0x80) are inferred from the
         * "(idx - 2) % 3" arithmetic — confirm against the JMC2x0
         * EEPROM layout documentation.
         */
        struct jme_adapter *jme = netdev_priv(netdev);
        int i, offset = eeprom->offset, len = eeprom->len, idx;

        /*
         * ethtool will check the boundary for us
         */
        memset(data, 0xFF, len);
        eeprom->magic = JME_EEPROM_MAGIC;
        for(i = 0 ; i < len ; ++i) {
                idx = i + offset;
                data[i] = jme_smb_read(jme, idx);
                if(data[i] == 0xFF)
                        break;
                if((idx > 1) && !((idx - 2) % 3) && (data[i] & 0x80))
                        len = (len > i + 3)?i + 3:len;
        }

        return 0;
}
2737
2738 static int
2739 jme_set_eeprom(struct net_device *netdev,
2740                 struct ethtool_eeprom *eeprom, u8 *data)
2741 {
2742         struct jme_adapter *jme = netdev_priv(netdev);
2743         int i, offset = eeprom->offset, len = eeprom->len;
2744
2745         if (eeprom->magic != JME_EEPROM_MAGIC)
2746                 return -EINVAL;
2747
2748         /*
2749          * ethtool will check the boundary for us
2750          */
2751         for(i = 0 ; i < len ; ++i)
2752                 jme_smb_write(jme, i + offset, data[i]);
2753
2754         return 0;
2755 }
2756
/*
 * ethtool entry points.  All handlers are defined above; scatter-
 * gather offload uses the stock ethtool helper.
 */
static const struct ethtool_ops jme_ethtool_ops = {
        .get_drvinfo            = jme_get_drvinfo,
        .get_regs_len           = jme_get_regs_len,
        .get_regs               = jme_get_regs,
        .get_coalesce           = jme_get_coalesce,
        .set_coalesce           = jme_set_coalesce,
        .get_pauseparam         = jme_get_pauseparam,
        .set_pauseparam         = jme_set_pauseparam,
        .get_wol                = jme_get_wol,
        .set_wol                = jme_set_wol,
        .get_settings           = jme_get_settings,
        .set_settings           = jme_set_settings,
        .get_link               = jme_get_link,
        .get_rx_csum            = jme_get_rx_csum,
        .set_rx_csum            = jme_set_rx_csum,
        .set_tx_csum            = jme_set_tx_csum,
        .set_tso                = jme_set_tso,
        .set_sg                 = ethtool_op_set_sg,
        .nway_reset             = jme_nway_reset,
        .get_eeprom_len         = jme_get_eeprom_len,
        .get_eeprom             = jme_get_eeprom,
        .set_eeprom             = jme_set_eeprom,
};
2780
2781 static int
2782 jme_pci_dma64(struct pci_dev *pdev)
2783 {
2784         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
2785                 if(!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
2786                         dprintk("jme", "64Bit DMA Selected.\n");
2787                         return 1;
2788                 }
2789
2790         if (!pci_set_dma_mask(pdev, DMA_40BIT_MASK))
2791                 if(!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK)) {
2792                         dprintk("jme", "40Bit DMA Selected.\n");
2793                         return 1;
2794                 }
2795
2796         if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
2797                 if(!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
2798                         dprintk("jme", "32Bit DMA Selected.\n");
2799                         return 0;
2800                 }
2801
2802         return -1;
2803 }
2804
__always_inline static void
jme_phy_init(struct jme_adapter *jme)
{
        /*
         * Vendor PHY tweak: set bit 12 of non-standard PHY register
         * 26.  NOTE(review): the register's meaning is undocumented
         * here — presumably an init/calibration bit per the JMicron
         * PHY datasheet; confirm before changing.
         */
        __u16 reg26;

        reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
}
2813
__always_inline static void
jme_set_gmii(struct jme_adapter *jme)
{
        /*
         * Write 0x0004 to non-standard PHY register 27.
         * NOTE(review): presumably selects GMII mode (hence the
         * function name) per the JMicron PHY datasheet — the
         * register is undocumented here; confirm before changing.
         */
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
}
2819
2820 static void
2821 jme_check_hw_ver(struct jme_adapter *jme)
2822 {
2823         __u32 chipmode;
2824
2825         chipmode = jread32(jme, JME_CHIPMODE);
2826
2827         jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
2828         jme->chipver = (chipmode & CM_CHIPVER_MASK) >> CM_CHIPVER_SHIFT;
2829 }
2830
2831 static int __devinit
2832 jme_init_one(struct pci_dev *pdev,
2833              const struct pci_device_id *ent)
2834 {
2835         int rc = 0, using_dac, i;
2836         struct net_device *netdev;
2837         struct jme_adapter *jme;
2838         __u16 bmcr, bmsr;
2839
2840         /*
2841          * set up PCI device basics
2842          */
2843         rc = pci_enable_device(pdev);
2844         if(rc) {
2845                 printk(KERN_ERR PFX "Cannot enable PCI device.\n");
2846                 goto err_out;
2847         }
2848
2849         using_dac = jme_pci_dma64(pdev);
2850         if(using_dac < 0) {
2851                 printk(KERN_ERR PFX "Cannot set PCI DMA Mask.\n");
2852                 rc = -EIO;
2853                 goto err_out_disable_pdev;
2854         }
2855
2856         if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2857                 printk(KERN_ERR PFX "No PCI resource region found.\n");
2858                 rc = -ENOMEM;
2859                 goto err_out_disable_pdev;
2860         }
2861
2862         rc = pci_request_regions(pdev, DRV_NAME);
2863         if(rc) {
2864                 printk(KERN_ERR PFX "Cannot obtain PCI resource region.\n");
2865                 goto err_out_disable_pdev;
2866         }
2867
2868         pci_set_master(pdev);
2869
2870         /*
2871          * alloc and init net device
2872          */
2873         netdev = alloc_etherdev(sizeof(*jme));
2874         if(!netdev) {
2875                 printk(KERN_ERR PFX "Cannot allocate netdev structure.\n");
2876                 rc = -ENOMEM;
2877                 goto err_out_release_regions;
2878         }
2879         netdev->open                    = jme_open;
2880         netdev->stop                    = jme_close;
2881         netdev->hard_start_xmit         = jme_start_xmit;
2882         netdev->set_mac_address         = jme_set_macaddr;
2883         netdev->set_multicast_list      = jme_set_multi;
2884         netdev->change_mtu              = jme_change_mtu;
2885         netdev->ethtool_ops             = &jme_ethtool_ops;
2886         netdev->tx_timeout              = jme_tx_timeout;
2887         netdev->watchdog_timeo          = TX_TIMEOUT;
2888         netdev->vlan_rx_register        = jme_vlan_rx_register;
2889         NETDEV_GET_STATS(netdev, &jme_get_stats);
2890         netdev->features                =       NETIF_F_HW_CSUM |
2891                                                 NETIF_F_SG |
2892                                                 NETIF_F_TSO |
2893                                                 NETIF_F_TSO6 |
2894                                                 NETIF_F_HW_VLAN_TX |
2895                                                 NETIF_F_HW_VLAN_RX;
2896         if(using_dac)
2897                 netdev->features        |=      NETIF_F_HIGHDMA;
2898
2899         SET_NETDEV_DEV(netdev, &pdev->dev);
2900         pci_set_drvdata(pdev, netdev);
2901
2902         /*
2903          * init adapter info
2904          */
2905         jme = netdev_priv(netdev);
2906         jme->pdev = pdev;
2907         jme->dev = netdev;
2908         jme->jme_rx = netif_rx;
2909         jme->jme_vlan_rx = vlan_hwaccel_rx;
2910         jme->old_mtu = netdev->mtu = 1500;
2911         jme->phylink = 0;
2912         jme->tx_ring_size = 1 << 10;
2913         jme->tx_ring_mask = jme->tx_ring_size - 1;
2914         jme->tx_wake_threshold = 1 << 9;
2915         jme->rx_ring_size = 1 << 9;
2916         jme->rx_ring_mask = jme->rx_ring_size - 1;
2917         jme->regs = ioremap(pci_resource_start(pdev, 0),
2918                              pci_resource_len(pdev, 0));
2919         if (!(jme->regs)) {
2920                 printk(KERN_ERR PFX "Mapping PCI resource region error.\n");
2921                 rc = -ENOMEM;
2922                 goto err_out_free_netdev;
2923         }
2924         jme->shadow_regs = pci_alloc_consistent(pdev,
2925                                                 sizeof(__u32) * SHADOW_REG_NR,
2926                                                 &(jme->shadow_dma));
2927         if (!(jme->shadow_regs)) {
2928                 printk(KERN_ERR PFX "Allocating shadow register mapping error.\n");
2929                 rc = -ENOMEM;
2930                 goto err_out_unmap;
2931         }
2932
2933         NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)
2934
2935         spin_lock_init(&jme->phy_lock);
2936         spin_lock_init(&jme->macaddr_lock);
2937         spin_lock_init(&jme->rxmcs_lock);
2938
2939         atomic_set(&jme->link_changing, 1);
2940         atomic_set(&jme->rx_cleaning, 1);
2941         atomic_set(&jme->tx_cleaning, 1);
2942         atomic_set(&jme->rx_empty, 1);
2943
2944         tasklet_init(&jme->pcc_task,
2945                      &jme_pcc_tasklet,
2946                      (unsigned long) jme);
2947         tasklet_init(&jme->linkch_task,
2948                      &jme_link_change_tasklet,
2949                      (unsigned long) jme);
2950         tasklet_init(&jme->txclean_task,
2951                      &jme_tx_clean_tasklet,
2952                      (unsigned long) jme);
2953         tasklet_init(&jme->rxclean_task,
2954                      &jme_rx_clean_tasklet,
2955                      (unsigned long) jme);
2956         tasklet_init(&jme->rxempty_task,
2957                      &jme_rx_empty_tasklet,
2958                      (unsigned long) jme);
2959         jme->dpi.cur = PCC_P1;
2960
2961         if(pdev->device == JME_GE_DEVICE)
2962                 jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
2963         else
2964                 jme->reg_ghc = GHC_DPX | GHC_SPEED_100M;
2965         jme->reg_rxcs = RXCS_DEFAULT;
2966         jme->reg_rxmcs = RXMCS_DEFAULT;
2967         jme->reg_txpfc = 0;
2968         jme->reg_pmcs = PMCS_LFEN | PMCS_LREN | PMCS_MFEN;
2969         jme->flags = JME_FLAG_TXCSUM | JME_FLAG_TSO;
2970
2971         /*
2972          * Get Max Read Req Size from PCI Config Space
2973          */
2974         pci_read_config_byte(pdev, PCI_CONF_DCSR_MRRS, &jme->mrrs);
2975         switch(jme->mrrs) {
2976                 case MRRS_128B:
2977                         jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
2978                         break;
2979                 case MRRS_256B:
2980                         jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
2981                         break;
2982                 default:
2983                         jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
2984                         break;
2985         };
2986
2987
2988         /*
2989          * Must check before reset_mac_processor
2990          */
2991         jme_check_hw_ver(jme);
2992         jme->mii_if.dev = netdev;
2993         if(jme->fpgaver) {
2994                 jme->mii_if.phy_id = 0;
2995                 for(i = 1 ; i < 32 ; ++i) {
2996                         bmcr = jme_mdio_read(netdev, i, MII_BMCR);
2997                         bmsr = jme_mdio_read(netdev, i, MII_BMSR);
2998                         if(bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
2999                                 jme->mii_if.phy_id = i;
3000                                 break;
3001                         }
3002                 }
3003
3004                 if(!jme->mii_if.phy_id) {
3005                         rc = -EIO;
3006                         printk(KERN_ERR PFX "Can not find phy_id.\n");
3007                          goto err_out_free_shadow;
3008                 }
3009
3010                 jme->reg_ghc |= GHC_LINK_POLL;
3011         }
3012         else {
3013                 jme->mii_if.phy_id = 1;
3014         }
3015         if(pdev->device == JME_GE_DEVICE)
3016                 jme->mii_if.supports_gmii = true;
3017         else
3018                 jme->mii_if.supports_gmii = false;
3019         jme->mii_if.mdio_read = jme_mdio_read;
3020         jme->mii_if.mdio_write = jme_mdio_write;
3021
3022         jme_clear_pm(jme);
3023         if(jme->fpgaver)
3024                 jme_set_gmii(jme);
3025         else
3026                 jme_phy_init(jme);
3027         jme_phy_off(jme);
3028
3029         /*
3030          * Reset MAC processor and reload EEPROM for MAC Address
3031          */
3032         jme_reset_mac_processor(jme);
3033         rc = jme_reload_eeprom(jme);
3034         if(rc) {
3035                 printk(KERN_ERR PFX
3036                         "Reload eeprom for reading MAC Address error.\n");
3037                 goto err_out_free_shadow;
3038         }
3039         jme_load_macaddr(netdev);
3040
3041
3042         /*
3043          * Tell stack that we are not ready to work until open()
3044          */
3045         netif_carrier_off(netdev);
3046         netif_stop_queue(netdev);
3047
3048         /*
3049          * Register netdev
3050          */
3051         rc = register_netdev(netdev);
3052         if(rc) {
3053                 printk(KERN_ERR PFX "Cannot register net device.\n");
3054                 goto err_out_free_shadow;
3055         }
3056
3057         jprintk(netdev->name,
3058                 "JMC250 gigabit%s ver:%u eth %02x:%02x:%02x:%02x:%02x:%02x\n",
3059                 (jme->fpgaver != 0)?" (FPGA)":"",
3060                 (jme->fpgaver != 0)?jme->fpgaver:jme->chipver,
3061                 netdev->dev_addr[0],
3062                 netdev->dev_addr[1],
3063                 netdev->dev_addr[2],
3064                 netdev->dev_addr[3],
3065                 netdev->dev_addr[4],
3066                 netdev->dev_addr[5]);
3067
3068         return 0;
3069
3070 err_out_free_shadow:
3071         pci_free_consistent(pdev,
3072                             sizeof(__u32) * SHADOW_REG_NR,
3073                             jme->shadow_regs,
3074                             jme->shadow_dma);
3075 err_out_unmap:
3076         iounmap(jme->regs);
3077 err_out_free_netdev:
3078         pci_set_drvdata(pdev, NULL);
3079         free_netdev(netdev);
3080 err_out_release_regions:
3081         pci_release_regions(pdev);
3082 err_out_disable_pdev:
3083         pci_disable_device(pdev);
3084 err_out:
3085         return rc;
3086 }
3087
/*
 * Device removal hook: undo jme_init_one() in reverse order --
 * unregister from the stack first so no new I/O arrives, then free
 * the shadow-register DMA buffer, unmap BAR0, free the netdev and
 * release/disable the PCI function.
 */
static void __devexit
jme_remove_one(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct jme_adapter *jme = netdev_priv(netdev);

        unregister_netdev(netdev);
        pci_free_consistent(pdev,
                            sizeof(__u32) * SHADOW_REG_NR,
                            jme->shadow_regs,
                            jme->shadow_dma);
        iounmap(jme->regs);
        pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);

}
3106
/*
 * PM suspend hook: quiesce the interface, wait for cleaning tasklets
 * to go idle, optionally arm Wake-on-LAN, then enter the requested
 * sleep state.  Returns 0 on success, -EBUSY if tasklets never drain.
 */
static int
jme_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct jme_adapter *jme = netdev_priv(netdev);
        int timeout = 100;

        /* Inhibit link-change handling while tearing down (re-inc'ed
         * in jme_resume()). */
        atomic_dec(&jme->link_changing);

        netif_device_detach(netdev);
        netif_stop_queue(netdev);
        jme_stop_irq(jme);
        jme_free_irq(jme);

        /* Busy-wait up to ~100ms for rx/tx cleaning tasklets to become
         * idle (value 1 means idle, see jme_init_one()). */
        while(--timeout > 0 &&
        (
                atomic_read(&jme->rx_cleaning) != 1 ||
                atomic_read(&jme->tx_cleaning) != 1
        )) {
                mdelay(1);
        }
        if(!timeout) {
                jeprintk(netdev->name, "Waiting tasklets timeout.\n");
                /*
                 * NOTE(review): this failure path returns with the device
                 * detached, IRQ freed and link_changing decremented -- no
                 * rollback is performed.  Confirm the PM core / callers
                 * tolerate a device left in this state.
                 */
                return -EBUSY;
        }
        jme_disable_shadow(jme);

        if(netif_carrier_ok(netdev)) {
                /* Link was up: stop the coalescing timer, reset the MAC
                 * and release the descriptor rings. */
                jme_stop_pcc_timer(jme);
                jme_reset_mac_processor(jme);
                jme_free_rx_resources(jme);
                jme_free_tx_resources(jme);
                netif_carrier_off(netdev);
                jme->phylink = 0;

                if(jme->flags & JME_FLAG_POLL)
                        jme_polling_mode(jme);
        }


        pci_save_state(pdev);
        if(jme->reg_pmcs) {
                /* WoL events configured: drop to 100M half-duplex and
                 * program the wake-event mask into PMCS. */
                jme_set_100m_half(jme);
                jwrite32(jme, JME_PMCS, jme->reg_pmcs);
                pci_enable_wake(pdev, PCI_D3hot, true);
                pci_enable_wake(pdev, PCI_D3cold, true);
        }
        else {
                /* No wake source wanted: power the PHY down entirely. */
                jme_phy_off(jme);
                pci_enable_wake(pdev, PCI_D3hot, false);
                pci_enable_wake(pdev, PCI_D3cold, false);
        }
        pci_set_power_state(pdev, pci_choose_state(pdev, state));

        return 0;
}
3163
/*
 * PM resume hook: restore PCI config space, re-initialize the PHY and
 * MAC, re-enable shadow registers and IRQs, reattach the netdev and
 * trigger a link re-check.  Always returns 0.
 */
static int
jme_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct jme_adapter *jme = netdev_priv(netdev);

        jme_clear_pm(jme);
        pci_restore_state(pdev);

        /* Reapply the user's saved ethtool settings if any, otherwise
         * just reset the PHY to defaults. */
        if(jme->flags & JME_FLAG_SSET)
                jme_set_settings(netdev, &jme->old_ecmd);
        else
                jme_reset_phy_processor(jme);

        jme_reset_mac_processor(jme);
        jme_enable_shadow(jme);
        /* NOTE(review): jme_request_irq()'s return value is ignored
         * here -- verify it cannot fail on resume. */
        jme_request_irq(jme);
        jme_start_irq(jme);
        netif_device_attach(netdev);

        /* Paired with the atomic_dec() in jme_suspend(). */
        atomic_inc(&jme->link_changing);

        jme_reset_link(jme);

        return 0;
}
3190
/* PCI IDs this driver binds to: the JME_GE_DEVICE (gigabit) and
 * JME_FE_DEVICE (fast ethernet, presumably) JMicron parts. */
static struct pci_device_id jme_pci_tbl[] = {
        { PCI_VDEVICE(JMICRON, JME_GE_DEVICE) },
        { PCI_VDEVICE(JMICRON, JME_FE_DEVICE) },
        { }     /* terminating entry */
};
3196
/* PCI driver registration: probe/remove plus optional PM hooks. */
static struct pci_driver jme_driver = {
        .name           = DRV_NAME,
        .id_table       = jme_pci_tbl,
        .probe          = jme_init_one,
        .remove         = __devexit_p(jme_remove_one),
#ifdef CONFIG_PM
        .suspend        = jme_suspend,
        .resume         = jme_resume,
#endif /* CONFIG_PM */
};
3207
3208 static int __init
3209 jme_init_module(void)
3210 {
3211         printk(KERN_INFO PFX "JMicron JMC250 gigabit ethernet "
3212                "driver version %s\n", DRV_VERSION);
3213         return pci_register_driver(&jme_driver);
3214 }
3215
/* Module exit point: detach from the PCI core (triggers remove_one
 * for every bound device). */
static void __exit
jme_cleanup_module(void)
{
        pci_unregister_driver(&jme_driver);
}
3221
/* Module entry/exit wiring and metadata; the device table enables
 * automatic module loading via modprobe aliases. */
module_init(jme_init_module);
module_exit(jme_cleanup_module);

MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);