/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * http://www.jmicron.com/
 *
 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

/*
 * TODO:
 *      -  Implement MSI-X,
 *         along with multiple RX queues, for CPU load balancing.
 *      -  Decode register dump for ethtool.
 *      -  Implement NAPI?
 *         The PCC supports both packet-counter and timeout interrupts
 *         for receive and transmit completion, so is NAPI really needed?
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>
#include "jme.h"

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
static struct net_device_stats *
jme_get_stats(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        return &jme->stats;
}
#endif

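/*
 * PHY register access through the SMI/MDIO interface: a read or write
 * request is posted to the SMI register, then the CPU busy-waits (up
 * to JME_PHY_TIMEOUT microseconds) for the hardware to clear
 * SMI_OP_REQ before the result (or completion) is trusted.
 */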
static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int i, val;

        jwrite32(jme, JME_SMI, SMI_OP_REQ |
                                smi_phy_addr(phy) |
                                smi_reg_addr(reg));

        wmb();
        for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
                udelay(1);
                val = jread32(jme, JME_SMI);
                if ((val & SMI_OP_REQ) == 0)
                        break;
        }

        if (i == 0) {
                jeprintk(netdev->name, "phy read timeout : %d\n", reg);
                return 0;
        }

        return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

static void
jme_mdio_write(struct net_device *netdev,
                                int phy, int reg, int val)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int i;

        jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
                ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
                smi_phy_addr(phy) | smi_reg_addr(reg));

        wmb();
        for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
                udelay(1);
                val = jread32(jme, JME_SMI);
                if ((val & SMI_OP_REQ) == 0)
                        break;
        }

        if (i == 0)
                jeprintk(netdev->name, "phy write timeout : %d\n", reg);

        return;
}

__always_inline static void
jme_reset_phy_processor(struct jme_adapter *jme)
{
        __u32 val;

        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
                        MII_ADVERTISE, ADVERTISE_ALL |
                        ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
                        MII_CTRL1000,
                        ADVERTISE_1000FULL | ADVERTISE_1000HALF);

        val = jme_mdio_read(jme->dev,
                                jme->mii_if.phy_id,
                                MII_BMCR);

        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
                        MII_BMCR, val | BMCR_RESET);

        return;
}

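/*
 * Program one Wake-on-LAN frame pattern: the CRC of the wake frame and
 * its byte-mask dwords are written through the WFOI (index select) and
 * WFODP (data port) register pair, one dword at a time.
 */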
static void
jme_setup_wakeup_frame(struct jme_adapter *jme,
                __u32 *mask, __u32 crc, int fnr)
{
        int i;

        /*
         * Setup CRC pattern
         */
        jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
        wmb();
        jwrite32(jme, JME_WFODP, crc);
        wmb();

        /*
         * Setup Mask
         */
        for(i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
                jwrite32(jme, JME_WFOI,
                                ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
                                (fnr & WFOI_FRAME_SEL));
                wmb();
                jwrite32(jme, JME_WFODP, mask[i]);
                wmb();
        }
}

__always_inline static void
jme_reset_mac_processor(struct jme_adapter *jme)
{
        __u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0,0,0,0};
        __u32 crc = 0xCDCDCDCD;
        int i;

        jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
        udelay(2);
        jwrite32(jme, JME_GHC, jme->reg_ghc);
        jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
        jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
        for(i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
                jme_setup_wakeup_frame(jme, mask, crc, i);
        jwrite32(jme, JME_GPREG0, GPREG0_DEFAULT);
        jwrite32(jme, JME_GPREG1, 0);
}

__always_inline static void
jme_clear_pm(struct jme_adapter *jme)
{
        jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
        pci_set_power_state(jme->pdev, PCI_D0);
        pci_enable_wake(jme->pdev, PCI_D0, false);
}

static int
jme_reload_eeprom(struct jme_adapter *jme)
{
        __u32 val;
        int i;

        val = jread32(jme, JME_SMBCSR);

        if(val & SMBCSR_EEPROMD)
        {
                val |= SMBCSR_CNACK;
                jwrite32(jme, JME_SMBCSR, val);
                val |= SMBCSR_RELOAD;
                jwrite32(jme, JME_SMBCSR, val);
                mdelay(12);

                for (i = JME_SMB_TIMEOUT; i > 0; --i)
                {
                        mdelay(1);
                        if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
                                break;
                }

                if(i == 0) {
                        jeprintk(jme->dev->name, "eeprom reload timeout\n");
                        return -EIO;
                }
        }
        else
                return -EIO;

        return 0;
}

static void
jme_load_macaddr(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        unsigned char macaddr[6];
        __u32 val;

        spin_lock(&jme->macaddr_lock);
        val = jread32(jme, JME_RXUMA_LO);
        macaddr[0] = (val >>  0) & 0xFF;
        macaddr[1] = (val >>  8) & 0xFF;
        macaddr[2] = (val >> 16) & 0xFF;
        macaddr[3] = (val >> 24) & 0xFF;
        val = jread32(jme, JME_RXUMA_HI);
        macaddr[4] = (val >>  0) & 0xFF;
        macaddr[5] = (val >>  8) & 0xFF;
        memcpy(netdev->dev_addr, macaddr, 6);
        spin_unlock(&jme->macaddr_lock);
}

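/*
 * Program RX packet-completion coalescing profile p into PCCRX0. Each
 * profile pairs an interrupt timeout with a packet-count threshold;
 * jme_dynamic_pcc() below attempts P1 at low RX rates and P3 at high
 * rates, trading interrupt latency for fewer interrupts.
 */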
__always_inline static void
jme_set_rx_pcc(struct jme_adapter *jme, int p)
{
        switch(p) {
        case PCC_P1:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        case PCC_P2:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        case PCC_P3:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        default:
                break;
        }

        dprintk(jme->dev->name, "Switched to PCC_P%d\n", p);
}

static void
jme_start_irq(struct jme_adapter *jme)
{
        register struct dynpcc_info *dpi = &(jme->dpi);

        jme_set_rx_pcc(jme, PCC_P1);
        dpi->cur                = PCC_P1;
        dpi->attempt            = PCC_P1;
        dpi->cnt                = 0;

        jwrite32(jme, JME_PCCTX,
                        ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
                        ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
                        PCCTXQ0_EN
                );

        /*
         * Enable Interrupts
         */
        jwrite32(jme, JME_IENS, INTR_ENABLE);
}

__always_inline static void
jme_stop_irq(struct jme_adapter *jme)
{
        /*
         * Disable Interrupts
         */
        jwrite32(jme, JME_IENC, INTR_ENABLE);
}


__always_inline static void
jme_enable_shadow(struct jme_adapter *jme)
{
        jwrite32(jme,
                 JME_SHBA_LO,
                 ((__u32)jme->shadow_dma & ~((__u32)0x1F)) | SHBA_POSTEN);
}

__always_inline static void
jme_disable_shadow(struct jme_adapter *jme)
{
        jwrite32(jme, JME_SHBA_LO, 0x0);
}

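/*
 * Check and handle link state. Returns 1 if the (up) link state is
 * unchanged, 0 otherwise. When the PHY reports link up, speed/duplex
 * is taken from BMCR for forced setups, or polled from the resolved
 * status after autonegotiation; unless testonly is set, GHC and the
 * TX parameters are then reprogrammed and the carrier state updated.
 */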
static int
jme_check_link(struct net_device *netdev, int testonly)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        __u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr;
        char linkmsg[64];
        int rc = 0;

        linkmsg[0] = '\0';
        phylink = jread32(jme, JME_PHY_LINK);

        if (phylink & PHY_LINK_UP) {
                if(!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
                        /*
                         * If autonegotiation was not enabled,
                         * speed/duplex info must be obtained from SMI.
                         */
                        phylink = PHY_LINK_UP;

                        bmcr = jme_mdio_read(jme->dev,
                                                jme->mii_if.phy_id,
                                                MII_BMCR);

                        phylink |= ((bmcr & BMCR_SPEED1000) &&
                                        (bmcr & BMCR_SPEED100) == 0) ?
                                        PHY_LINK_SPEED_1000M :
                                        (bmcr & BMCR_SPEED100) ?
                                        PHY_LINK_SPEED_100M :
                                        PHY_LINK_SPEED_10M;

                        phylink |= (bmcr & BMCR_FULLDPLX) ?
                                         PHY_LINK_DUPLEX : 0;

                        strcat(linkmsg, "Forced: ");
                }
                else {
                        /*
                         * Keep polling until speed/duplex resolution
                         * completes.
                         */
                        while(!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
                                --cnt) {
                                udelay(1);
                                phylink = jread32(jme, JME_PHY_LINK);
                        }

                        if(!cnt)
                                jeprintk(netdev->name,
                                        "Timeout waiting for speed/duplex resolution.\n");

                        strcat(linkmsg, "ANed: ");
                }

                if(jme->phylink == phylink) {
                        rc = 1;
                        goto out;
                }
                if(testonly)
                        goto out;

                jme->phylink = phylink;

                switch(phylink & PHY_LINK_SPEED_MASK) {
                        case PHY_LINK_SPEED_10M:
                                ghc = GHC_SPEED_10M;
                                strcat(linkmsg, "10 Mbps, ");
                                break;
                        case PHY_LINK_SPEED_100M:
                                ghc = GHC_SPEED_100M;
                                strcat(linkmsg, "100 Mbps, ");
                                break;
                        case PHY_LINK_SPEED_1000M:
                                ghc = GHC_SPEED_1000M;
                                strcat(linkmsg, "1000 Mbps, ");
                                break;
                        default:
                                ghc = 0;
                                break;
                }
                ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;

                strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
                                        "Full-Duplex, " :
                                        "Half-Duplex, ");

                if(phylink & PHY_LINK_MDI_STAT)
                        strcat(linkmsg, "MDI-X");
                else
                        strcat(linkmsg, "MDI");

                if(phylink & PHY_LINK_DUPLEX)
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
                else {
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
                                                TXMCS_BACKOFF |
                                                TXMCS_CARRIERSENSE |
                                                TXMCS_COLLISION);
                        jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
                                ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
                                TXTRHD_TXREN |
                                ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
                }

                jme->reg_ghc = ghc;
                jwrite32(jme, JME_GHC, ghc);

                jprintk(netdev->name, "Link is up at %s.\n", linkmsg);
                netif_carrier_on(netdev);
        }
        else {
                if(testonly)
                        goto out;

                jprintk(netdev->name, "Link is down.\n");
                jme->phylink = 0;
                netif_carrier_off(netdev);
        }

out:
        return rc;
}

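/*
 * Allocate the TX descriptor ring. The coherent DMA block is
 * over-allocated so that both the CPU pointer and the bus address can
 * be rounded up to RING_DESC_ALIGN (16 bytes); the raw values stay in
 * alloc/dmaalloc for freeing, the aligned ones in desc/dma for use.
 */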
static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
        struct jme_ring *txring = &(jme->txring[0]);

        txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                                   TX_RING_ALLOC_SIZE(jme->tx_ring_size),
                                   &(txring->dmaalloc),
                                   GFP_ATOMIC);

        if(!txring->alloc) {
                txring->desc = NULL;
                txring->dmaalloc = 0;
                txring->dma = 0;
                return -ENOMEM;
        }

        /*
         * Align to 16 bytes
         */
        txring->desc            = (void*)ALIGN((unsigned long)(txring->alloc),
                                                RING_DESC_ALIGN);
        txring->dma             = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
        txring->next_to_use     = 0;
        txring->next_to_clean   = 0;
        atomic_set(&txring->nr_free, jme->tx_ring_size);

        /*
         * Initialize Transmit Descriptors
         */
        memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
        memset(txring->bufinf, 0,
                sizeof(struct jme_buffer_info) * jme->tx_ring_size);

        return 0;
}

static void
jme_free_tx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *txring = &(jme->txring[0]);
        struct jme_buffer_info *txbi = txring->bufinf;

        if(txring->alloc) {
                for(i = 0 ; i < jme->tx_ring_size ; ++i) {
                        txbi = txring->bufinf + i;
                        if(txbi->skb) {
                                dev_kfree_skb(txbi->skb);
                                txbi->skb = NULL;
                        }
                        txbi->mapping   = 0;
                        txbi->len       = 0;
                        txbi->nr_desc   = 0;
                }

                dma_free_coherent(&(jme->pdev->dev),
                                  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
                                  txring->alloc,
                                  txring->dmaalloc);

                txring->alloc           = NULL;
                txring->desc            = NULL;
                txring->dmaalloc        = 0;
                txring->dma             = 0;
        }
        txring->next_to_use     = 0;
        txring->next_to_clean   = 0;
        atomic_set(&txring->nr_free, 0);
}

__always_inline static void
jme_enable_tx_engine(struct jme_adapter *jme)
{
        /*
         * Select Queue 0
         */
        jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);

        /*
         * Setup TX Queue 0 DMA Base Address
         */
        jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
        jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
        jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

        /*
         * Setup TX Descriptor Count
         */
        jwrite32(jme, JME_TXQDC, jme->tx_ring_size);

        /*
         * Enable TX Engine
         */
        wmb();
        jwrite32(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
                                TXCS_ENABLE);
}

__always_inline static void
jme_restart_tx_engine(struct jme_adapter *jme)
{
        /*
         * Restart TX Engine
         */
        jwrite32(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
                                TXCS_ENABLE);
}

__always_inline static void
jme_disable_tx_engine(struct jme_adapter *jme)
{
        int i;
        __u32 val;

        /*
         * Disable TX Engine
         */
        jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);

        val = jread32(jme, JME_TXCS);
        for(i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i)
        {
                mdelay(1);
                val = jread32(jme, JME_TXCS);
        }

        if(!i) {
                jeprintk(jme->dev->name, "Disable TX engine timeout.\n");
                jme_reset_mac_processor(jme);
        }
}

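/*
 * Refurbish RX descriptor i so the hardware can reuse it: rewrite the
 * buffer address and length from the buffer info, then, only after a
 * write barrier, hand it back by setting the OWN (and INT) flags so
 * the NIC never sees a partially written descriptor.
 */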
static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
        struct jme_ring *rxring = jme->rxring;
        register volatile struct rxdesc* rxdesc = rxring->desc;
        struct jme_buffer_info *rxbi = rxring->bufinf;
        rxdesc += i;
        rxbi += i;

        rxdesc->dw[0] = 0;
        rxdesc->dw[1] = 0;
        rxdesc->desc1.bufaddrh  = cpu_to_le32((__u64)rxbi->mapping >> 32);
        rxdesc->desc1.bufaddrl  = cpu_to_le32(
                                        (__u64)rxbi->mapping & 0xFFFFFFFFUL);
        rxdesc->desc1.datalen   = cpu_to_le16(rxbi->len);
        if(jme->dev->features & NETIF_F_HIGHDMA)
                rxdesc->desc1.flags = RXFLAG_64BIT;
        wmb();
        rxdesc->desc1.flags     |= RXFLAG_OWN | RXFLAG_INT;
}

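/*
 * Allocate and DMA-map a fresh receive buffer for slot i. The skb data
 * pointer is pushed forward so that it starts on an RX_BUF_DMA_ALIGN
 * boundary before the remaining tailroom is mapped for the device.
 */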
static int
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        struct jme_buffer_info *rxbi = rxring->bufinf + i;
        unsigned long offset;
        struct sk_buff* skb;

        skb = netdev_alloc_skb(jme->dev,
                jme->dev->mtu + RX_EXTRA_LEN);
        if(unlikely(!skb))
                return -ENOMEM;

        if(unlikely(offset =
                        (unsigned long)(skb->data)
                        & ((unsigned long)RX_BUF_DMA_ALIGN - 1)))
                skb_reserve(skb, RX_BUF_DMA_ALIGN - offset);

        rxbi->skb = skb;
        rxbi->len = skb_tailroom(skb);
        rxbi->mapping = pci_map_page(jme->pdev,
                                        virt_to_page(skb->data),
                                        offset_in_page(skb->data),
                                        rxbi->len,
                                        PCI_DMA_FROMDEVICE);

        return 0;
}

static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        struct jme_buffer_info *rxbi = rxring->bufinf;
        rxbi += i;

        if(rxbi->skb) {
                pci_unmap_page(jme->pdev,
                                 rxbi->mapping,
                                 rxbi->len,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb(rxbi->skb);
                rxbi->skb = NULL;
                rxbi->mapping = 0;
                rxbi->len = 0;
        }
}

static void
jme_free_rx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *rxring = &(jme->rxring[0]);

        if(rxring->alloc) {
                for(i = 0 ; i < jme->rx_ring_size ; ++i)
                        jme_free_rx_buf(jme, i);

                dma_free_coherent(&(jme->pdev->dev),
                                  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
                                  rxring->alloc,
                                  rxring->dmaalloc);
                rxring->alloc    = NULL;
                rxring->desc     = NULL;
                rxring->dmaalloc = 0;
                rxring->dma      = 0;
        }
        rxring->next_to_use   = 0;
        rxring->next_to_clean = 0;
}

static int
jme_setup_rx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *rxring = &(jme->rxring[0]);

        rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                                   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
                                   &(rxring->dmaalloc),
                                   GFP_ATOMIC);
        if(!rxring->alloc) {
                rxring->desc = NULL;
                rxring->dmaalloc = 0;
                rxring->dma = 0;
                return -ENOMEM;
        }

        /*
         * Align to 16 bytes
         */
        rxring->desc            = (void*)ALIGN((unsigned long)(rxring->alloc),
                                                RING_DESC_ALIGN);
        rxring->dma             = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
        rxring->next_to_use     = 0;
        rxring->next_to_clean   = 0;

        /*
         * Initialize Receive Descriptors
         */
        for(i = 0 ; i < jme->rx_ring_size ; ++i) {
                if(unlikely(jme_make_new_rx_buf(jme, i))) {
                        jme_free_rx_resources(jme);
                        return -ENOMEM;
                }

                jme_set_clean_rxdesc(jme, i);
        }

        return 0;
}

__always_inline static void
jme_enable_rx_engine(struct jme_adapter *jme)
{
        /*
         * Setup RX DMA Base Address
         */
        jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
        jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
        jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);

        /*
         * Setup RX Descriptor Count
         */
        jwrite32(jme, JME_RXQDC, jme->rx_ring_size);

        /*
         * Setup Unicast Filter
         */
        jme_set_multi(jme->dev);

        /*
         * Enable RX Engine
         */
        wmb();
        jwrite32(jme, JME_RXCS, jme->reg_rxcs |
                                RXCS_QUEUESEL_Q0 |
                                RXCS_ENABLE |
                                RXCS_QST);
}

__always_inline static void
jme_restart_rx_engine(struct jme_adapter *jme)
{
        /*
         * Restart RX Engine
         */
        jwrite32(jme, JME_RXCS, jme->reg_rxcs |
                                RXCS_QUEUESEL_Q0 |
                                RXCS_ENABLE |
                                RXCS_QST);
}


__always_inline static void
jme_disable_rx_engine(struct jme_adapter *jme)
{
        int i;
        __u32 val;

        /*
         * Disable RX Engine
         */
        jwrite32(jme, JME_RXCS, jme->reg_rxcs);

        val = jread32(jme, JME_RXCS);
        for(i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i)
        {
                mdelay(1);
                val = jread32(jme, JME_RXCS);
        }

        if(!i)
                jeprintk(jme->dev->name, "Disable RX engine timeout.\n");
}

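/*
 * Deliver the frame in RX slot idx to the stack. A replacement buffer
 * is allocated first; if that fails, the old buffer is handed back to
 * the device and the frame counted as dropped. Otherwise the hardware
 * checksum verdict and, when configured, the VLAN tag are attached
 * before the skb is passed up. The descriptor is re-armed either way.
 */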
static void
jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        volatile struct rxdesc *rxdesc = rxring->desc;
        struct jme_buffer_info *rxbi = rxring->bufinf;
        struct sk_buff *skb;
        int framesize;

        rxdesc += idx;
        rxbi += idx;

        skb = rxbi->skb;
        pci_dma_sync_single_for_cpu(jme->pdev,
                                        rxbi->mapping,
                                        rxbi->len,
                                        PCI_DMA_FROMDEVICE);

        if(unlikely(jme_make_new_rx_buf(jme, idx))) {
                pci_dma_sync_single_for_device(jme->pdev,
                                                rxbi->mapping,
                                                rxbi->len,
                                                PCI_DMA_FROMDEVICE);

                ++(NET_STAT(jme).rx_dropped);
        }
        else {
                framesize = le16_to_cpu(rxdesc->descwb.framesize)
                                - RX_PREPAD_SIZE;

                skb_reserve(skb, RX_PREPAD_SIZE);
                skb_put(skb, framesize);
                skb->protocol = eth_type_trans(skb, jme->dev);

                if((rxdesc->descwb.flags &
                                        (RXWBFLAG_TCPON |
                                        RXWBFLAG_UDPON |
                                        RXWBFLAG_IPV4)))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb->ip_summed = CHECKSUM_NONE;

                if(rxdesc->descwb.flags & RXWBFLAG_TAGON) {
                        vlan_dbg(jme->dev->name, "VLAN: %04x\n",
                                        rxdesc->descwb.vlan);
                        if(jme->vlgrp) {
                                vlan_dbg(jme->dev->name,
                                        "VLAN Passed to kernel.\n");
                                vlan_hwaccel_rx(skb, jme->vlgrp,
                                        le32_to_cpu(rxdesc->descwb.vlan));
                                NET_STAT(jme).rx_bytes += 4;
                        }
                        else {
                                /*
                                 * No VLAN group to deliver to:
                                 * free the skb instead of leaking it.
                                 */
                                dev_kfree_skb(skb);
                        }
                }
                else {
                        netif_rx(skb);
                }

                if((le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST) ==
                                RXWBFLAG_DEST_MUL)
                        ++(NET_STAT(jme).multicast);

                jme->dev->last_rx = jiffies;
                NET_STAT(jme).rx_bytes += framesize;
                ++(NET_STAT(jme).rx_packets);
        }

        jme_set_clean_rxdesc(jme, idx);
}


static int
jme_rxsum_bad(struct jme_adapter *jme, __u16 flags)
{
        if(unlikely((flags & RXWBFLAG_TCPON) &&
        !(flags & RXWBFLAG_TCPCS))) {
                csum_dbg(jme->dev->name, "TCP Checksum error.\n");
                return 1;
        }
        else if(unlikely((flags & RXWBFLAG_UDPON) &&
        !(flags & RXWBFLAG_UDPCS))) {
                csum_dbg(jme->dev->name, "UDP Checksum error.\n");
                return 1;
        }
        else if(unlikely((flags & RXWBFLAG_IPV4) &&
        !(flags & RXWBFLAG_IPCS))) {
                csum_dbg(jme->dev->name, "IPv4 Checksum error.\n");
                return 1;
        }
        else {
                return 0;
        }
}

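/*
 * Receive up to 'limit' frames starting at next_to_clean. Descriptors
 * still owned by the hardware (or not yet write-back complete) stop
 * the walk. Erroneous or multi-descriptor frames are recycled in
 * place. Ring indices wrap with '& rx_ring_mask', which assumes the
 * ring size is a power of two. Returns the unused budget (>= 0).
 */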
static int
jme_process_receive(struct jme_adapter *jme, int limit)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        volatile struct rxdesc *rxdesc = rxring->desc;
        int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;

        i = rxring->next_to_clean;
        while( limit-- > 0 )
        {
                rxdesc = rxring->desc;
                rxdesc += i;

                if((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
                !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
                        goto out;

                desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

                rx_dbg(jme->dev->name, "RX: Cleaning %d\n", i);

                if(unlikely(desccnt > 1 ||
                rxdesc->descwb.errstat & RXWBERR_ALLERR ||
                jme_rxsum_bad(jme, rxdesc->descwb.flags))) {
                        if(rxdesc->descwb.errstat & RXWBERR_CRCERR)
                                ++(NET_STAT(jme).rx_crc_errors);
                        else if(rxdesc->descwb.errstat & RXWBERR_OVERUN)
                                ++(NET_STAT(jme).rx_fifo_errors);
                        else
                                ++(NET_STAT(jme).rx_errors);

                        if(desccnt > 1) {
                                rx_dbg(jme->dev->name,
                                        "RX: More than one (%d) descriptor, "
                                        "framelen=%d\n",
                                        desccnt, le16_to_cpu(rxdesc->descwb.framesize));
                                limit -= desccnt - 1;
                        }

                        for(j = i, ccnt = desccnt ; ccnt-- ; ) {
                                jme_set_clean_rxdesc(jme, j);
                                j = (j + 1) & (mask);
                        }
                }
                else {
                        jme_alloc_and_feed_skb(jme, i);
                }

                i = (i + desccnt) & (mask);
        }

out:
        rx_dbg(jme->dev->name, "RX: Stop at %d\n", i);
        rx_dbg(jme->dev->name, "RX: RXNDA offset %d\n",
                (jread32(jme, JME_RXNDA) - jread32(jme, JME_RXDBA_LO))
                        >> 4);

        rxring->next_to_clean = i;

        return limit > 0 ? limit : 0;
}

static void
jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
{
        if(likely(atmp == dpi->cur))
                return;

        if(dpi->attempt == atmp) {
                ++(dpi->cnt);
        }
        else {
                dpi->attempt = atmp;
                dpi->cnt = 0;
        }
}

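/*
 * Pick a coalescing profile from the RX byte count (and interrupt
 * count) accumulated since the last PCC timer tick, then only commit
 * the switch after the same profile has been attempted on more than 20
 * consecutive ticks, which keeps the setting from flapping.
 */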
static void
jme_dynamic_pcc(struct jme_adapter *jme)
{
        register struct dynpcc_info *dpi = &(jme->dpi);

        if((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
                jme_attempt_pcc(dpi, PCC_P3);
        else if((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P2_THRESHOLD
        || dpi->intr_cnt > PCC_INTR_THRESHOLD)
                jme_attempt_pcc(dpi, PCC_P2);
        else
                jme_attempt_pcc(dpi, PCC_P1);

        if(unlikely(dpi->attempt != dpi->cur && dpi->cnt > 20)) {
                jme_set_rx_pcc(jme, dpi->attempt);
                dpi->cur = dpi->attempt;
                dpi->cnt = 0;
        }
}

static void
jme_start_pcc_timer(struct jme_adapter *jme)
{
        struct dynpcc_info *dpi = &(jme->dpi);
        dpi->last_bytes         = NET_STAT(jme).rx_bytes;
        dpi->last_pkts          = NET_STAT(jme).rx_packets;
        dpi->intr_cnt           = 0;
        jwrite32(jme, JME_TMCSR,
                TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
}

static void
jme_stop_pcc_timer(struct jme_adapter *jme)
{
        jwrite32(jme, JME_TMCSR, 0);
}

static void
jme_pcc_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter*)arg;
        struct net_device *netdev = jme->dev;

        if(unlikely(!netif_carrier_ok(netdev) ||
                (atomic_read(&jme->link_changing) != 1)
        )) {
                jme_stop_pcc_timer(jme);
                return;
        }

        jme_dynamic_pcc(jme);
        jme_start_pcc_timer(jme);
}

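/*
 * Bottom half for link-change interrupts. Uses link_changing as a
 * mutual-exclusion counter, waits for the RX/TX cleaning tasklets to
 * go idle, then tears down and (if the new link is up) rebuilds both
 * rings and engines so their parameters match the new link state.
 */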
static void
jme_link_change_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter*)arg;
        struct net_device *netdev = jme->dev;
        int timeout = WAIT_TASKLET_TIMEOUT;
        int rc;

        if(!atomic_dec_and_test(&jme->link_changing))
                goto out;

        if(jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
                goto out;

        jme->old_mtu = netdev->mtu;
        netif_stop_queue(netdev);

        while(--timeout > 0 &&
                (
                atomic_read(&jme->rx_cleaning) != 1 ||
                atomic_read(&jme->tx_cleaning) != 1
                )) {
                mdelay(1);
        }

        if(netif_carrier_ok(netdev)) {
                jme_stop_pcc_timer(jme);
                jme_reset_mac_processor(jme);
                jme_free_rx_resources(jme);
                jme_free_tx_resources(jme);
        }

        jme_check_link(netdev, 0);
        if(netif_carrier_ok(netdev)) {
                rc = jme_setup_rx_resources(jme);
                if(rc) {
                        jeprintk(netdev->name,
                                "Error allocating RX resources"
                                ", device stopped!\n");
                        goto out;
                }

                rc = jme_setup_tx_resources(jme);
                if(rc) {
                        jeprintk(netdev->name,
                                "Error allocating TX resources"
                                ", device stopped!\n");
                        goto err_out_free_rx_resources;
                }

                jme_enable_rx_engine(jme);
                jme_enable_tx_engine(jme);

                netif_start_queue(netdev);
                jme_start_pcc_timer(jme);
        }

        goto out;

err_out_free_rx_resources:
        jme_free_rx_resources(jme);
out:
        atomic_inc(&jme->link_changing);
}

static void
jme_rx_clean_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter*)arg;
        struct dynpcc_info *dpi = &(jme->dpi);

        if(unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
                goto out;

        if(unlikely(atomic_read(&jme->link_changing) != 1))
                goto out;

        if(unlikely(!netif_carrier_ok(jme->dev)))
                goto out;

        jme_process_receive(jme, jme->rx_ring_size);
        ++(dpi->intr_cnt);

out:
        atomic_inc(&jme->rx_cleaning);
}

static void
jme_rx_empty_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter*)arg;

        if(unlikely(atomic_read(&jme->link_changing) != 1))
                return;

        if(unlikely(!netif_carrier_ok(jme->dev)))
                return;

        queue_dbg(jme->dev->name, "RX Queue Full!\n");

        jme_rx_clean_tasklet(arg);
        jme_restart_rx_engine(jme);
}

static void
jme_wake_queue_if_stopped(struct jme_adapter *jme)
{
        struct jme_ring *txring = jme->txring;

        smp_wmb();
        if(unlikely(netif_queue_stopped(jme->dev) &&
        atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
                queue_dbg(jme->dev->name, "TX Queue Woken.\n");
                netif_wake_queue(jme->dev);
        }
}

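/*
 * Reclaim completed TX descriptors from next_to_clean onward. The
 * first descriptor of each packet carries the skb and its total
 * descriptor count (nr_desc); the walk stops at a slot without an skb
 * or still owned by the hardware. Fragment mappings are unmapped,
 * stats updated, and the freed slots returned to nr_free before
 * possibly waking a stopped queue.
 */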
static void
jme_tx_clean_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter*)arg;
        struct jme_ring *txring = &(jme->txring[0]);
        volatile struct txdesc *txdesc = txring->desc;
        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
        int i, j, cnt = 0, max, err, mask;

        if(unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
                goto out;

        if(unlikely(atomic_read(&jme->link_changing) != 1))
                goto out;

        if(unlikely(!netif_carrier_ok(jme->dev)))
                goto out;

        max = jme->tx_ring_size - atomic_read(&txring->nr_free);
        mask = jme->tx_ring_mask;

        tx_dbg(jme->dev->name, "Tx Tasklet: In\n");

        for(i = txring->next_to_clean ; cnt < max ; ) {
                ctxbi = txbi + i;

                if(likely(ctxbi->skb &&
                !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {
                        err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;

                        tx_dbg(jme->dev->name,
                                "Tx Tasklet: Clean %d+%d\n",
                                i, ctxbi->nr_desc);

                        for(j = 1 ; j < ctxbi->nr_desc ; ++j) {
                                ttxbi = txbi + ((i + j) & (mask));
                                txdesc[(i + j) & (mask)].dw[0] = 0;

                                pci_unmap_page(jme->pdev,
                                                 ttxbi->mapping,
                                                 ttxbi->len,
                                                 PCI_DMA_TODEVICE);

                                ttxbi->mapping = 0;
                                ttxbi->len = 0;
                        }

                        dev_kfree_skb(ctxbi->skb);

                        cnt += ctxbi->nr_desc;

                        if(unlikely(err))
                                ++(NET_STAT(jme).tx_carrier_errors);
                        else {
                                ++(NET_STAT(jme).tx_packets);
                                NET_STAT(jme).tx_bytes += ctxbi->len;
                        }

                        ctxbi->skb = NULL;
                        ctxbi->len = 0;
                }
                else {
                        if(!ctxbi->skb)
                                tx_dbg(jme->dev->name,
                                        "Tx Tasklet:"
                                        " Stopped due to no skb.\n");
                        else
                                tx_dbg(jme->dev->name,
                                        "Tx Tasklet:"
                                        " Stopped due to descriptor not done.\n");
                        break;
                }

                i = (i + ctxbi->nr_desc) & mask;

                ctxbi->nr_desc = 0;
        }

        tx_dbg(jme->dev->name,
                "Tx Tasklet: Stop %d Jiffies %lu\n",
                i, jiffies);
        txring->next_to_clean = i;

        atomic_add(cnt, &txring->nr_free);

        jme_wake_queue_if_stopped(jme);

out:
        atomic_inc(&jme->tx_cleaning);
}

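/*
 * Common interrupt service core for both INTx and MSI paths: mask all
 * interrupts, acknowledge the status bits (write-1-to-clear), then
 * schedule the matching tasklets. A link change or software interrupt
 * preempts the other causes, and anything neither handled nor enabled
 * is reported for debugging before interrupts are re-enabled.
 */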
static void
jme_intr_msi(struct jme_adapter *jme, __u32 intrstat)
{
        __u32 handled;

        /*
         * Disable interrupt
         */
        jwrite32f(jme, JME_IENC, INTR_ENABLE);

        /*
         * Write 1 to clear interrupt status
         */
        jwrite32f(jme, JME_IEVE, intrstat);

        if(intrstat & (INTR_LINKCH | INTR_SWINTR)) {
                tasklet_schedule(&jme->linkch_task);
                goto out_reenable;
        }

        if(intrstat & INTR_TMINTR)
                tasklet_schedule(&jme->pcc_task);

        if(intrstat & INTR_RX0EMP)
                tasklet_schedule(&jme->rxempty_task);

        if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
                tasklet_schedule(&jme->rxclean_task);

        if(intrstat & (INTR_PCCTXTO | INTR_PCCTX))
                tasklet_schedule(&jme->txclean_task);

        handled = INTR_ENABLE | INTR_RX0 | INTR_TX0 | INTR_PAUSERCV;
        if((intrstat & ~(handled)) != 0) {
                /*
                 * Some interrupts were neither handled
                 * nor enabled; report them for debugging.
                 */
                dprintk(jme->dev->name,
                        "Unhandled interrupt (%08x)\n",
                        intrstat & ~(handled));
        }

out_reenable:
        /*
         * Re-enable interrupt
         */
        jwrite32f(jme, JME_IENS, INTR_ENABLE);
}

static irqreturn_t
jme_intr(int irq, void *dev_id)
{
        struct net_device *netdev = dev_id;
        struct jme_adapter *jme = netdev_priv(netdev);
        __u32 intrstat;

        intrstat = jread32(jme, JME_IEVE);

        /*
         * Check if it's really an interrupt for us
         */
        if(unlikely(intrstat == 0))
                return IRQ_NONE;

        /*
         * Check if the device still exists
         */
        if(unlikely(intrstat == ~((typeof(intrstat))0)))
                return IRQ_NONE;

        jme_intr_msi(jme, intrstat);

        return IRQ_HANDLED;
}

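/*
 * MSI variant of the interrupt handler. With shadow posting enabled
 * (see jme_enable_shadow) the hardware mirrors the interrupt status
 * into a DMA-coherent block, so the status can be read from memory
 * without an MMIO read; no IRQ-sharing checks are needed with MSI.
 */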
static irqreturn_t
jme_msi(int irq, void *dev_id)
{
        struct net_device *netdev = dev_id;
        struct jme_adapter *jme = netdev_priv(netdev);
        __u32 intrstat;

        pci_dma_sync_single_for_cpu(jme->pdev,
                                    jme->shadow_dma,
                                    sizeof(__u32) * SHADOW_REG_NR,
                                    PCI_DMA_FROMDEVICE);
        intrstat = jme->shadow_regs[SHADOW_IEVE];
        jme->shadow_regs[SHADOW_IEVE] = 0;

        jme_intr_msi(jme, intrstat);

        return IRQ_HANDLED;
}


static void
jme_reset_link(struct jme_adapter *jme)
{
        jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
}

static void
jme_restart_an(struct jme_adapter *jme)
{
        __u32 bmcr;
        unsigned long flags;

        spin_lock_irqsave(&jme->phy_lock, flags);
        bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
        bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
        spin_unlock_irqrestore(&jme->phy_lock, flags);
}

static int
jme_request_irq(struct jme_adapter *jme)
{
        int rc;
        struct net_device *netdev = jme->dev;
        irq_handler_t handler = jme_intr;
        int irq_flags = IRQF_SHARED;

        if (!pci_enable_msi(jme->pdev)) {
                jme->flags |= JME_FLAG_MSI;
                handler = jme_msi;
                irq_flags = 0;
        }

        rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
                          netdev);
        if(rc) {
                jeprintk(netdev->name,
                        "Unable to request %s interrupt (return: %d)\n",
                        jme->flags & JME_FLAG_MSI ? "MSI":"INTx", rc);

                if(jme->flags & JME_FLAG_MSI) {
                        pci_disable_msi(jme->pdev);
                        jme->flags &= ~JME_FLAG_MSI;
                }
        }
        else {
                netdev->irq = jme->pdev->irq;
        }

        return rc;
}

static void
jme_free_irq(struct jme_adapter *jme)
{
        free_irq(jme->pdev->irq, jme->dev);
        if (jme->flags & JME_FLAG_MSI) {
                pci_disable_msi(jme->pdev);
                jme->flags &= ~JME_FLAG_MSI;
                jme->dev->irq = jme->pdev->irq;
        }
}

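/*
 * ndo_open: wait up to ~1s for any in-flight link-change or ring
 * cleaning tasklets to go idle (their counters rest at 1), then bring
 * the hardware up: clear power management, reset the MAC, grab the
 * IRQ, enable shadow posting and interrupts, restore saved PHY
 * settings (or reset the PHY), and kick a link check.
 */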
static int
jme_open(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int rc, timeout = 100;

        while(
                --timeout > 0 &&
                (
                atomic_read(&jme->link_changing) != 1 ||
                atomic_read(&jme->rx_cleaning) != 1 ||
                atomic_read(&jme->tx_cleaning) != 1
                )
        )
                msleep(10);

        if(!timeout) {
                rc = -EBUSY;
                goto err_out;
        }

        jme_clear_pm(jme);
        jme_reset_mac_processor(jme);

        rc = jme_request_irq(jme);
        if(rc)
                goto err_out;

        jme_enable_shadow(jme);
        jme_start_irq(jme);

        if(jme->flags & JME_FLAG_SSET)
                jme_set_settings(netdev, &jme->old_ecmd);
        else
                jme_reset_phy_processor(jme);

        jme_reset_link(jme);

        return 0;

err_out:
        netif_stop_queue(netdev);
        netif_carrier_off(netdev);
        return rc;
}

static void
jme_set_100m_half(struct jme_adapter *jme)
{
        __u32 bmcr, tmp;

        bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
        tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
                       BMCR_SPEED1000 | BMCR_FULLDPLX);
        tmp |= BMCR_SPEED100;

        if (bmcr != tmp)
                jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);

        jwrite32(jme, JME_GHC, GHC_SPEED_100M);
}

static void
jme_phy_off(struct jme_adapter *jme)
{
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
}


static int
jme_close(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);

        netif_stop_queue(netdev);
        netif_carrier_off(netdev);

        jme_stop_irq(jme);
        jme_disable_shadow(jme);
        jme_free_irq(jme);

        tasklet_kill(&jme->linkch_task);
        tasklet_kill(&jme->txclean_task);
        tasklet_kill(&jme->rxclean_task);
        tasklet_kill(&jme->rxempty_task);

        jme_reset_mac_processor(jme);
        jme_free_rx_resources(jme);
        jme_free_tx_resources(jme);
        jme->phylink = 0;
        jme_phy_off(jme);

        return 0;
}

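/*
 * Reserve ring space for one packet: nr_frags + 2 descriptors (one
 * header descriptor, one for the linear part of the skb, one per page
 * fragment). Returns the start index, or -1 if the ring is too full.
 */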
static int
jme_alloc_txdesc(struct jme_adapter *jme,
                        struct sk_buff *skb)
{
        struct jme_ring *txring = jme->txring;
        int idx, nr_alloc, mask = jme->tx_ring_mask;

        idx = txring->next_to_use;
        nr_alloc = skb_shinfo(skb)->nr_frags + 2;

        if(unlikely(atomic_read(&txring->nr_free) < nr_alloc))
                return -1;

        atomic_sub(nr_alloc, &txring->nr_free);

        txring->next_to_use = (txring->next_to_use + nr_alloc) & mask;

        return idx;
}

static void
jme_fill_tx_map(struct pci_dev *pdev,
                volatile struct txdesc *txdesc,
                struct jme_buffer_info *txbi,
                struct page *page,
                __u32 page_offset,
                __u32 len,
                __u8 hidma)
{
        dma_addr_t dmaaddr;

        dmaaddr = pci_map_page(pdev,
                                page,
                                page_offset,
                                len,
                                PCI_DMA_TODEVICE);

        pci_dma_sync_single_for_device(pdev,
                                       dmaaddr,
                                       len,
                                       PCI_DMA_TODEVICE);

        txdesc->dw[0] = 0;
        txdesc->dw[1] = 0;
        txdesc->desc2.flags     = TXFLAG_OWN;
        txdesc->desc2.flags     |= (hidma)?TXFLAG_64BIT:0;
        txdesc->desc2.datalen   = cpu_to_le16(len);
        txdesc->desc2.bufaddrh  = cpu_to_le32((__u64)dmaaddr >> 32);
        txdesc->desc2.bufaddrl  = cpu_to_le32(
                                        (__u64)dmaaddr & 0xFFFFFFFFUL);

        txbi->mapping = dmaaddr;
        txbi->len = len;
}

static void
jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
        struct jme_ring *txring = jme->txring;
        volatile struct txdesc *txdesc = txring->desc, *ctxdesc;
        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
        __u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
        int i, nr_frags = skb_shinfo(skb)->nr_frags;
        int mask = jme->tx_ring_mask;
        struct skb_frag_struct *frag;
        __u32 len;

        for(i = 0 ; i < nr_frags ; ++i) {
                frag = &skb_shinfo(skb)->frags[i];
                ctxdesc = txdesc + ((idx + i + 2) & (mask));
                ctxbi = txbi + ((idx + i + 2) & (mask));

                jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page,
                                 frag->page_offset, frag->size, hidma);
        }

        len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
        ctxdesc = txdesc + ((idx + 1) & (mask));
        ctxbi = txbi + ((idx + 1) & (mask));
        jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
                        offset_in_page(skb->data), len, hidma);
}

static int
jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
{
        if(unlikely(skb_shinfo(skb)->gso_size &&
                        skb_header_cloned(skb) &&
                        pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
                dev_kfree_skb(skb);
                return -1;
        }

        return 0;
}

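/*
 * If the skb is a GSO packet, program large-send offload: store the
 * MSS in the descriptor, set LSEN, and pre-seed the TCP pseudo-header
 * checksum with a zero length, since the hardware fills in the final
 * checksum per segment. Returns 0 when TSO was set up, 1 otherwise so
 * the caller falls back to plain checksum offload.
 */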
static int
jme_tx_tso(struct sk_buff *skb,
                volatile __u16 *mss, __u8 *flags)
{
        if((*mss = (skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT))) {
                *flags |= TXFLAG_LSEN;

                if(skb->protocol == __constant_htons(ETH_P_IP)) {
                        struct iphdr *iph = ip_hdr(skb);

                        iph->check = 0;
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                iph->daddr, 0,
                                                                IPPROTO_TCP,
                                                                0);
                }
                else {
                        struct ipv6hdr *ip6h = ipv6_hdr(skb);

                        tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
                                                                &ip6h->daddr, 0,
                                                                IPPROTO_TCP,
                                                                0);
                }

                return 0;
        }

        return 1;
}

static void
jme_tx_csum(struct sk_buff *skb, __u8 *flags)
{
        if(skb->ip_summed == CHECKSUM_PARTIAL) {
                __u8 ip_proto;

                switch (skb->protocol) {
                case __constant_htons(ETH_P_IP):
                        ip_proto = ip_hdr(skb)->protocol;
                        break;
                case __constant_htons(ETH_P_IPV6):
                        ip_proto = ipv6_hdr(skb)->nexthdr;
                        break;
                default:
                        ip_proto = 0;
                        break;
                }

                switch(ip_proto) {
                case IPPROTO_TCP:
                        *flags |= TXFLAG_TCPCS;
                        break;
                case IPPROTO_UDP:
                        *flags |= TXFLAG_UDPCS;
                        break;
                default:
                        jeprintk("jme", "Unsupported upper-layer protocol.\n");
                        break;
                }
        }
}

__always_inline static void
jme_tx_vlan(struct sk_buff *skb, volatile __u16 *vlan, __u8 *flags)
{
        if(vlan_tx_tag_present(skb)) {
                vlan_dbg("jme", "Tag found!(%04x)\n", vlan_tx_tag_get(skb));
                *flags |= TXFLAG_TAGON;
                *vlan = vlan_tx_tag_get(skb);
        }
}

1652 static int
1653 jme_fill_first_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
1654 {
1655         struct jme_ring *txring = jme->txring;
1656         volatile struct txdesc *txdesc;
1657         struct jme_buffer_info *txbi;
1658         __u8 flags;
1659
1660         txdesc = (volatile struct txdesc*)txring->desc + idx;
1661         txbi = txring->bufinf + idx;
1662
1663         txdesc->dw[0] = 0;
1664         txdesc->dw[1] = 0;
1665         txdesc->dw[2] = 0;
1666         txdesc->dw[3] = 0;
1667         txdesc->desc1.pktsize = cpu_to_le16(skb->len);
        /*
         * Set the OWN bit last: if the kernel queues packets faster
         * than the NIC sends them, this keeps the NIC from picking up
         * the descriptor before every other field is filled in and
         * before we tell it to start scanning this TX queue.
         */
1675         wmb();
1676         flags = TXFLAG_OWN | TXFLAG_INT;
        /* Set checksum flags only when not doing TSO */
1678         if(jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
1679                 jme_tx_csum(skb, &flags);
1680         jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
1681         txdesc->desc1.flags = flags;
        /*
         * Set the TX buffer info only after handing the descriptor
         * to the NIC, for better tx_clean timing.
         */
1686         wmb();
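        /*
         * One descriptor for this header, one for the linear data
         * segment, plus one per page fragment: nr_frags + 2 total.
         */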
1687         txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
1688         txbi->skb = skb;
1689         txbi->len = skb->len;
1690
1691         return 0;
1692 }
1693
1694 static void
1695 jme_stop_queue_if_full(struct jme_adapter *jme)
1696 {
1697         struct jme_ring *txring = jme->txring;
1698
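        /*
         * Stop the queue when the free descriptors can no longer
         * cover a worst-case frame (MAX_SKB_FRAGS + 2).  Re-checking
         * after the stop closes the race with the TX-clean tasklet
         * freeing descriptors in between; if enough space reappeared,
         * wake the queue again right away.
         */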
1699         smp_wmb();
1700         if(unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
1701                 netif_stop_queue(jme->dev);
1702                 queue_dbg(jme->dev->name, "TX Queue Paused.\n");
1703                 smp_wmb();
1704                 if (atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold)) {
1705                         netif_wake_queue(jme->dev);
                        queue_dbg(jme->dev->name, "TX Queue Fast Woken.\n");
1707                 }
1708         }
}
1711
1712 /*
1713  * This function is already protected by netif_tx_lock()
1714  */
1715 static int
1716 jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1717 {
1718         struct jme_adapter *jme = netdev_priv(netdev);
1719         int idx;
1720
1721         if(skb_shinfo(skb)->nr_frags) {
1722                 tx_dbg(netdev->name, "Frags: %d Headlen: %d Len: %d MSS: %d Sum:%d\n",
1723                         skb_shinfo(skb)->nr_frags,
1724                         skb_headlen(skb),
1725                         skb->len,
1726                         skb_shinfo(skb)->gso_size,
1727                         skb->ip_summed);
1728         }
1729
1730         if(unlikely(jme_expand_header(jme, skb))) {
1731                 ++(NET_STAT(jme).tx_dropped);
1732                 return NETDEV_TX_OK;
1733         }
1734
1735         idx = jme_alloc_txdesc(jme, skb);
1736
1737         if(unlikely(idx<0)) {
1738                 netif_stop_queue(netdev);
1739                 jeprintk(netdev->name,
1740                                 "BUG! Tx ring full when queue awake!\n");
1741
1742                 return NETDEV_TX_BUSY;
1743         }
1744
1745         jme_map_tx_skb(jme, skb, idx);
1746         jme_fill_first_tx_desc(jme, skb, idx);
1747
1748         tx_dbg(jme->dev->name, "Xmit: %d+%d\n", idx, skb_shinfo(skb)->nr_frags + 2);
1749
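        /*
         * Kick TX queue 0; the NIC scans the ring and sends every
         * descriptor it owns (OWN bit set).
         */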
1750         jwrite32(jme, JME_TXCS, jme->reg_txcs |
1751                                 TXCS_SELECT_QUEUE0 |
1752                                 TXCS_QUEUE0S |
1753                                 TXCS_ENABLE);
1754         netdev->trans_start = jiffies;
1755
1756         jme_stop_queue_if_full(jme);
1757
1758         return NETDEV_TX_OK;
1759 }
1760
1761 static int
1762 jme_set_macaddr(struct net_device *netdev, void *p)
1763 {
1764         struct jme_adapter *jme = netdev_priv(netdev);
1765         struct sockaddr *addr = p;
1766         __u32 val;
1767
1768         if(netif_running(netdev))
1769                 return -EBUSY;
1770
1771         spin_lock(&jme->macaddr_lock);
1772         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1773
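        /*
         * The unicast address is split across two registers: bytes
         * 0-3 go to RXUMA_LO and bytes 4-5 to RXUMA_HI, byte 0 in
         * the least significant position.  sa_data is plain char,
         * so each byte is masked to avoid sign extension.
         */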
        val = (addr->sa_data[3] & 0xff) << 24 |
              (addr->sa_data[2] & 0xff) << 16 |
              (addr->sa_data[1] & 0xff) <<  8 |
              (addr->sa_data[0] & 0xff);
        jwrite32(jme, JME_RXUMA_LO, val);
        val = (addr->sa_data[5] & 0xff) << 8 |
              (addr->sa_data[4] & 0xff);
        jwrite32(jme, JME_RXUMA_HI, val);
1782         spin_unlock(&jme->macaddr_lock);
1783
1784         return 0;
1785 }
1786
1787 static void
1788 jme_set_multi(struct net_device *netdev)
1789 {
1790         struct jme_adapter *jme = netdev_priv(netdev);
1791         u32 mc_hash[2] = {};
1792         int i;
1793         unsigned long flags;
1794
1795         spin_lock_irqsave(&jme->rxmcs_lock, flags);
1796
1797         jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;
1798
1799         if (netdev->flags & IFF_PROMISC) {
1800                 jme->reg_rxmcs |= RXMCS_ALLFRAME;
1801         }
1802         else if (netdev->flags & IFF_ALLMULTI) {
1803                 jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
1804         }
1805         else if(netdev->flags & IFF_MULTICAST) {
1806                 struct dev_mc_list *mclist;
1807                 int bit_nr;
1808
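                /*
                 * Imperfect multicast filter: the low 6 bits of each
                 * address's Ethernet CRC select one of the 64 bits
                 * spread over the two 32-bit hash registers.
                 */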
1809                 jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
1810                 for (i = 0, mclist = netdev->mc_list;
1811                         mclist && i < netdev->mc_count;
1812                         ++i, mclist = mclist->next) {
1813
1814                         bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
1815                         mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
1816                 }
1817
1818                 jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
1819                 jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
1820         }
1821
1822         wmb();
1823         jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
1824
1825         spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
1826 }
1827
1828 static int
1829 jme_change_mtu(struct net_device *netdev, int new_mtu)
1830 {
1831         struct jme_adapter *jme = netdev_priv(netdev);
1832
1833         if(new_mtu == jme->old_mtu)
1834                 return 0;
1835
1836         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
1837                 ((new_mtu) < IPV6_MIN_MTU))
1838                 return -EINVAL;
1839
        jme->reg_rxcs &= ~RXCS_FIFOTHNP;
        if(new_mtu > 4000)
                jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
        else
                jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
        jme_restart_rx_engine(jme);
1850
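        /*
         * Above a 1900-byte MTU the hardware checksum and TSO engines
         * are turned off and checksumming falls back to software,
         * presumably a frame-size limit of the JMC250 offload engine.
         */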
1851         if(new_mtu > 1900) {
1852                 netdev->features &= ~(NETIF_F_HW_CSUM |
1853                                 NETIF_F_TSO |
1854                                 NETIF_F_TSO6);
1855         }
1856         else {
1857                 if(jme->flags & JME_FLAG_TXCSUM)
1858                         netdev->features |= NETIF_F_HW_CSUM;
1859                 if(jme->flags & JME_FLAG_TSO)
1860                         netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
1861         }
1862
1863         netdev->mtu = new_mtu;
1864         jme_reset_link(jme);
1865
1866         return 0;
1867 }
1868
1869 static void
1870 jme_tx_timeout(struct net_device *netdev)
1871 {
1872         struct jme_adapter *jme = netdev_priv(netdev);
1873
        /*
         * Reset the link; the resulting link-change event will
         * reinitialize all RX/TX resources.
         */
1878         jme->phylink = 0;
1879         jme_reset_link(jme);
1880 }
1881
1882 static void
1883 jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1884 {
1885         struct jme_adapter *jme = netdev_priv(netdev);
1886
1887         jme->vlgrp = grp;
1888 }
1889
1890 static void
1891 jme_get_drvinfo(struct net_device *netdev,
1892                      struct ethtool_drvinfo *info)
1893 {
1894         struct jme_adapter *jme = netdev_priv(netdev);
1895
1896         strcpy(info->driver, DRV_NAME);
1897         strcpy(info->version, DRV_VERSION);
1898         strcpy(info->bus_info, pci_name(jme->pdev));
1899 }
1900
1901 static int
1902 jme_get_regs_len(struct net_device *netdev)
1903 {
1904         return 0x400;
1905 }
1906
1907 static void
1908 mmapio_memcpy(struct jme_adapter *jme, __u32 *p, __u32 reg, int len)
1909 {
1910         int i;
1911
1912         for(i = 0 ; i < len ; i += 4)
1913                 p[i >> 2] = jread32(jme, reg + i);
1915 }
1916
1917 static void
1918 jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
1919 {
1920         struct jme_adapter *jme = netdev_priv(netdev);
1921         __u32 *p32 = (__u32*)p;
1922
1923         memset(p, 0, 0x400);
1924
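        /*
         * Dump the four register blocks (MAC, PHY, MISC, RSS) at
         * 0x100-byte strides into the 0x400 buffer reported by
         * jme_get_regs_len().
         */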
1925         regs->version = 1;
1926         mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);
1927
1928         p32 += 0x100 >> 2;
1929         mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);
1930
1931         p32 += 0x100 >> 2;
1932         mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);
1933
1934         p32 += 0x100 >> 2;
1935         mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);
1937 }
1938
1939 static int
1940 jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
1941 {
1942         struct jme_adapter *jme = netdev_priv(netdev);
1943
1944         ecmd->use_adaptive_rx_coalesce = true;
1945         ecmd->tx_coalesce_usecs = PCC_TX_TO;
1946         ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
1947
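        /*
         * RX coalescing is adaptive: the PCC tasklet moves dpi.cur
         * between the P1/P2/P3 timeout/packet-count profiles with
         * traffic load, so report whichever profile is active now.
         */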
1948         switch(jme->dpi.cur) {
1949         case PCC_P1:
1950                 ecmd->rx_coalesce_usecs = PCC_P1_TO;
1951                 ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
1952                 break;
1953         case PCC_P2:
1954                 ecmd->rx_coalesce_usecs = PCC_P2_TO;
1955                 ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
1956                 break;
1957         case PCC_P3:
1958                 ecmd->rx_coalesce_usecs = PCC_P3_TO;
1959                 ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
1960                 break;
1961         default:
1962                 break;
1963         }
1964
1965         return 0;
1966 }
1967
1968 static void
1969 jme_get_pauseparam(struct net_device *netdev,
1970                         struct ethtool_pauseparam *ecmd)
1971 {
1972         struct jme_adapter *jme = netdev_priv(netdev);
1973         unsigned long flags;
1974         __u32 val;
1975
1976         ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
1977         ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;
1978
1979         spin_lock_irqsave(&jme->phy_lock, flags);
1980         val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
1981         spin_unlock_irqrestore(&jme->phy_lock, flags);
1982
1983         ecmd->autoneg =
1984                 (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
1985 }
1986
1987 static int
1988 jme_set_pauseparam(struct net_device *netdev,
1989                         struct ethtool_pauseparam *ecmd)
1990 {
1991         struct jme_adapter *jme = netdev_priv(netdev);
1992         unsigned long flags;
1993         __u32 val;
1994
1995         if( ((jme->reg_txpfc & TXPFC_PF_EN) != 0) !=
1996                 (ecmd->tx_pause != 0)) {
1997
1998                 if(ecmd->tx_pause)
1999                         jme->reg_txpfc |= TXPFC_PF_EN;
2000                 else
2001                         jme->reg_txpfc &= ~TXPFC_PF_EN;
2002
2003                 jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
2004         }
2005
2006         spin_lock_irqsave(&jme->rxmcs_lock, flags);
2007         if( ((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) !=
2008                 (ecmd->rx_pause != 0)) {
2009
2010                 if(ecmd->rx_pause)
2011                         jme->reg_rxmcs |= RXMCS_FLOWCTRL;
2012                 else
2013                         jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;
2014
2015                 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2016         }
2017         spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
2018
2019         spin_lock_irqsave(&jme->phy_lock, flags);
2020         val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2021         if( ((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) !=
2022                 (ecmd->autoneg != 0)) {
2023
2024                 if(ecmd->autoneg)
2025                         val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2026                 else
2027                         val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2028
2029                 jme_mdio_write(jme->dev, jme->mii_if.phy_id,
2030                                 MII_ADVERTISE, val);
2031         }
2032         spin_unlock_irqrestore(&jme->phy_lock, flags);
2033
2034         return 0;
2035 }
2036
2037 static void
2038 jme_get_wol(struct net_device *netdev,
2039                 struct ethtool_wolinfo *wol)
2040 {
2041         struct jme_adapter *jme = netdev_priv(netdev);
2042
2043         wol->supported = WAKE_MAGIC | WAKE_PHY;
2044
2045         wol->wolopts = 0;
2046
2047         if(jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
2048                 wol->wolopts |= WAKE_PHY;
2049
2050         if(jme->reg_pmcs & PMCS_MFEN)
2051                 wol->wolopts |= WAKE_MAGIC;
2053 }
2054
2055 static int
2056 jme_set_wol(struct net_device *netdev,
2057                 struct ethtool_wolinfo *wol)
2058 {
2059         struct jme_adapter *jme = netdev_priv(netdev);
2060
2061         if(wol->wolopts & (WAKE_MAGICSECURE |
2062                                 WAKE_UCAST |
2063                                 WAKE_MCAST |
2064                                 WAKE_BCAST |
2065                                 WAKE_ARP))
2066                 return -EOPNOTSUPP;
2067
2068         jme->reg_pmcs = 0;
2069
2070         if(wol->wolopts & WAKE_PHY)
2071                 jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;
2072
2073         if(wol->wolopts & WAKE_MAGIC)
2074                 jme->reg_pmcs |= PMCS_MFEN;
2075
2077         return 0;
2078 }
2079
2080 static int
2081 jme_get_settings(struct net_device *netdev,
2082                      struct ethtool_cmd *ecmd)
2083 {
2084         struct jme_adapter *jme = netdev_priv(netdev);
2085         int rc;
2086         unsigned long flags;
2087
2088         spin_lock_irqsave(&jme->phy_lock, flags);
2089         rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
2090         spin_unlock_irqrestore(&jme->phy_lock, flags);
2091         return rc;
2092 }
2093
2094 static int
2095 jme_set_settings(struct net_device *netdev,
2096                      struct ethtool_cmd *ecmd)
2097 {
2098         struct jme_adapter *jme = netdev_priv(netdev);
2099         int rc, fdc=0;
2100         unsigned long flags;
2101
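        /*
         * IEEE 802.3 mandates autonegotiation for 1000BASE-T, so a
         * forced 1000Mbps setting is rejected.  A forced duplex
         * change (fdc) also needs a link reset for the MAC to pick
         * up the new duplex.
         */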
2102         if(ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
2103                 return -EINVAL;
2104
2105         if(jme->mii_if.force_media &&
2106         ecmd->autoneg != AUTONEG_ENABLE &&
2107         (jme->mii_if.full_duplex != ecmd->duplex))
2108                 fdc = 1;
2109
2110         spin_lock_irqsave(&jme->phy_lock, flags);
2111         rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
2112         spin_unlock_irqrestore(&jme->phy_lock, flags);
2113
2114         if(!rc && fdc)
2115                 jme_reset_link(jme);
2116
2117         if(!rc) {
2118                 jme->flags |= JME_FLAG_SSET;
2119                 jme->old_ecmd = *ecmd;
2120         }
2121
2122         return rc;
2123 }
2124
2125 static __u32
2126 jme_get_link(struct net_device *netdev)
2127 {
2128         struct jme_adapter *jme = netdev_priv(netdev);
2129         return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
2130 }
2131
2132 static u32
2133 jme_get_rx_csum(struct net_device *netdev)
2134 {
2135         struct jme_adapter *jme = netdev_priv(netdev);
2136
2137         return jme->reg_rxmcs & RXMCS_CHECKSUM;
2138 }
2139
2140 static int
2141 jme_set_rx_csum(struct net_device *netdev, u32 on)
2142 {
2143         struct jme_adapter *jme = netdev_priv(netdev);
2144         unsigned long flags;
2145
2146         spin_lock_irqsave(&jme->rxmcs_lock, flags);
2147         if(on)
2148                 jme->reg_rxmcs |= RXMCS_CHECKSUM;
2149         else
2150                 jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
2151         jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2152         spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
2153
2154         return 0;
2155 }
2156
2157 static int
2158 jme_set_tx_csum(struct net_device *netdev, u32 on)
2159 {
2160         struct jme_adapter *jme = netdev_priv(netdev);
2161
2162         if(on) {
2163                 jme->flags |= JME_FLAG_TXCSUM;
2164                 if(netdev->mtu <= 1900)
2165                         netdev->features |= NETIF_F_HW_CSUM;
2166         }
2167         else {
2168                 jme->flags &= ~JME_FLAG_TXCSUM;
2169                 netdev->features &= ~NETIF_F_HW_CSUM;
2170         }
2171
2172         return 0;
2173 }
2174
2175 static int
2176 jme_set_tso(struct net_device *netdev, u32 on)
2177 {
2178         struct jme_adapter *jme = netdev_priv(netdev);
2179
2180         if (on) {
2181                 jme->flags |= JME_FLAG_TSO;
2182                 if(netdev->mtu <= 1900)
2183                         netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
2184         }
2185         else {
2186                 jme->flags &= ~JME_FLAG_TSO;
2187                 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
2188         }
2189
2190         return 0;
2191 }
2192
2193 static int
2194 jme_nway_reset(struct net_device *netdev)
2195 {
2196         struct jme_adapter *jme = netdev_priv(netdev);
2197         jme_restart_an(jme);
2198         return 0;
2199 }
2200
2201 static const struct ethtool_ops jme_ethtool_ops = {
2202         .get_drvinfo            = jme_get_drvinfo,
2203         .get_regs_len           = jme_get_regs_len,
2204         .get_regs               = jme_get_regs,
2205         .get_coalesce           = jme_get_coalesce,
2206         .get_pauseparam         = jme_get_pauseparam,
2207         .set_pauseparam         = jme_set_pauseparam,
2208         .get_wol                = jme_get_wol,
2209         .set_wol                = jme_set_wol,
2210         .get_settings           = jme_get_settings,
2211         .set_settings           = jme_set_settings,
2212         .get_link               = jme_get_link,
2213         .get_rx_csum            = jme_get_rx_csum,
2214         .set_rx_csum            = jme_set_rx_csum,
2215         .set_tx_csum            = jme_set_tx_csum,
2216         .set_tso                = jme_set_tso,
2217         .set_sg                 = ethtool_op_set_sg,
2218         .nway_reset             = jme_nway_reset,
2219 };
2220
2221 static int
2222 jme_pci_dma64(struct pci_dev *pdev)
2223 {
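        /*
         * Try DMA masks from widest to narrowest: 64-bit, 40-bit,
         * then 32-bit.  Returns 1 when 64/40-bit addressing (DAC) is
         * usable, 0 for plain 32-bit, -1 on failure; the caller uses
         * the result to decide on NETIF_F_HIGHDMA.
         */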
2224         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
2225                 if(!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
2226                         dprintk("jme", "64Bit DMA Selected.\n");
2227                         return 1;
2228                 }
2229
2230         if (!pci_set_dma_mask(pdev, DMA_40BIT_MASK))
2231                 if(!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK)) {
2232                         dprintk("jme", "40Bit DMA Selected.\n");
2233                         return 1;
2234                 }
2235
2236         if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
2237                 if(!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
2238                         dprintk("jme", "32Bit DMA Selected.\n");
2239                         return 0;
2240                 }
2241
2242         return -1;
2243 }
2244
static __always_inline void
2246 jme_set_phy_ps(struct jme_adapter *jme)
2247 {
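        /*
         * Write to vendor-specific PHY register 26; judging from the
         * function name, this presumably enables PHY power saving.
         */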
2248         jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, 0x00001000);
2249 }
2250
2251 static int __devinit
2252 jme_init_one(struct pci_dev *pdev,
2253              const struct pci_device_id *ent)
2254 {
2255         int rc = 0, using_dac;
2256         struct net_device *netdev;
2257         struct jme_adapter *jme;
2258
2259         /*
2260          * set up PCI device basics
2261          */
2262         rc = pci_enable_device(pdev);
2263         if(rc) {
2264                 printk(KERN_ERR PFX "Cannot enable PCI device.\n");
2265                 goto err_out;
2266         }
2267
2268         using_dac = jme_pci_dma64(pdev);
2269         if(using_dac < 0) {
2270                 printk(KERN_ERR PFX "Cannot set PCI DMA Mask.\n");
2271                 rc = -EIO;
2272                 goto err_out_disable_pdev;
2273         }
2274
2275         if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2276                 printk(KERN_ERR PFX "No PCI resource region found.\n");
2277                 rc = -ENOMEM;
2278                 goto err_out_disable_pdev;
2279         }
2280
2281         rc = pci_request_regions(pdev, DRV_NAME);
2282         if(rc) {
2283                 printk(KERN_ERR PFX "Cannot obtain PCI resource region.\n");
2284                 goto err_out_disable_pdev;
2285         }
2286
2287         pci_set_master(pdev);
2288
2289         /*
2290          * alloc and init net device
2291          */
2292         netdev = alloc_etherdev(sizeof(*jme));
2293         if(!netdev) {
2294                 printk(KERN_ERR PFX "Cannot allocate netdev structure.\n");
2295                 rc = -ENOMEM;
2296                 goto err_out_release_regions;
2297         }
2298         netdev->open                    = jme_open;
2299         netdev->stop                    = jme_close;
2300         netdev->hard_start_xmit         = jme_start_xmit;
2301         netdev->set_mac_address         = jme_set_macaddr;
2302         netdev->set_multicast_list      = jme_set_multi;
2303         netdev->change_mtu              = jme_change_mtu;
2304         netdev->ethtool_ops             = &jme_ethtool_ops;
2305         netdev->tx_timeout              = jme_tx_timeout;
2306         netdev->watchdog_timeo          = TX_TIMEOUT;
2307         netdev->vlan_rx_register        = jme_vlan_rx_register;
2308         NETDEV_GET_STATS(netdev, &jme_get_stats);
2309         netdev->features                =       NETIF_F_HW_CSUM |
2310                                                 NETIF_F_SG |
2311                                                 NETIF_F_TSO |
2312                                                 NETIF_F_TSO6 |
2313                                                 NETIF_F_HW_VLAN_TX |
2314                                                 NETIF_F_HW_VLAN_RX;
2315         if(using_dac)
2316                 netdev->features        |=      NETIF_F_HIGHDMA;
2317
2318         SET_NETDEV_DEV(netdev, &pdev->dev);
2319         pci_set_drvdata(pdev, netdev);
2320
2321         /*
2322          * init adapter info
2323          */
2324         jme = netdev_priv(netdev);
2325         jme->pdev = pdev;
2326         jme->dev = netdev;
2327         jme->old_mtu = netdev->mtu = 1500;
2328         jme->phylink = 0;
2329         jme->tx_ring_size = 1 << 10;
2330         jme->tx_ring_mask = jme->tx_ring_size - 1;
2331         jme->tx_wake_threshold = 1 << 9;
2332         jme->rx_ring_size = 1 << 9;
2333         jme->rx_ring_mask = jme->rx_ring_size - 1;
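        /*
         * Ring sizes are powers of two so descriptor indexes wrap
         * with a cheap AND of the mask; the TX queue is woken once
         * half the ring (1 << 9 entries) is free again.
         */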
2334         jme->regs = ioremap(pci_resource_start(pdev, 0),
2335                              pci_resource_len(pdev, 0));
2336         if (!(jme->regs)) {
                printk(KERN_ERR PFX "Failed to map PCI resource region.\n");
2338                 rc = -ENOMEM;
2339                 goto err_out_free_netdev;
2340         }
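        /*
         * The shadow register area is a DMA-coherent buffer that the
         * NIC apparently keeps updated on its own (it is armed via
         * jme_enable_shadow()), letting hot paths read status without
         * a slow MMIO read.
         */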
2341         jme->shadow_regs = pci_alloc_consistent(pdev,
2342                                                 sizeof(__u32) * SHADOW_REG_NR,
2343                                                 &(jme->shadow_dma));
2344         if (!(jme->shadow_regs)) {
                printk(KERN_ERR PFX "Failed to allocate shadow register area.\n");
2346                 rc = -ENOMEM;
2347                 goto err_out_unmap;
2348         }
2349
2350         spin_lock_init(&jme->phy_lock);
2351         spin_lock_init(&jme->macaddr_lock);
2352         spin_lock_init(&jme->rxmcs_lock);
2353
2354         atomic_set(&jme->link_changing, 1);
2355         atomic_set(&jme->rx_cleaning, 1);
2356         atomic_set(&jme->tx_cleaning, 1);
2357
2358         tasklet_init(&jme->pcc_task,
2359                      &jme_pcc_tasklet,
2360                      (unsigned long) jme);
2361         tasklet_init(&jme->linkch_task,
2362                      &jme_link_change_tasklet,
2363                      (unsigned long) jme);
2364         tasklet_init(&jme->txclean_task,
2365                      &jme_tx_clean_tasklet,
2366                      (unsigned long) jme);
2367         tasklet_init(&jme->rxclean_task,
2368                      &jme_rx_clean_tasklet,
2369                      (unsigned long) jme);
2370         tasklet_init(&jme->rxempty_task,
2371                      &jme_rx_empty_tasklet,
2372                      (unsigned long) jme);
2373         jme->mii_if.dev = netdev;
2374         jme->mii_if.phy_id = 1;
2375         jme->mii_if.supports_gmii = 1;
2376         jme->mii_if.mdio_read = jme_mdio_read;
2377         jme->mii_if.mdio_write = jme_mdio_write;
2378
2379         jme->dpi.cur = PCC_P1;
2380
2381         jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
2382         jme->reg_rxcs = RXCS_DEFAULT;
2383         jme->reg_rxmcs = RXMCS_DEFAULT;
2384         jme->reg_txpfc = 0;
2385         jme->reg_pmcs = PMCS_LFEN | PMCS_LREN | PMCS_MFEN;
2386         jme->flags = JME_FLAG_TXCSUM | JME_FLAG_TSO;
2387         /*
2388          * Get Max Read Req Size from PCI Config Space
2389          */
2390         pci_read_config_byte(pdev, PCI_CONF_DCSR_MRRS, &jme->mrrs);
        switch(jme->mrrs) {
        case MRRS_128B:
                jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
                break;
        case MRRS_256B:
                jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
                break;
        default:
                jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
                break;
        }
2402
2404         /*
2405          * Reset MAC processor and reload EEPROM for MAC Address
2406          */
2407         jme_clear_pm(jme);
2408         jme_set_phy_ps(jme);
2409         jme_phy_off(jme);
2410         jme_reset_mac_processor(jme);
2411         rc = jme_reload_eeprom(jme);
2412         if(rc) {
                printk(KERN_ERR PFX
                        "Failed to reload EEPROM for reading the MAC address.\n");
2415                 goto err_out_free_shadow;
2416         }
2417         jme_load_macaddr(netdev);
2418
2420         /*
         * Tell the stack that we are not ready to work until open()
2422          */
2423         netif_carrier_off(netdev);
2424         netif_stop_queue(netdev);
2425
2426         /*
2427          * Register netdev
2428          */
2429         rc = register_netdev(netdev);
2430         if(rc) {
2431                 printk(KERN_ERR PFX "Cannot register net device.\n");
2432                 goto err_out_free_shadow;
2433         }
2434
2435         jprintk(netdev->name,
2436                 "JMC250 gigabit eth %02x:%02x:%02x:%02x:%02x:%02x\n",
2437                 netdev->dev_addr[0],
2438                 netdev->dev_addr[1],
2439                 netdev->dev_addr[2],
2440                 netdev->dev_addr[3],
2441                 netdev->dev_addr[4],
2442                 netdev->dev_addr[5]);
2443
2444         return 0;
2445
2446 err_out_free_shadow:
2447         pci_free_consistent(pdev,
2448                             sizeof(__u32) * SHADOW_REG_NR,
2449                             jme->shadow_regs,
2450                             jme->shadow_dma);
2451 err_out_unmap:
2452         iounmap(jme->regs);
2453 err_out_free_netdev:
2454         pci_set_drvdata(pdev, NULL);
2455         free_netdev(netdev);
2456 err_out_release_regions:
2457         pci_release_regions(pdev);
2458 err_out_disable_pdev:
2459         pci_disable_device(pdev);
2460 err_out:
2461         return rc;
2462 }
2463
2464 static void __devexit
2465 jme_remove_one(struct pci_dev *pdev)
2466 {
2467         struct net_device *netdev = pci_get_drvdata(pdev);
2468         struct jme_adapter *jme = netdev_priv(netdev);
2469
2470         unregister_netdev(netdev);
2471         pci_free_consistent(pdev,
2472                             sizeof(__u32) * SHADOW_REG_NR,
2473                             jme->shadow_regs,
2474                             jme->shadow_dma);
2475         iounmap(jme->regs);
2476         pci_set_drvdata(pdev, NULL);
2477         free_netdev(netdev);
2478         pci_release_regions(pdev);
2479         pci_disable_device(pdev);
2481 }
2482
2483 static int
2484 jme_suspend(struct pci_dev *pdev, pm_message_t state)
2485 {
2486         struct net_device *netdev = pci_get_drvdata(pdev);
2487         struct jme_adapter *jme = netdev_priv(netdev);
2488         int timeout = 100;
2489
2490         atomic_dec(&jme->link_changing);
2491
2492         netif_device_detach(netdev);
2493         netif_stop_queue(netdev);
2494         jme_stop_irq(jme);
2495         jme_free_irq(jme);
2496
2497         while(--timeout > 0 &&
2498         (
2499                 atomic_read(&jme->rx_cleaning) != 1 ||
2500                 atomic_read(&jme->tx_cleaning) != 1
2501         )) {
2502                 mdelay(1);
2503         }
2504         if(!timeout) {
                jeprintk(netdev->name, "Timed out waiting for tasklets to finish.\n");
2506                 return -EBUSY;
2507         }
2508         jme_disable_shadow(jme);
2509
2510         if(netif_carrier_ok(netdev)) {
2511                 jme_stop_pcc_timer(jme);
2512                 jme_reset_mac_processor(jme);
2513                 jme_free_rx_resources(jme);
2514                 jme_free_tx_resources(jme);
2515                 netif_carrier_off(netdev);
2516                 jme->phylink = 0;
2517         }
2518
2520         pci_save_state(pdev);
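        /*
         * With any wake-on-LAN condition armed, drop the link to
         * 100Mbps half duplex (presumably to save power while still
         * able to receive wake frames) and enable PCI wake events;
         * otherwise power the PHY off entirely.
         */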
2521         if(jme->reg_pmcs) {
2522                 jme_set_100m_half(jme);
2523                 jwrite32(jme, JME_PMCS, jme->reg_pmcs);
2524                 pci_enable_wake(pdev, PCI_D3hot, true);
2525                 pci_enable_wake(pdev, PCI_D3cold, true);
2526         }
2527         else {
2528                 jme_phy_off(jme);
2529                 pci_enable_wake(pdev, PCI_D3hot, false);
2530                 pci_enable_wake(pdev, PCI_D3cold, false);
2531         }
2532         pci_set_power_state(pdev, pci_choose_state(pdev, state));
2533
2534         return 0;
2535 }
2536
2537 static int
2538 jme_resume(struct pci_dev *pdev)
2539 {
2540         struct net_device *netdev = pci_get_drvdata(pdev);
2541         struct jme_adapter *jme = netdev_priv(netdev);
2542
2543         jme_clear_pm(jme);
2544         pci_restore_state(pdev);
2545
2546         if(jme->flags & JME_FLAG_SSET)
2547                 jme_set_settings(netdev, &jme->old_ecmd);
2548         else
2549                 jme_reset_phy_processor(jme);
2550
2551         jme_reset_mac_processor(jme);
2552         jme_enable_shadow(jme);
2553         jme_request_irq(jme);
2554         jme_start_irq(jme);
2555         netif_device_attach(netdev);
2556
2557         atomic_inc(&jme->link_changing);
2558
2559         jme_reset_link(jme);
2560
2561         return 0;
2562 }
2563
2564 static struct pci_device_id jme_pci_tbl[] = {
2565         { PCI_VDEVICE(JMICRON, 0x250) },
2566         { }
2567 };
2568
2569 static struct pci_driver jme_driver = {
2570         .name           = DRV_NAME,
2571         .id_table       = jme_pci_tbl,
2572         .probe          = jme_init_one,
2573         .remove         = __devexit_p(jme_remove_one),
2574 #ifdef CONFIG_PM
2575         .suspend        = jme_suspend,
2576         .resume         = jme_resume,
2577 #endif /* CONFIG_PM */
2578 };
2579
2580 static int __init
2581 jme_init_module(void)
2582 {
2583         printk(KERN_INFO PFX "JMicron JMC250 gigabit ethernet "
2584                "driver version %s\n", DRV_VERSION);
2585         return pci_register_driver(&jme_driver);
2586 }
2587
2588 static void __exit
2589 jme_cleanup_module(void)
2590 {
2591         pci_unregister_driver(&jme_driver);
2592 }
2593
2594 module_init(jme_init_module);
2595 module_exit(jme_cleanup_module);
2596
2597 MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
2598 MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
2599 MODULE_LICENSE("GPL");
2600 MODULE_VERSION(DRV_VERSION);
2601 MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
2602