/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * http://www.jmicron.com/
 *
 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

/*
 * Note:
 *      Backdoor for changing "FIFO Threshold for processing next packet"
 *         Using:
 *              ethtool -C eth1 adaptive-rx on adaptive-tx on \
 *              rx-usecs 250 rx-frames-low N
 *         N := 16 | 32 | 64 | 128
 */

/*
 * Timeline before release:
 *      Stage 5: Advanced offloading support.
 *      0.8:
 *      -  Implement VLAN offloading.
 *      0.9:
 *      -  Implement scatter-gather offloading.
 *         Use pci_map_page on scattered sk_buffs for HIGHMEM support.
 *      -  Implement TCP Segmentation offloading.
 *         Due to the TX FIFO size, we should turn off TSO when MTU > 1500.
 *
 *      Stage 6: CPU Load balancing.
 *      1.0:
 *      -  Implement MSI-X.
 *         Along with multiple RX queues, for CPU load balancing.
 *
 *      Stage 7:
 *      -  Clean up / reorganize code, performance tuning (alignment etc...).
 *      -  Test and release 1.0.
 *
 *      Non-Critical:
 *      -  Use NAPI instead of rx_tasklet?
 *         PCC supports both packet-count and timeout interrupts for
 *         receive and transmit completion, so is NAPI really needed?
 *      -  Decode register dump for ethtool.
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include "jme.h"

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
static struct net_device_stats *
jme_get_stats(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        return &jme->stats;
}
#endif

static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int i, val;

        jwrite32(jme, JME_SMI, SMI_OP_REQ |
                                smi_phy_addr(phy) |
                                smi_reg_addr(reg));

        wmb();
        for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
                udelay(1);
                if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
                        break;
        }

        if (i == 0) {
                jeprintk(netdev->name, "phy read timeout : %d\n", reg);
                return 0;
        }

        return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}
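
/*
 * Example (illustrative, not part of the original driver): the two SMI
 * accessors above let callers poll any standard MII register. A minimal
 * sketch that tests link status via BMSR, using only <linux/mii.h>
 * definitions; hypothetical helper, kept disabled:
 */
#if 0
static int jme_example_phy_link_up(struct net_device *netdev, int phy_id)
{
        int bmsr;

        /*
         * BMSR latches link-down events, so read it twice to get the
         * current state rather than a stale latched one.
         */
        bmsr = jme_mdio_read(netdev, phy_id, MII_BMSR);
        bmsr = jme_mdio_read(netdev, phy_id, MII_BMSR);

        return (bmsr & BMSR_LSTATUS) != 0;
}
#endif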

static void
jme_mdio_write(struct net_device *netdev,
                                int phy, int reg, int val)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int i;

        jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
                ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
                smi_phy_addr(phy) | smi_reg_addr(reg));

        wmb();
        for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
                udelay(1);
                if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
                        break;
        }

        if (i == 0)
                jeprintk(netdev->name, "phy write timeout : %d\n", reg);

        return;
}

__always_inline static void
jme_reset_phy_processor(struct jme_adapter *jme)
{
        __u32 val;

        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
                        MII_ADVERTISE, ADVERTISE_ALL |
                        ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
                        MII_CTRL1000,
                        ADVERTISE_1000FULL | ADVERTISE_1000HALF);

        val = jme_mdio_read(jme->dev,
                                jme->mii_if.phy_id,
                                MII_BMCR);

        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
                        MII_BMCR, val | BMCR_RESET);

        return;
}


__always_inline static void
jme_reset_mac_processor(struct jme_adapter *jme)
{
        jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
        udelay(2);
        jwrite32(jme, JME_GHC, jme->reg_ghc);
        jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
        jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
        jwrite32(jme, JME_WFODP, 0);
        jwrite32(jme, JME_WFOI, 0);
        jwrite32(jme, JME_GPREG0, GPREG0_DEFAULT);
        jwrite32(jme, JME_GPREG1, 0);
}

__always_inline static void
jme_clear_pm(struct jme_adapter *jme)
{
        jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
        pci_set_power_state(jme->pdev, PCI_D0);
        pci_enable_wake(jme->pdev, PCI_D0, 0);
}

static int
jme_reload_eeprom(struct jme_adapter *jme)
{
        __u32 val;
        int i;

        val = jread32(jme, JME_SMBCSR);

        if(val & SMBCSR_EEPROMD) {
                val |= SMBCSR_CNACK;
                jwrite32(jme, JME_SMBCSR, val);
                val |= SMBCSR_RELOAD;
                jwrite32(jme, JME_SMBCSR, val);
                mdelay(12);

                for (i = JME_SMB_TIMEOUT; i > 0; --i) {
                        mdelay(1);
                        if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
                                break;
                }

                if(i == 0) {
                        jeprintk(jme->dev->name, "eeprom reload timeout\n");
                        return -EIO;
                }
        }
        else
                return -EIO;

        return 0;
}

static void
jme_load_macaddr(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        unsigned char macaddr[6];
        __u32 val;

        spin_lock(&jme->macaddr_lock);
        val = jread32(jme, JME_RXUMA_LO);
        macaddr[0] = (val >>  0) & 0xFF;
        macaddr[1] = (val >>  8) & 0xFF;
        macaddr[2] = (val >> 16) & 0xFF;
        macaddr[3] = (val >> 24) & 0xFF;
        val = jread32(jme, JME_RXUMA_HI);
        macaddr[4] = (val >>  0) & 0xFF;
        macaddr[5] = (val >>  8) & 0xFF;
        memcpy(netdev->dev_addr, macaddr, 6);
        spin_unlock(&jme->macaddr_lock);
}

__always_inline static void
jme_set_rx_pcc(struct jme_adapter *jme, int p)
{
        switch(p) {
        case PCC_P1:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        case PCC_P2:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        case PCC_P3:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        default:
                break;
        }

        dprintk(jme->dev->name, "Switched to PCC_P%d\n", p);
}
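
/*
 * Sketch (illustrative): the three cases above differ only in which
 * timeout/count pair is packed into PCCRX0, so the shift-and-mask idiom
 * could be factored out. Hypothetical helper built from the same jme.h
 * macros, kept disabled:
 */
#if 0
static inline __u32 pccrx_pack(__u32 to, __u32 cnt)
{
        return ((to << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
               ((cnt << PCCRX_SHIFT) & PCCRX_MASK);
}
/* usage: jwrite32(jme, JME_PCCRX0, pccrx_pack(PCC_P2_TO, PCC_P2_CNT)); */
#endif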

static void
jme_start_irq(struct jme_adapter *jme)
{
        register struct dynpcc_info *dpi = &(jme->dpi);

        jme_set_rx_pcc(jme, PCC_P1);
        dpi->cur                = PCC_P1;
        dpi->attempt            = PCC_P1;
        dpi->cnt                = 0;

        jwrite32(jme, JME_PCCTX,
                        ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
                        ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
                        PCCTXQ0_EN
                );

        /*
         * Enable Interrupts
         */
        jwrite32(jme, JME_IENS, INTR_ENABLE);
}

__always_inline static void
jme_stop_irq(struct jme_adapter *jme)
{
        /*
         * Disable Interrupts
         */
        jwrite32(jme, JME_IENC, INTR_ENABLE);
}


__always_inline static void
jme_enable_shadow(struct jme_adapter *jme)
{
        jwrite32(jme,
                 JME_SHBA_LO,
                 ((__u32)jme->shadow_dma & ~((__u32)0x1F)) | SHBA_POSTEN);
}

__always_inline static void
jme_disable_shadow(struct jme_adapter *jme)
{
        jwrite32(jme, JME_SHBA_LO, 0x0);
}
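
/*
 * Note (illustrative): SHBA_LO points the NIC at a small, 32-byte-aligned
 * coherent buffer into which it posts shadow copies of registers; see
 * jme_msi() below, which reads the interrupt status from jme->shadow_regs
 * instead of doing an MMIO read. The probe path (not shown in this file)
 * is assumed to allocate that buffer along these lines:
 */
#if 0
jme->shadow_regs = pci_alloc_consistent(jme->pdev,
                                        sizeof(__u32) * SHADOW_REG_NR,
                                        &jme->shadow_dma);
#endif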

static int
jme_check_link(struct net_device *netdev, int testonly)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        __u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr;
        char linkmsg[64];
        int rc = 0;

        phylink = jread32(jme, JME_PHY_LINK);

        if (phylink & PHY_LINK_UP) {
                if(!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
                        /*
                         * If autonegotiation was not enabled,
                         * speed/duplex info has to be obtained from the SMI.
                         */
                        phylink = PHY_LINK_UP;

                        bmcr = jme_mdio_read(jme->dev,
                                                jme->mii_if.phy_id,
                                                MII_BMCR);

                        phylink |= ((bmcr & BMCR_SPEED1000) &&
                                        (bmcr & BMCR_SPEED100) == 0) ?
                                        PHY_LINK_SPEED_1000M :
                                        (bmcr & BMCR_SPEED100) ?
                                        PHY_LINK_SPEED_100M :
                                        PHY_LINK_SPEED_10M;

                        phylink |= (bmcr & BMCR_FULLDPLX) ?
                                         PHY_LINK_DUPLEX : 0;

                        strcpy(linkmsg, "Forced: ");
                }
                else {
                        /*
                         * Poll until speed/duplex resolution completes
                         */
                        while(!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
                                --cnt) {

                                udelay(1);
                                phylink = jread32(jme, JME_PHY_LINK);

                        }

                        if(!cnt)
                                jeprintk(netdev->name,
                                        "Timeout waiting for speed/duplex resolution.\n");

                        strcpy(linkmsg, "ANed: ");
                }

                if(jme->phylink == phylink) {
                        rc = 1;
                        goto out;
                }
                if(testonly)
                        goto out;

                jme->phylink = phylink;

                /*
                 * strcat, not strcpy, so the "Forced: "/"ANed: " prefix
                 * set above is not clobbered.
                 */
                switch(phylink & PHY_LINK_SPEED_MASK) {
                        case PHY_LINK_SPEED_10M:
                                ghc = GHC_SPEED_10M;
                                strcat(linkmsg, "10 Mbps, ");
                                break;
                        case PHY_LINK_SPEED_100M:
                                ghc = GHC_SPEED_100M;
                                strcat(linkmsg, "100 Mbps, ");
                                break;
                        case PHY_LINK_SPEED_1000M:
                                ghc = GHC_SPEED_1000M;
                                strcat(linkmsg, "1000 Mbps, ");
                                break;
                        default:
                                ghc = 0;
                                break;
                }
                ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;

                strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
                                        "Full-Duplex, " :
                                        "Half-Duplex, ");

                if(phylink & PHY_LINK_MDI_STAT)
                        strcat(linkmsg, "MDI-X");
                else
                        strcat(linkmsg, "MDI");

                if(phylink & PHY_LINK_DUPLEX)
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
                else {
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
                                                TXMCS_BACKOFF |
                                                TXMCS_CARRIERSENSE |
                                                TXMCS_COLLISION);
                        jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
                                ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
                                TXTRHD_TXREN |
                                ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
                }

                jme->reg_ghc = ghc;
                jwrite32(jme, JME_GHC, ghc);

                jprintk(netdev->name, "Link is up at %s.\n", linkmsg);
                netif_carrier_on(netdev);
        }
        else {
                if(testonly)
                        goto out;

                jprintk(netdev->name, "Link is down.\n");
                jme->phylink = 0;
                netif_carrier_off(netdev);
        }

out:
        return rc;
}


static int
jme_alloc_txdesc(struct jme_adapter *jme,
                        int nr_alloc)
{
        struct jme_ring *txring = jme->txring;
        int idx;

        idx = txring->next_to_use;

        if(unlikely(atomic_read(&txring->nr_free) < nr_alloc))
                return -1;

        atomic_sub(nr_alloc, &txring->nr_free);

        if((txring->next_to_use += nr_alloc) >= RING_DESC_NR)
                txring->next_to_use -= RING_DESC_NR;

        return idx;
}
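
/*
 * Worked example (illustrative, assuming RING_DESC_NR == 128; the
 * "& (RING_DESC_NR - 1)" masking used below requires a power of two):
 */
#if 0
txring->next_to_use = 126;
atomic_set(&txring->nr_free, 10);
idx = jme_alloc_txdesc(jme, 4); /* returns 126, nr_free drops to 6  */
/* next_to_use wrapped to 2: slots 126, 127, 0, 1 now belong to us. */
#endif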

static void
jme_tx_csum(struct sk_buff *skb, unsigned mtu, __u8 *flags)
{
        if(skb->ip_summed == CHECKSUM_PARTIAL) {
                __u8 ip_proto;

                switch (skb->protocol) {
                case __constant_htons(ETH_P_IP):
                        ip_proto = ip_hdr(skb)->protocol;
                        break;
                case __constant_htons(ETH_P_IPV6):
                        ip_proto = ipv6_hdr(skb)->nexthdr;
                        break;
                default:
                        ip_proto = 0;
                        break;
                }

                switch(ip_proto) {
                case IPPROTO_TCP:
                        *flags |= TXFLAG_TCPCS;
                        break;
                case IPPROTO_UDP:
                        *flags |= TXFLAG_UDPCS;
                        break;
                default:
                        jeprintk("jme", "Unsupported upper-layer protocol "
                                        "for checksum offload.\n");
                        break;
                }
        }
}

static int
jme_set_new_txdesc(struct jme_adapter *jme,
                        struct sk_buff *skb)
{
        struct jme_ring *txring = jme->txring;
        volatile struct txdesc *txdesc = txring->desc, *ctxdesc;
        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
        dma_addr_t dmaaddr;
        int i, idx, nr_desc;
        __u8 flags;

        nr_desc = 2;
        idx = jme_alloc_txdesc(jme, nr_desc);

        if(unlikely(idx < 0))
                return NETDEV_TX_BUSY;

        for(i = 1 ; i < nr_desc ; ++i) {
                ctxdesc = txdesc + ((idx + i) & (RING_DESC_NR-1));
                ctxbi = txbi + ((idx + i) & (RING_DESC_NR-1));

                dmaaddr = pci_map_single(jme->pdev,
                                         skb->data,
                                         skb->len,
                                         PCI_DMA_TODEVICE);

                pci_dma_sync_single_for_device(jme->pdev,
                                               dmaaddr,
                                               skb->len,
                                               PCI_DMA_TODEVICE);

                ctxdesc->dw[0] = 0;
                ctxdesc->dw[1] = 0;
                ctxdesc->desc2.flags    = TXFLAG_OWN;
                if(jme->dev->features & NETIF_F_HIGHDMA)
                        ctxdesc->desc2.flags |= TXFLAG_64BIT;
                ctxdesc->desc2.datalen  = cpu_to_le16(skb->len);
                ctxdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32);
                ctxdesc->desc2.bufaddrl = cpu_to_le32(
                                                (__u64)dmaaddr & 0xFFFFFFFFUL);

                ctxbi->mapping = dmaaddr;
                ctxbi->len = skb->len;
        }

        ctxdesc = txdesc + idx;
        ctxbi = txbi + idx;

        ctxdesc->dw[0] = 0;
        ctxdesc->dw[1] = 0;
        ctxdesc->dw[2] = 0;
        ctxdesc->dw[3] = 0;
        ctxdesc->desc1.pktsize = cpu_to_le16(skb->len);
        /*
         * Set the OWN bit last. The kernel may produce packets faster
         * than the NIC sends them, and without this ordering the NIC
         * could start on this descriptor before we have finished
         * filling it in. All other fields are already valid here.
         */
        wmb();
        flags = TXFLAG_OWN | TXFLAG_INT;
        jme_tx_csum(skb, jme->dev->mtu, &flags);
        ctxdesc->desc1.flags = flags;
        /*
         * Set the tx buffer info after telling the NIC to send,
         * for better tx_clean timing.
         */
        wmb();
        ctxbi->nr_desc = nr_desc;
        ctxbi->skb = skb;

        tx_dbg(jme->dev->name, "Xmit: %d+%d\n", idx, nr_desc);

        return 0;
}
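
/*
 * Note (illustrative): the wmb()/OWN dance above is the producer side of
 * the classic descriptor-ring handover. Distilled into a sketch, under
 * the assumption that the NIC may fetch a descriptor the instant it sees
 * OWN set:
 */
#if 0
static void example_post_desc(volatile struct txdesc *desc,
                              dma_addr_t dma, __u16 len)
{
        desc->desc2.bufaddrl = cpu_to_le32((__u64)dma & 0xFFFFFFFFUL);
        desc->desc2.bufaddrh = cpu_to_le32((__u64)dma >> 32);
        desc->desc2.datalen  = cpu_to_le16(len);
        wmb();                          /* 1. make the fields visible   */
        desc->desc2.flags = TXFLAG_OWN; /* 2. only then hand to the NIC */
}
#endif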


static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
        struct jme_ring *txring = &(jme->txring[0]);

        txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                                           TX_RING_ALLOC_SIZE,
                                           &(txring->dmaalloc),
                                           GFP_ATOMIC);

        if(!txring->alloc) {
                txring->desc = NULL;
                txring->dmaalloc = 0;
                txring->dma = 0;
                return -ENOMEM;
        }

        /*
         * 16-byte alignment
         */
        txring->desc            = (void*)ALIGN((unsigned long)(txring->alloc),
                                                RING_DESC_ALIGN);
        txring->dma             = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
        txring->next_to_use     = 0;
        txring->next_to_clean   = 0;
        atomic_set(&txring->nr_free, RING_DESC_NR);

        /*
         * Initialize Transmit Descriptors
         */
        memset(txring->alloc, 0, TX_RING_ALLOC_SIZE);
        memset(txring->bufinf, 0,
                sizeof(struct jme_buffer_info) * RING_DESC_NR);

        return 0;
}

static void
jme_free_tx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *txring = &(jme->txring[0]);
        struct jme_buffer_info *txbi = txring->bufinf;

        if(txring->alloc) {
                for(i = 0 ; i < RING_DESC_NR ; ++i) {
                        txbi = txring->bufinf + i;
                        if(txbi->skb) {
                                dev_kfree_skb(txbi->skb);
                                txbi->skb = NULL;
                        }
                        txbi->mapping   = 0;
                        txbi->len       = 0;
                        txbi->nr_desc   = 0;
                }

                dma_free_coherent(&(jme->pdev->dev),
                                  TX_RING_ALLOC_SIZE,
                                  txring->alloc,
                                  txring->dmaalloc);

                txring->alloc           = NULL;
                txring->desc            = NULL;
                txring->dmaalloc        = 0;
                txring->dma             = 0;
        }
        txring->next_to_use     = 0;
        txring->next_to_clean   = 0;
        atomic_set(&txring->nr_free, 0);

}

__always_inline static void
jme_enable_tx_engine(struct jme_adapter *jme)
{
        /*
         * Select Queue 0
         */
        jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);

        /*
         * Setup TX Queue 0 DMA Base Address
         */
        jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
        jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
        jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

        /*
         * Setup TX Descriptor Count
         */
        jwrite32(jme, JME_TXQDC, RING_DESC_NR);

        /*
         * Enable TX Engine
         */
        wmb();
        jwrite32(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
                                TXCS_ENABLE);

}

__always_inline static void
jme_restart_tx_engine(struct jme_adapter *jme)
{
        /*
         * Restart TX Engine
         */
        jwrite32(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
                                TXCS_ENABLE);
}

__always_inline static void
jme_disable_tx_engine(struct jme_adapter *jme)
{
        int i;
        __u32 val;

        /*
         * Disable TX Engine
         */
        jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);

        val = jread32(jme, JME_TXCS);
        for(i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
                mdelay(1);
                val = jread32(jme, JME_TXCS);
        }

        if(!i) {
                jeprintk(jme->dev->name, "Disable TX engine timeout.\n");
                jme_reset_mac_processor(jme);
        }

}

static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
        struct jme_ring *rxring = jme->rxring;
        register volatile struct rxdesc* rxdesc = rxring->desc;
        struct jme_buffer_info *rxbi = rxring->bufinf;
        rxdesc += i;
        rxbi += i;

        rxdesc->dw[0] = 0;
        rxdesc->dw[1] = 0;
        rxdesc->desc1.bufaddrh  = cpu_to_le32((__u64)rxbi->mapping >> 32);
        rxdesc->desc1.bufaddrl  = cpu_to_le32(
                                        (__u64)rxbi->mapping & 0xFFFFFFFFUL);
        rxdesc->desc1.datalen   = cpu_to_le16(rxbi->len);
        if(jme->dev->features & NETIF_F_HIGHDMA)
                rxdesc->desc1.flags = RXFLAG_64BIT;
        wmb();
        rxdesc->desc1.flags     |= RXFLAG_OWN | RXFLAG_INT;
}

static int
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        struct jme_buffer_info *rxbi = rxring->bufinf;
        unsigned long offset;
        struct sk_buff* skb;

        skb = netdev_alloc_skb(jme->dev,
                jme->dev->mtu + RX_EXTRA_LEN);
        if(unlikely(!skb))
                return -ENOMEM;

        if(unlikely(skb_is_nonlinear(skb))) {
                dprintk(jme->dev->name,
                        "Allocated skb fragged(%d).\n",
                        skb_shinfo(skb)->nr_frags);
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        if(unlikely(offset =
                        (unsigned long)(skb->data)
                        & ((unsigned long)RX_BUF_DMA_ALIGN - 1)))
                skb_reserve(skb, RX_BUF_DMA_ALIGN - offset);

        rxbi += i;
        rxbi->skb = skb;
        rxbi->len = skb_tailroom(skb);
        rxbi->mapping = pci_map_single(jme->pdev,
                                       skb->data,
                                       rxbi->len,
                                       PCI_DMA_FROMDEVICE);

        return 0;
}

static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        struct jme_buffer_info *rxbi = rxring->bufinf;
        rxbi += i;

        if(rxbi->skb) {
                pci_unmap_single(jme->pdev,
                                 rxbi->mapping,
                                 rxbi->len,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb(rxbi->skb);
                rxbi->skb = NULL;
                rxbi->mapping = 0;
                rxbi->len = 0;
        }
}

static void
jme_free_rx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *rxring = &(jme->rxring[0]);

        if(rxring->alloc) {
                for(i = 0 ; i < RING_DESC_NR ; ++i)
                        jme_free_rx_buf(jme, i);

                dma_free_coherent(&(jme->pdev->dev),
                                  RX_RING_ALLOC_SIZE,
                                  rxring->alloc,
                                  rxring->dmaalloc);
                rxring->alloc    = NULL;
                rxring->desc     = NULL;
                rxring->dmaalloc = 0;
                rxring->dma      = 0;
        }
        rxring->next_to_use   = 0;
        rxring->next_to_clean = 0;
}

static int
jme_setup_rx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *rxring = &(jme->rxring[0]);

        rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                                           RX_RING_ALLOC_SIZE,
                                           &(rxring->dmaalloc),
                                           GFP_ATOMIC);
        if(!rxring->alloc) {
                rxring->desc = NULL;
                rxring->dmaalloc = 0;
                rxring->dma = 0;
                return -ENOMEM;
        }

        /*
         * 16-byte alignment
         */
        rxring->desc            = (void*)ALIGN((unsigned long)(rxring->alloc),
                                                RING_DESC_ALIGN);
        rxring->dma             = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
        rxring->next_to_use     = 0;
        rxring->next_to_clean   = 0;

        /*
         * Initialize Receive Descriptors
         */
        for(i = 0 ; i < RING_DESC_NR ; ++i) {
                if(unlikely(jme_make_new_rx_buf(jme, i))) {
                        jme_free_rx_resources(jme);
                        return -ENOMEM;
                }

                jme_set_clean_rxdesc(jme, i);
        }

        return 0;
}

__always_inline static void
jme_enable_rx_engine(struct jme_adapter *jme)
{
        /*
         * Setup RX DMA Base Address
         */
        jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
        jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
        jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);

        /*
         * Setup RX Descriptor Count
         */
        jwrite32(jme, JME_RXQDC, RING_DESC_NR);

        /*
         * Setup Unicast Filter
         */
        jme_set_multi(jme->dev);

        /*
         * Enable RX Engine
         */
        wmb();
        jwrite32(jme, JME_RXCS, jme->reg_rxcs |
                                RXCS_QUEUESEL_Q0 |
                                RXCS_ENABLE |
                                RXCS_QST);
}

__always_inline static void
jme_restart_rx_engine(struct jme_adapter *jme)
{
        /*
         * Start RX Engine
         */
        jwrite32(jme, JME_RXCS, jme->reg_rxcs |
                                RXCS_QUEUESEL_Q0 |
                                RXCS_ENABLE |
                                RXCS_QST);
}


__always_inline static void
jme_disable_rx_engine(struct jme_adapter *jme)
{
        int i;
        __u32 val;

        /*
         * Disable RX Engine
         */
        jwrite32(jme, JME_RXCS, jme->reg_rxcs);

        val = jread32(jme, JME_RXCS);
        for(i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
                mdelay(1);
                val = jread32(jme, JME_RXCS);
        }

        if(!i)
                jeprintk(jme->dev->name, "Disable RX engine timeout.\n");

}

static void
jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx, int summed)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        volatile struct rxdesc *rxdesc = rxring->desc;
        struct jme_buffer_info *rxbi = rxring->bufinf;
        struct sk_buff *skb;
        int framesize;

        rxdesc += idx;
        rxbi += idx;

        skb = rxbi->skb;
        pci_dma_sync_single_for_cpu(jme->pdev,
                                        rxbi->mapping,
                                        rxbi->len,
                                        PCI_DMA_FROMDEVICE);

        if(unlikely(jme_make_new_rx_buf(jme, idx))) {
                pci_dma_sync_single_for_device(jme->pdev,
                                                rxbi->mapping,
                                                rxbi->len,
                                                PCI_DMA_FROMDEVICE);

                ++(NET_STAT(jme).rx_dropped);
        }
        else {
                framesize = le16_to_cpu(rxdesc->descwb.framesize)
                                - RX_PREPAD_SIZE;

                skb_reserve(skb, RX_PREPAD_SIZE);
                skb_put(skb, framesize);
                skb->protocol = eth_type_trans(skb, jme->dev);

                if(summed)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb->ip_summed = CHECKSUM_NONE;

                netif_rx(skb);

                if(le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST_MUL)
                        ++(NET_STAT(jme).multicast);

                jme->dev->last_rx = jiffies;
                NET_STAT(jme).rx_bytes += framesize;
                ++(NET_STAT(jme).rx_packets);
        }

        jme_set_clean_rxdesc(jme, idx);

}

static int
jme_rxsum_bad(struct jme_adapter *jme, __u16 flags)
{
        if(unlikely((flags & RXWBFLAG_TCPON) &&
        !(flags & RXWBFLAG_TCPCS))) {
                csum_dbg(jme->dev->name, "TCP Checksum error.\n");
                return 1;
        }
        else if(unlikely((flags & RXWBFLAG_UDPON) &&
        !(flags & RXWBFLAG_UDPCS))) {
                csum_dbg(jme->dev->name, "UDP Checksum error.\n");
                return 1;
        }
        else if(unlikely((flags & RXWBFLAG_IPV4) &&
        !(flags & RXWBFLAG_IPCS))) {
                csum_dbg(jme->dev->name, "IPV4 Checksum error.\n");
                return 1;
        }
        else {
                return 0;
        }
}

static int
jme_process_receive(struct jme_adapter *jme, int limit)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        volatile struct rxdesc *rxdesc = rxring->desc;
        int i, j, ccnt, desccnt;

        i = rxring->next_to_clean;
        while( limit-- > 0 ) {
                rxdesc = rxring->desc;
                rxdesc += i;

                if((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
                !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
                        goto out;

                desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

                rx_dbg(jme->dev->name, "RX: Cleaning %d\n", i);

                if(unlikely(desccnt > 1 ||
                rxdesc->descwb.errstat & RXWBERR_ALLERR ||
                jme_rxsum_bad(jme, rxdesc->descwb.flags))) {

                        if(rxdesc->descwb.errstat & RXWBERR_CRCERR)
                                ++(NET_STAT(jme).rx_crc_errors);
                        else if(rxdesc->descwb.errstat & RXWBERR_OVERUN)
                                ++(NET_STAT(jme).rx_fifo_errors);
                        else
                                ++(NET_STAT(jme).rx_errors);

                        if(desccnt > 1) {
                                rx_dbg(jme->dev->name,
                                        "RX: More than one(%d) descriptor, "
                                        "framelen=%d\n",
                                        desccnt, le16_to_cpu(rxdesc->descwb.framesize));
                                limit -= desccnt - 1;
                        }

                        for(j = i, ccnt = desccnt ; ccnt-- ; ) {
                                jme_set_clean_rxdesc(jme, j);

                                if(unlikely(++j == RING_DESC_NR))
                                        j = 0;
                        }

                }
                else {
                        jme_alloc_and_feed_skb(jme, i,
                                (rxdesc->descwb.flags &
                                        (RXWBFLAG_TCPON |
                                        RXWBFLAG_UDPON |
                                        RXWBFLAG_IPV4)));
                }

                if((i += desccnt) >= RING_DESC_NR)
                        i -= RING_DESC_NR;
        }

out:
        rx_dbg(jme->dev->name, "RX: Stop at %d\n", i);
        rx_dbg(jme->dev->name, "RX: RXNDA offset %d\n",
                (jread32(jme, JME_RXNDA) - jread32(jme, JME_RXDBA_LO))
                        >> 4);

        rxring->next_to_clean = i;

        return limit > 0 ? limit : 0;

}

static void
jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
{
        if(likely(atmp == dpi->cur))
                return;

        if(dpi->attempt == atmp) {
                ++(dpi->cnt);
        }
        else {
                dpi->attempt = atmp;
                dpi->cnt = 0;
        }

}

static void
jme_dynamic_pcc(struct jme_adapter *jme)
{
        register struct dynpcc_info *dpi = &(jme->dpi);

        if((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
                jme_attempt_pcc(dpi, PCC_P3);
        else if((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P2_THRESHOLD
        || dpi->intr_cnt > PCC_INTR_THRESHOLD)
                jme_attempt_pcc(dpi, PCC_P2);
        else
                jme_attempt_pcc(dpi, PCC_P1);

        if(unlikely(dpi->attempt != dpi->cur && dpi->cnt > 20)) {
                jme_set_rx_pcc(jme, dpi->attempt);
                dpi->cur = dpi->attempt;
                dpi->cnt = 0;
        }
}
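
/*
 * Model (illustrative): jme_attempt_pcc() adds hysteresis -- a new target
 * level must be requested on more than 20 consecutive timer ticks before
 * jme_dynamic_pcc() reprograms the hardware. The same state machine as a
 * self-contained sketch:
 */
#if 0
struct pcc_model { int cur, attempt, cnt; };

static int pcc_model_tick(struct pcc_model *m, int wanted)
{
        if (wanted != m->cur) {
                if (m->attempt == wanted)
                        ++m->cnt;
                else {
                        m->attempt = wanted;
                        m->cnt = 0;
                }
        }
        if (m->attempt != m->cur && m->cnt > 20) {
                m->cur = m->attempt;    /* would call jme_set_rx_pcc() here */
                m->cnt = 0;
                return 1;               /* switched */
        }
        return 0;
}
#endif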

static void
jme_start_pcc_timer(struct jme_adapter *jme)
{
        struct dynpcc_info *dpi = &(jme->dpi);
        dpi->last_bytes         = NET_STAT(jme).rx_bytes;
        dpi->last_pkts          = NET_STAT(jme).rx_packets;
        dpi->intr_cnt           = 0;
        jwrite32(jme, JME_TMCSR,
                TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
}

static void
jme_stop_pcc_timer(struct jme_adapter *jme)
{
        jwrite32(jme, JME_TMCSR, 0);
}
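
/*
 * Arithmetic note (illustrative): TMCSR_CNT is a 24-bit up-counter, so
 * loading (0xFFFFFF - PCC_INTERVAL_US) makes it overflow, raising
 * INTR_TMINTR, after PCC_INTERVAL_US ticks. That one tick equals one
 * microsecond is an assumption suggested by the _US suffix; this file
 * does not state the tick rate. With a hypothetical 100000 us interval:
 */
#if 0
__u32 load = (0xFFFFFF - 100000) & TMCSR_CNT;   /* == 0xFE795F */
#endif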

static void
jme_pcc_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter*)arg;
        struct net_device *netdev = jme->dev;

        if(unlikely(netif_queue_stopped(netdev) ||
                (atomic_read(&jme->link_changing) != 1)
        )) {
                jme_stop_pcc_timer(jme);
                return;
        }

        jme_dynamic_pcc(jme);
        jme_start_pcc_timer(jme);
}

static void
jme_link_change_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter*)arg;
        struct net_device *netdev = jme->dev;
        int timeout = WAIT_TASKLET_TIMEOUT;
        int rc;

        if(!atomic_dec_and_test(&jme->link_changing))
                goto out;

        if(jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
                goto out;

        jme->old_mtu = netdev->mtu;
        netif_stop_queue(netdev);

        while(--timeout > 0 &&
                (
                atomic_read(&jme->rx_cleaning) != 1 ||
                atomic_read(&jme->tx_cleaning) != 1
                )) {

                mdelay(1);
        }

        if(netif_carrier_ok(netdev)) {
                jme_stop_pcc_timer(jme);
                jme_reset_mac_processor(jme);
                jme_free_rx_resources(jme);
                jme_free_tx_resources(jme);
        }

        jme_check_link(netdev, 0);
        if(netif_carrier_ok(netdev)) {
                rc = jme_setup_rx_resources(jme);
                if(rc) {
                        jeprintk(netdev->name,
                                "Error allocating RX resources"
                                ", device STOPPED!\n");
                        goto out;
                }

                rc = jme_setup_tx_resources(jme);
                if(rc) {
                        jeprintk(netdev->name,
                                "Error allocating TX resources"
                                ", device STOPPED!\n");
                        goto err_out_free_rx_resources;
                }

                jme_enable_rx_engine(jme);
                jme_enable_tx_engine(jme);

                netif_start_queue(netdev);
                jme_start_pcc_timer(jme);
        }

        goto out;

err_out_free_rx_resources:
        jme_free_rx_resources(jme);
out:
        atomic_inc(&jme->link_changing);
}

static void
jme_rx_clean_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter*)arg;
        struct dynpcc_info *dpi = &(jme->dpi);

        if(unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
                goto out;

        if(unlikely(atomic_read(&jme->link_changing) != 1))
                goto out;

        if(unlikely(netif_queue_stopped(jme->dev)))
                goto out;

        jme_process_receive(jme, RING_DESC_NR);
        ++(dpi->intr_cnt);

out:
        atomic_inc(&jme->rx_cleaning);
}

static void
jme_rx_empty_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter*)arg;

        if(unlikely(atomic_read(&jme->link_changing) != 1))
                return;

        if(unlikely(netif_queue_stopped(jme->dev)))
                return;

        queue_dbg(jme->dev->name, "RX Queue empty!\n");

        jme_rx_clean_tasklet(arg);
        jme_restart_rx_engine(jme);
}

static void
jme_tx_clean_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter*)arg;
        struct jme_ring *txring = &(jme->txring[0]);
        volatile struct txdesc *txdesc = txring->desc;
        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
        int i, j, cnt = 0, max, err;

        if(unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
                goto out;

        if(unlikely(atomic_read(&jme->link_changing) != 1))
                goto out;

        if(unlikely(netif_queue_stopped(jme->dev)))
                goto out;

        max = RING_DESC_NR - atomic_read(&txring->nr_free);

        tx_dbg(jme->dev->name, "Tx Tasklet: In\n");

        for(i = txring->next_to_clean ; cnt < max ; ) {

                ctxbi = txbi + i;

                if(ctxbi->skb && !(txdesc[i].descwb.flags & TXWBFLAG_OWN)) {

                        err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;

                        tx_dbg(jme->dev->name,
                                "Tx Tasklet: Clean %d+%d\n",
                                i, ctxbi->nr_desc);

                        for(j = 1 ; j < ctxbi->nr_desc ; ++j) {
                                ttxbi = txbi + ((i + j) & (RING_DESC_NR - 1));
                                txdesc[(i+j)&(RING_DESC_NR-1)].dw[0] = 0;

                                pci_unmap_single(jme->pdev,
                                                 ttxbi->mapping,
                                                 ttxbi->len,
                                                 PCI_DMA_TODEVICE);

                                if(likely(!err))
                                        NET_STAT(jme).tx_bytes += ttxbi->len;

                                ttxbi->mapping = 0;
                                ttxbi->len = 0;
                        }

                        dev_kfree_skb(ctxbi->skb);
                        ctxbi->skb = NULL;

                        cnt += ctxbi->nr_desc;

                        if(unlikely(err))
                                ++(NET_STAT(jme).tx_carrier_errors);
                        else
                                ++(NET_STAT(jme).tx_packets);
                }
                else {
                        if(!ctxbi->skb)
                                tx_dbg(jme->dev->name,
                                        "Tx Tasklet:"
                                        " Stopped due to no skb.\n");
                        else
                                tx_dbg(jme->dev->name,
                                        "Tx Tasklet:"
                                        " Stopped due to not done.\n");
                        break;
                }

                if(unlikely((i += ctxbi->nr_desc) >= RING_DESC_NR))
                        i -= RING_DESC_NR;

                ctxbi->nr_desc = 0;
        }

        tx_dbg(jme->dev->name,
                "Tx Tasklet: Stop %d Jiffies %lu\n",
                i, jiffies);
        txring->next_to_clean = i;

        atomic_add(cnt, &txring->nr_free);

out:
        atomic_inc(&jme->tx_cleaning);
}

static void
jme_intr_msi(struct jme_adapter *jme, __u32 intrstat)
{
        /*
         * Disable interrupt
         */
        jwrite32f(jme, JME_IENC, INTR_ENABLE);

        /*
         * Write 1 to clear interrupt status
         */
        jwrite32f(jme, JME_IEVE, intrstat);

        if(intrstat & (INTR_LINKCH | INTR_SWINTR)) {
                tasklet_schedule(&jme->linkch_task);
                goto out_reenable;
        }

        if(intrstat & INTR_TMINTR)
                tasklet_schedule(&jme->pcc_task);

        if(intrstat & INTR_RX0EMP)
                tasklet_schedule(&jme->rxempty_task);

        if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
                tasklet_schedule(&jme->rxclean_task);

        if(intrstat & (INTR_PCCTXTO | INTR_PCCTX))
                tasklet_schedule(&jme->txclean_task);

        if((intrstat & ~INTR_ENABLE) != 0) {
                /*
                 * Some interrupts are not handled here,
                 * and are also not enabled (left for debugging).
                 */
        }

out_reenable:
        /*
         * Re-enable interrupt
         */
        jwrite32f(jme, JME_IENS, INTR_ENABLE);

}

static irqreturn_t
jme_intr(int irq, void *dev_id)
{
        struct net_device *netdev = dev_id;
        struct jme_adapter *jme = netdev_priv(netdev);
        __u32 intrstat;

        intrstat = jread32(jme, JME_IEVE);

        /*
         * Check if it's really an interrupt for us
         */
        if(unlikely(intrstat == 0))
                return IRQ_NONE;

        /*
         * Check if the device still exists
         */
        if(unlikely(intrstat == ~((typeof(intrstat))0)))
                return IRQ_NONE;

        jme_intr_msi(jme, intrstat);

        return IRQ_HANDLED;
}

static irqreturn_t
jme_msi(int irq, void *dev_id)
{
        struct net_device *netdev = dev_id;
        struct jme_adapter *jme = netdev_priv(netdev);
        __u32 intrstat;

        pci_dma_sync_single_for_cpu(jme->pdev,
                                    jme->shadow_dma,
                                    sizeof(__u32) * SHADOW_REG_NR,
                                    PCI_DMA_FROMDEVICE);
        intrstat = jme->shadow_regs[SHADOW_IEVE];
        jme->shadow_regs[SHADOW_IEVE] = 0;

        jme_intr_msi(jme, intrstat);

        return IRQ_HANDLED;
}


static void
jme_reset_link(struct jme_adapter *jme)
{
        jme->phylink = 0;
        jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
}

static void
jme_restart_an(struct jme_adapter *jme)
{
        __u32 bmcr;
        unsigned long flags;

        spin_lock_irqsave(&jme->phy_lock, flags);
        bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
        bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
        spin_unlock_irqrestore(&jme->phy_lock, flags);
}

static int
jme_request_irq(struct jme_adapter *jme)
{
        int rc;
        struct net_device *netdev = jme->dev;
        irq_handler_t handler = jme_intr;
        int irq_flags = IRQF_SHARED;

        if (!pci_enable_msi(jme->pdev)) {
                jme->flags |= JME_FLAG_MSI;
                handler = jme_msi;
                irq_flags = 0;
        }

        rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
                          netdev);
        if(rc) {
                jeprintk(netdev->name,
                        "Unable to allocate %s interrupt (return: %d)\n",
                        jme->flags & JME_FLAG_MSI ? "MSI":"INTx", rc);

                if(jme->flags & JME_FLAG_MSI) {
                        pci_disable_msi(jme->pdev);
                        jme->flags &= ~JME_FLAG_MSI;
                }
        }
        else {
                netdev->irq = jme->pdev->irq;
        }

        return rc;
}

static void
jme_free_irq(struct jme_adapter *jme)
{
        free_irq(jme->pdev->irq, jme->dev);
        if (jme->flags & JME_FLAG_MSI) {
                pci_disable_msi(jme->pdev);
                jme->flags &= ~JME_FLAG_MSI;
                jme->dev->irq = jme->pdev->irq;
        }
}

static int
jme_open(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int rc, timeout = 100;

        while(
                --timeout > 0 &&
                (
                atomic_read(&jme->link_changing) != 1 ||
                atomic_read(&jme->rx_cleaning) != 1 ||
                atomic_read(&jme->tx_cleaning) != 1
                )
        )
                msleep(10);

        if(!timeout) {
                rc = -EBUSY;
                goto err_out;
        }

        jme_reset_mac_processor(jme);

        rc = jme_request_irq(jme);
        if(rc)
                goto err_out;

        jme_enable_shadow(jme);
        jme_start_irq(jme);
        jme_reset_link(jme);

        return 0;

err_out:
        netif_stop_queue(netdev);
        netif_carrier_off(netdev);
        return rc;
}

static int
jme_close(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);

        netif_stop_queue(netdev);
        netif_carrier_off(netdev);

        jme_stop_irq(jme);
        jme_disable_shadow(jme);
        jme_free_irq(jme);

        tasklet_kill(&jme->linkch_task);
        tasklet_kill(&jme->txclean_task);
        tasklet_kill(&jme->rxclean_task);
        tasklet_kill(&jme->rxempty_task);

        jme_reset_mac_processor(jme);
        jme_free_rx_resources(jme);
        jme_free_tx_resources(jme);

        return 0;
}

/*
 * This function is already protected by netif_tx_lock()
 */
static int
jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int rc;

        if(unlikely(netif_queue_stopped(jme->dev)))
                return NETDEV_TX_BUSY;

#if 0
/*Testing*/
        dprintk("jme", "Frags: %d Headlen: %d Len: %d Sum:%d\n",
                skb_shinfo(skb)->nr_frags,
                skb_headlen(skb),
                skb->len,
                skb->ip_summed);
/*********/
#endif

        rc = jme_set_new_txdesc(jme, skb);

        if(unlikely(rc != NETDEV_TX_OK))
                return rc;

        jwrite32(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
                                TXCS_QUEUE0S |
                                TXCS_ENABLE);
        netdev->trans_start = jiffies;

        return NETDEV_TX_OK;
}

static int
jme_set_macaddr(struct net_device *netdev, void *p)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        struct sockaddr *addr = p;
        __u32 val;

        if(netif_running(netdev))
                return -EBUSY;

        spin_lock(&jme->macaddr_lock);
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        val = addr->sa_data[3] << 24 |
              addr->sa_data[2] << 16 |
              addr->sa_data[1] <<  8 |
              addr->sa_data[0];
        jwrite32(jme, JME_RXUMA_LO, val);
        val = addr->sa_data[5] << 8 |
              addr->sa_data[4];
        jwrite32(jme, JME_RXUMA_HI, val);
        spin_unlock(&jme->macaddr_lock);

        return 0;
}
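
/*
 * Worked example (illustrative): the packing above is the exact inverse
 * of jme_load_macaddr(). For the MAC address 00:1B:2C:3D:4E:5F:
 */
#if 0
unsigned char mac[6] = { 0x00, 0x1B, 0x2C, 0x3D, 0x4E, 0x5F };
__u32 lo = mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0];
__u32 hi = mac[5] << 8 | mac[4];
/* lo == 0x3D2C1B00 -> JME_RXUMA_LO, hi == 0x00005F4E -> JME_RXUMA_HI */
#endif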

static void
jme_set_multi(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        u32 mc_hash[2] = {};
        int i;
        unsigned long flags;

        spin_lock_irqsave(&jme->rxmcs_lock, flags);

        jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;

        if (netdev->flags & IFF_PROMISC) {
                jme->reg_rxmcs |= RXMCS_ALLFRAME;
        }
        else if (netdev->flags & IFF_ALLMULTI) {
                jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
        }
        else if(netdev->flags & IFF_MULTICAST) {
                struct dev_mc_list *mclist;
                int bit_nr;

                jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
                for (i = 0, mclist = netdev->mc_list;
                        mclist && i < netdev->mc_count;
                        ++i, mclist = mclist->next) {

                        bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
                        mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
                }

                jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
                jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
        }

        wmb();
        jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);

        spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
}
1647
1648 static int
1649 jme_change_mtu(struct net_device *netdev, int new_mtu)
1650 {
1651         struct jme_adapter *jme = netdev_priv(netdev);
1652
1653         if(new_mtu == jme->old_mtu)
1654                 return 0;
1655
1656         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
1657                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
1658                 return -EINVAL;
1659
1660         if(new_mtu > 4000) {
1661                 jme->reg_rxcs &= ~RXCS_FIFOTHNP;
1662                 jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
1663                 jme_restart_rx_engine(jme);
1664         }
1665         else {
1666                 jme->reg_rxcs &= ~RXCS_FIFOTHNP;
1667                 jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
1668                 jme_restart_rx_engine(jme);
1669         }
1670
1671         if(new_mtu > 1900) {
1672                 netdev->features &= ~NETIF_F_HW_CSUM;
1673         }
1674         else {
1675                 netdev->features |= NETIF_F_HW_CSUM;
1676         }
1677
1678         netdev->mtu = new_mtu;
1679         jme_reset_link(jme);
1680
1681         return 0;
1682 }
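
/*
 * Threshold summary, read off the code above rather than a datasheet:
 *      new_mtu > 4000: RX FIFO "next packet" threshold drops from
 *                      128QW to 64QW.
 *      new_mtu > 1900: NETIF_F_HW_CSUM is cleared, on the assumption
 *                      that the checksum engine cannot cover frames
 *                      this large, so the stack checksums in software.
 */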
1683
1684 static void
1685 jme_tx_timeout(struct net_device *netdev)
1686 {
1687         struct jme_adapter *jme = netdev_priv(netdev);
1688
1689         /*
1690          * Reset the link; the resulting link-change event will
1691          * reinitialize all RX/TX resources.
1692          */
1693         jme_reset_link(jme);
1694 }
1695
1696 static void
1697 jme_get_drvinfo(struct net_device *netdev,
1698                      struct ethtool_drvinfo *info)
1699 {
1700         struct jme_adapter *jme = netdev_priv(netdev);
1701
1702         strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1703         strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1704         strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
1705 }
1706
1707 static int
1708 jme_get_regs_len(struct net_device *netdev)
1709 {
1710         return 0x400;
1711 }
1712
1713 static void
1714 mmapio_memcpy(struct jme_adapter *jme, __u32 *p, __u32 reg, int len)
1715 {
1716         int i;
1717
1718         for(i = 0 ; i < len ; i += 4)
1719                 p[i >> 2] = jread32(jme, reg + i);
1720
1721 }
1722
1723 static void
1724 jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
1725 {
1726         struct jme_adapter *jme = netdev_priv(netdev);
1727         __u32 *p32 = (__u32*)p;
1728
1729         memset(p, 0, 0x400);
1730
1731         regs->version = 1;
1732         mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);
1733
1734         p32 += 0x100 >> 2;
1735         mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);
1736
1737         p32 += 0x100 >> 2;
1738         mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);
1739
1740         p32 += 0x100 >> 2;
1741         mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);
1742
1743 }
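
/*
 * Dump layout: the four register groups sit 0x100 bytes apart in the
 * output buffer, matching the 0x400 reported by jme_get_regs_len();
 * bytes past each group's real length stay zero from the memset above:
 *      0x000 MAC, 0x100 PHY, 0x200 MISC, 0x300 RSS
 */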
1744
1745 static int
1746 jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
1747 {
1748         struct jme_adapter *jme = netdev_priv(netdev);
1749
1750         ecmd->use_adaptive_rx_coalesce = true;
1751         ecmd->tx_coalesce_usecs = PCC_TX_TO;
1752         ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
1753
1754         switch(jme->dpi.cur) {
1755         case PCC_P1:
1756                 ecmd->rx_coalesce_usecs = PCC_P1_TO;
1757                 ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
1758                 break;
1759         case PCC_P2:
1760                 ecmd->rx_coalesce_usecs = PCC_P2_TO;
1761                 ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
1762                 break;
1763         case PCC_P3:
1764                 ecmd->rx_coalesce_usecs = PCC_P3_TO;
1765                 ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
1766                 break;
1767         default:
1768                 break;
1769         }
1770
1771         return 0;
1772 }
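
/*
 * The RX tuple reported above tracks whichever PCC level (P1/P2/P3)
 * the adaptive receive logic is currently in; the TX side is fixed at
 * PCC_TX_TO usecs / PCC_TX_CNT frames.
 */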
1773
1774 /*
1775  * This is not real coalescing tuning.
1776  * It changes internal FIFO-related settings for testing.
1777  */
1778 static int
1779 jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
1780 {
1781         struct jme_adapter *jme = netdev_priv(netdev);
1782
1783         if(ecmd->use_adaptive_rx_coalesce &&
1784         ecmd->use_adaptive_tx_coalesce &&
1785         ecmd->rx_coalesce_usecs == 250 &&
1786         (ecmd->rx_max_coalesced_frames_low == 16 ||
1787         ecmd->rx_max_coalesced_frames_low == 32 ||
1788         ecmd->rx_max_coalesced_frames_low == 64 ||
1789         ecmd->rx_max_coalesced_frames_low == 128)) {
1790                 jme->reg_rxcs &= ~RXCS_FIFOTHNP;
1791                 switch(ecmd->rx_max_coalesced_frames_low) {
1792                 case 16:
1793                         jme->reg_rxcs |= RXCS_FIFOTHNP_16QW;
1794                         break;
1795                 case 32:
1796                         jme->reg_rxcs |= RXCS_FIFOTHNP_32QW;
1797                         break;
1798                 case 64:
1799                         jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
1800                         break;
1801                 case 128:
1802                 default:
1803                         jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
1804                 }
1805                 jme_restart_rx_engine(jme);
1806         }
1807         else {
1808                 return -EINVAL;
1809         }
1810
1811         return 0;
1812 }
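
/*
 * The accepted rx_max_coalesced_frames_low values map one-to-one onto
 * the RX FIFO threshold: 16/32/64/128 -> RXCS_FIFOTHNP_{16,32,64,128}QW.
 * Any other parameter combination is rejected with -EINVAL.
 */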
1813
1814 static void
1815 jme_get_pauseparam(struct net_device *netdev,
1816                         struct ethtool_pauseparam *ecmd)
1817 {
1818         struct jme_adapter *jme = netdev_priv(netdev);
1819         unsigned long flags;
1820         __u32 val;
1821
1822         ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
1823         ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;
1824
1825         spin_lock_irqsave(&jme->phy_lock, flags);
1826         val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
1827         spin_unlock_irqrestore(&jme->phy_lock, flags);
1828         ecmd->autoneg = (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
1829 }
1830
1831 static int
1832 jme_set_pauseparam(struct net_device *netdev,
1833                         struct ethtool_pauseparam *ecmd)
1834 {
1835         struct jme_adapter *jme = netdev_priv(netdev);
1836         unsigned long flags;
1837         __u32 val;
1838
1839         if( ((jme->reg_txpfc & TXPFC_PF_EN) != 0) !=
1840                 (ecmd->tx_pause != 0)) {
1841
1842                 if(ecmd->tx_pause)
1843                         jme->reg_txpfc |= TXPFC_PF_EN;
1844                 else
1845                         jme->reg_txpfc &= ~TXPFC_PF_EN;
1846
1847                 jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
1848         }
1849
1850         spin_lock_irqsave(&jme->rxmcs_lock, flags);
1851         if( ((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) !=
1852                 (ecmd->rx_pause != 0)) {
1853
1854                 if(ecmd->rx_pause)
1855                         jme->reg_rxmcs |= RXMCS_FLOWCTRL;
1856                 else
1857                         jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;
1858
1859                 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
1860         }
1861         spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
1862
1863         spin_lock_irqsave(&jme->phy_lock, flags);
1864         val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
1865         if( ((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) !=
1866                 (ecmd->autoneg != 0)) {
1867
1868                 if(ecmd->autoneg)
1869                         val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1870                 else
1871                         val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1872
1873                 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE, val);
1874         }
1875         spin_unlock_irqrestore(&jme->phy_lock, flags);
1876
1877         return 0;
1878 }
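
/*
 * The three knobs live in different places: tx_pause toggles the MAC
 * TXPFC register, rx_pause the RXMCS flow-control bit, and "autoneg"
 * here only controls whether symmetric/asymmetric pause is advertised
 * to the link partner via MII_ADVERTISE; it is not the full
 * autonegotiation switch.
 */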
1879
1880 static void
1881 jme_get_wol(struct net_device *netdev,
1882                 struct ethtool_wolinfo *wol)
1883 {
1884         struct jme_adapter *jme = netdev_priv(netdev);
1885
1886         wol->supported = WAKE_MAGIC | WAKE_PHY;
1887
1888         wol->wolopts = 0;
1889
1890         if(jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
1891                 wol->wolopts |= WAKE_PHY;
1892
1893         if(jme->reg_pmcs & PMCS_MFEN)
1894                 wol->wolopts |= WAKE_MAGIC;
1895
1896 }
1897
1898 static int
1899 jme_set_wol(struct net_device *netdev,
1900                 struct ethtool_wolinfo *wol)
1901 {
1902         struct jme_adapter *jme = netdev_priv(netdev);
1903
1904         if(wol->wolopts & (WAKE_MAGICSECURE |
1905                                 WAKE_UCAST |
1906                                 WAKE_MCAST |
1907                                 WAKE_BCAST |
1908                                 WAKE_ARP))
1909                 return -EOPNOTSUPP;
1910
1911         jme->reg_pmcs = 0;
1912
1913         if(wol->wolopts & WAKE_PHY)
1914                 jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;
1915
1916         if(wol->wolopts & WAKE_MAGIC)
1917                 jme->reg_pmcs |= PMCS_MFEN;
1918
1919         return 0;
1920 }
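
/*
 * Usage sketch with the standard ethtool wol flags (untested here):
 *      ethtool -s eth1 wol g   # magic packet        -> PMCS_MFEN
 *      ethtool -s eth1 wol p   # PHY/link activity   -> PMCS_LFEN|PMCS_LREN
 *      ethtool -s eth1 wol d   # disable wake-up     -> reg_pmcs = 0
 * Secure-on, unicast/multicast/broadcast and ARP wake-up are rejected.
 */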
1921
1922 static int
1923 jme_get_settings(struct net_device *netdev,
1924                      struct ethtool_cmd *ecmd)
1925 {
1926         struct jme_adapter *jme = netdev_priv(netdev);
1927         int rc;
1928         unsigned long flags;
1929
1930         spin_lock_irqsave(&jme->phy_lock, flags);
1931         rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
1932         spin_unlock_irqrestore(&jme->phy_lock, flags);
1933         return rc;
1934 }
1935
1936 static int
1937 jme_set_settings(struct net_device *netdev,
1938                      struct ethtool_cmd *ecmd)
1939 {
1940         struct jme_adapter *jme = netdev_priv(netdev);
1941         int rc, fdc=0;
1942         unsigned long flags;
1943
1944         if(ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
1945                 return -EINVAL;
1946
1947         if(jme->mii_if.force_media &&
1948         ecmd->autoneg != AUTONEG_ENABLE &&
1949         (jme->mii_if.full_duplex != ecmd->duplex))
1950                 fdc = 1;
1951
1952         spin_lock_irqsave(&jme->phy_lock, flags);
1953         rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
1954         spin_unlock_irqrestore(&jme->phy_lock, flags);
1955
1956         if(!rc && fdc)
1957                 jme_reset_link(jme);
1958
1959         if(!rc) {
1960                 jme->flags |= JME_FLAG_SSET;
1961                 jme->old_ecmd = *ecmd;
1962         }
1963
1964         return rc;
1965 }
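
/*
 * On success the requested settings are cached in old_ecmd and
 * JME_FLAG_SSET is raised, so jme_resume() can re-apply user-forced
 * link parameters after a suspend/resume cycle.
 */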
1966
1967 static __u32
1968 jme_get_link(struct net_device *netdev)
1969 {
1970         struct jme_adapter *jme = netdev_priv(netdev);
1971         return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
1972 }
1973
1974 static u32
1975 jme_get_rx_csum(struct net_device *netdev)
1976 {
1977         struct jme_adapter *jme = netdev_priv(netdev);
1978
1979         return jme->reg_rxmcs & RXMCS_CHECKSUM;
1980 }
1981
1982 static int
1983 jme_set_rx_csum(struct net_device *netdev, u32 on)
1984 {
1985         struct jme_adapter *jme = netdev_priv(netdev);
1986         unsigned long flags;
1987
1988         spin_lock_irqsave(&jme->rxmcs_lock, flags);
1989         if(on)
1990                 jme->reg_rxmcs |= RXMCS_CHECKSUM;
1991         else
1992                 jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
1993         jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
1994         spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
1995
1996         return 0;
1997 }
1998
1999 static int
2000 jme_set_tx_csum(struct net_device *netdev, u32 on)
2001 {
2002         if(on)
2003                 netdev->features |= NETIF_F_HW_CSUM;
2004         else
2005                 netdev->features &= ~NETIF_F_HW_CSUM;
2006
2007         return 0;
2008 }
2009
2010 static int
2011 jme_nway_reset(struct net_device *netdev)
2012 {
2013         struct jme_adapter *jme = netdev_priv(netdev);
2014         jme_restart_an(jme);
2015         return 0;
2016 }
2017
2018 static const struct ethtool_ops jme_ethtool_ops = {
2019         .get_drvinfo            = jme_get_drvinfo,
2020         .get_regs_len           = jme_get_regs_len,
2021         .get_regs               = jme_get_regs,
2022         .get_coalesce           = jme_get_coalesce,
2023         .set_coalesce           = jme_set_coalesce,
2024         .get_pauseparam         = jme_get_pauseparam,
2025         .set_pauseparam         = jme_set_pauseparam,
2026         .get_wol                = jme_get_wol,
2027         .set_wol                = jme_set_wol,
2028         .get_settings           = jme_get_settings,
2029         .set_settings           = jme_set_settings,
2030         .get_link               = jme_get_link,
2031         .get_rx_csum            = jme_get_rx_csum,
2032         .set_rx_csum            = jme_set_rx_csum,
2033         .set_tx_csum            = jme_set_tx_csum,
2034         .nway_reset             = jme_nway_reset,
2035 };
2036
2037 static int
2038 jme_pci_dma64(struct pci_dev *pdev)
2039 {
2040         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
2041                 if(!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
2042                         return 1;
2043
2044         if (!pci_set_dma_mask(pdev, DMA_40BIT_MASK))
2045                 if(!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK))
2046                         return 1;
2047
2048         if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
2049                 if(!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
2050                         return 0;
2051
2052         return -1;
2053 }
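
/*
 * Return-value contract, as consumed by jme_init_one() below:
 *       1: 64-bit or 40-bit DMA mask accepted, enable NETIF_F_HIGHDMA
 *       0: fell back to a 32-bit DMA mask
 *      -1: no mask accepted, probe fails with -EIO
 */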
2054
2055 static int __devinit
2056 jme_init_one(struct pci_dev *pdev,
2057              const struct pci_device_id *ent)
2058 {
2059         int rc = 0, using_dac;
2060         struct net_device *netdev;
2061         struct jme_adapter *jme;
2062
2063         /*
2064          * set up PCI device basics
2065          */
2066         rc = pci_enable_device(pdev);
2067         if(rc) {
2068                 printk(KERN_ERR PFX "Cannot enable PCI device.\n");
2069                 goto err_out;
2070         }
2071
2072         using_dac = jme_pci_dma64(pdev);
2073         if(using_dac < 0) {
2074                 printk(KERN_ERR PFX "Cannot set PCI DMA Mask.\n");
2075                 rc = -EIO;
2076                 goto err_out_disable_pdev;
2077         }
2078
2079         if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2080                 printk(KERN_ERR PFX "No PCI resource region found.\n");
2081                 rc = -ENOMEM;
2082                 goto err_out_disable_pdev;
2083         }
2084
2085         rc = pci_request_regions(pdev, DRV_NAME);
2086         if(rc) {
2087                 printk(KERN_ERR PFX "Cannot obtain PCI resource region.\n");
2088                 goto err_out_disable_pdev;
2089         }
2090
2091         pci_set_master(pdev);
2092
2093         /*
2094          * alloc and init net device
2095          */
2096         netdev = alloc_etherdev(sizeof(*jme));
2097         if(!netdev) {
2098                 rc = -ENOMEM;
2099                 goto err_out_release_regions;
2100         }
2101         netdev->open                    = jme_open;
2102         netdev->stop                    = jme_close;
2103         netdev->hard_start_xmit         = jme_start_xmit;
2104         netdev->set_mac_address         = jme_set_macaddr;
2105         netdev->set_multicast_list      = jme_set_multi;
2106         netdev->change_mtu              = jme_change_mtu;
2107         netdev->ethtool_ops             = &jme_ethtool_ops;
2108         netdev->tx_timeout              = jme_tx_timeout;
2109         netdev->watchdog_timeo          = TX_TIMEOUT;
2110         NETDEV_GET_STATS(netdev, &jme_get_stats);
2111         netdev->features                =       NETIF_F_HW_CSUM;
2112         if(using_dac)
2113                 netdev->features        |=      NETIF_F_HIGHDMA;
2114
2115         SET_NETDEV_DEV(netdev, &pdev->dev);
2116         pci_set_drvdata(pdev, netdev);
2117
2118         /*
2119          * init adapter info
2120          */
2121         jme = netdev_priv(netdev);
2122         jme->pdev = pdev;
2123         jme->dev = netdev;
2124         jme->old_mtu = netdev->mtu = 1500;
2125         jme->phylink = 0;
2126         jme->regs = ioremap(pci_resource_start(pdev, 0),
2127                              pci_resource_len(pdev, 0));
2128         if (!(jme->regs)) {
2129                 rc = -ENOMEM;
2130                 goto err_out_free_netdev;
2131         }
2132         jme->shadow_regs = pci_alloc_consistent(pdev,
2133                                                 sizeof(__u32) * SHADOW_REG_NR,
2134                                                 &(jme->shadow_dma));
2135         if (!(jme->shadow_regs)) {
2136                 rc = -ENOMEM;
2137                 goto err_out_unmap;
2138         }
2139
2140         spin_lock_init(&jme->phy_lock);
2141         spin_lock_init(&jme->macaddr_lock);
2142         spin_lock_init(&jme->rxmcs_lock);
2143
2144         atomic_set(&jme->link_changing, 1);
2145         atomic_set(&jme->rx_cleaning, 1);
2146         atomic_set(&jme->tx_cleaning, 1);
2147
2148         tasklet_init(&jme->pcc_task,
2149                      &jme_pcc_tasklet,
2150                      (unsigned long) jme);
2151         tasklet_init(&jme->linkch_task,
2152                      &jme_link_change_tasklet,
2153                      (unsigned long) jme);
2154         tasklet_init(&jme->txclean_task,
2155                      &jme_tx_clean_tasklet,
2156                      (unsigned long) jme);
2157         tasklet_init(&jme->rxclean_task,
2158                      &jme_rx_clean_tasklet,
2159                      (unsigned long) jme);
2160         tasklet_init(&jme->rxempty_task,
2161                      &jme_rx_empty_tasklet,
2162                      (unsigned long) jme);
2163         jme->mii_if.dev = netdev;
2164         jme->mii_if.phy_id = 1;
2165         jme->mii_if.supports_gmii = 1;
2166         jme->mii_if.mdio_read = jme_mdio_read;
2167         jme->mii_if.mdio_write = jme_mdio_write;
2168
2169         jme->dpi.cur = PCC_P1;
2170
2171         jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
2172         jme->reg_rxcs = RXCS_DEFAULT;
2173         jme->reg_rxmcs = RXMCS_DEFAULT;
2174         jme->reg_txpfc = 0;
2175         jme->reg_pmcs = 0;
2176         /*
2177          * Get Max Read Req Size from PCI Config Space
2178          */
2179         pci_read_config_byte(pdev, PCI_CONF_DCSR_MRRS, &jme->mrrs);
2180         switch(jme->mrrs) {
2181                 case MRRS_128B:
2182                         jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
2183                         break;
2184                 case MRRS_256B:
2185                         jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
2186                         break;
2187                 default:
2188                         jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
2189                         break;
2190         }
2191
2192
2193         /*
2194          * Reset MAC processor and reload EEPROM for MAC Address
2195          */
2196         jme_clear_pm(jme);
2197         jme_reset_phy_processor(jme);
2198         jme_reset_mac_processor(jme);
2199         rc = jme_reload_eeprom(jme);
2200         if(rc) {
2201                 printk(KERN_ERR PFX
2202                         "Failed to reload EEPROM for reading MAC address.\n");
2203                 goto err_out_free_shadow;
2204         }
2205         jme_load_macaddr(netdev);
2206
2207
2208         /*
2209          * Tell stack that we are not ready to work until open()
2210          */
2211         netif_carrier_off(netdev);
2212         netif_stop_queue(netdev);
2213
2214         /*
2215          * Register netdev
2216          */
2217         rc = register_netdev(netdev);
2218         if(rc) {
2219                 printk(KERN_ERR PFX "Cannot register net device.\n");
2220                 goto err_out_free_shadow;
2221         }
2222
2223         jprintk(netdev->name,
2224                 "JMC250 gigabit eth %02x:%02x:%02x:%02x:%02x:%02x\n",
2225                 netdev->dev_addr[0],
2226                 netdev->dev_addr[1],
2227                 netdev->dev_addr[2],
2228                 netdev->dev_addr[3],
2229                 netdev->dev_addr[4],
2230                 netdev->dev_addr[5]);
2231
2232         return 0;
2233
2234 err_out_free_shadow:
2235         pci_free_consistent(pdev,
2236                             sizeof(__u32) * SHADOW_REG_NR,
2237                             jme->shadow_regs,
2238                             jme->shadow_dma);
2239 err_out_unmap:
2240         iounmap(jme->regs);
2241 err_out_free_netdev:
2242         pci_set_drvdata(pdev, NULL);
2243         free_netdev(netdev);
2244 err_out_release_regions:
2245         pci_release_regions(pdev);
2246 err_out_disable_pdev:
2247         pci_disable_device(pdev);
2248 err_out:
2249         return rc;
2250 }
2251
2252 static void __devexit
2253 jme_remove_one(struct pci_dev *pdev)
2254 {
2255         struct net_device *netdev = pci_get_drvdata(pdev);
2256         struct jme_adapter *jme = netdev_priv(netdev);
2257
2258         unregister_netdev(netdev);
2259         pci_free_consistent(pdev,
2260                             sizeof(__u32) * SHADOW_REG_NR,
2261                             jme->shadow_regs,
2262                             jme->shadow_dma);
2263         iounmap(jme->regs);
2264         pci_set_drvdata(pdev, NULL);
2265         free_netdev(netdev);
2266         pci_release_regions(pdev);
2267         pci_disable_device(pdev);
2268
2269 }
2270
2271 static void
2272 jme_set_10m_half(struct jme_adapter *jme)
2273 {
2274         __u32 bmcr, tmp;
2275
2276         bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
2277         tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
2278                        BMCR_SPEED1000 | BMCR_FULLDPLX);
2279
2280         if (bmcr != tmp)
2281                 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);
2282
2283         jwrite32(jme, JME_GHC, GHC_SPEED_10M);
2284 }
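
/*
 * Used on the suspend path below: forcing the PHY (and GHC) down to
 * 10M half-duplex presumably keeps the link alive for wake-on-LAN at
 * minimal power; this is an inference from its caller, not a
 * documented hardware requirement.
 */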
2285
2286 static int
2287 jme_suspend(struct pci_dev *pdev, pm_message_t state)
2288 {
2289         struct net_device *netdev = pci_get_drvdata(pdev);
2290         struct jme_adapter *jme = netdev_priv(netdev);
2291         int timeout = 100;
2292
2293         atomic_dec(&jme->link_changing);
2294
2295         netif_device_detach(netdev);
2296         netif_stop_queue(netdev);
2297         jme_stop_irq(jme);
2298         jme_free_irq(jme);
2299
2300         while(--timeout > 0 &&
2301         (
2302                 atomic_read(&jme->rx_cleaning) != 1 ||
2303                 atomic_read(&jme->tx_cleaning) != 1
2304         )) {
2305                 mdelay(1);
2306         }
2307         if(!timeout) {
2308                 jeprintk(netdev->name, "Timeout waiting for tasklets to finish.\n");
2309                 return -EBUSY;
2310         }
2311         jme_disable_shadow(jme);
2312
2313         if(netif_carrier_ok(netdev)) {
2314                 jme_stop_pcc_timer(jme);
2315                 jme_reset_mac_processor(jme);
2316                 jme_free_rx_resources(jme);
2317                 jme_free_tx_resources(jme);
2318                 netif_carrier_off(netdev);
2319                 jme->phylink = 0;
2320         }
2321
2322         jme_set_10m_half(jme);
2323
2324         pci_save_state(pdev);
2325         if(jme->reg_pmcs) {
2326                 jwrite32(jme, JME_PMCS, jme->reg_pmcs);
2327                 pci_enable_wake(pdev, PCI_D3cold, 1);
2328         }
2329         else {
2330                 pci_enable_wake(pdev, PCI_D3cold, 0);
2331         }
2332         pci_set_power_state(pdev, pci_choose_state(pdev, state));
2333
2334         return 0;
2335 }
2336
2337 static int
2338 jme_resume(struct pci_dev *pdev)
2339 {
2340         struct net_device *netdev = pci_get_drvdata(pdev);
2341         struct jme_adapter *jme = netdev_priv(netdev);
2342
2343         jme_clear_pm(jme);
2344         pci_restore_state(pdev);
2345
2346         if(jme->flags & JME_FLAG_SSET)
2347                 jme_set_settings(netdev, &jme->old_ecmd);
2348         else
2349                 jme_reset_phy_processor(jme);
2350
2351         jme_reset_mac_processor(jme);
2352         jme_enable_shadow(jme);
2353         jme_request_irq(jme);
2354         jme_start_irq(jme);
2355         netif_device_attach(netdev);
2356
2357         atomic_inc(&jme->link_changing);
2358
2359         jme_reset_link(jme);
2360
2361         return 0;
2362 }
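
/*
 * Resume mirrors suspend in reverse: clear PM state, restore PCI
 * config, re-apply any user-forced link settings (JME_FLAG_SSET) or
 * reset the PHY, bring the MAC, shadow registers and IRQs back, then
 * let jme_reset_link() renegotiate.
 */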
2363
2364 static struct pci_device_id jme_pci_tbl[] = {
2365         { PCI_VDEVICE(JMICRON, 0x250) },
2366         { }
2367 };
2368
2369 static struct pci_driver jme_driver = {
2370         .name           = DRV_NAME,
2371         .id_table       = jme_pci_tbl,
2372         .probe          = jme_init_one,
2373         .remove         = __devexit_p(jme_remove_one),
2374 #ifdef CONFIG_PM
2375         .suspend        = jme_suspend,
2376         .resume         = jme_resume,
2377 #endif /* CONFIG_PM */
2378 };
2379
2380 static int __init
2381 jme_init_module(void)
2382 {
2383         printk(KERN_INFO PFX "JMicron JMC250 gigabit Ethernet "
2384                "driver version %s\n", DRV_VERSION);
2385         return pci_register_driver(&jme_driver);
2386 }
2387
2388 static void __exit
2389 jme_cleanup_module(void)
2390 {
2391         pci_unregister_driver(&jme_driver);
2392 }
2393
2394 module_init(jme_init_module);
2395 module_exit(jme_cleanup_module);
2396
2397 MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
2398 MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
2399 MODULE_LICENSE("GPL");
2400 MODULE_VERSION(DRV_VERSION);
2401 MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
2402