/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * http://www.jmicron.com/
 *
 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

/*
 * Note:
 *      Backdoor for changing "FIFO Threshold for processing next packet"
 *         Using:
 *              ethtool -C eth1 adaptive-rx on adaptive-tx on \
 *              rx-usecs 250 rx-frames-low N
 *         N := 16 | 32 | 64 | 128
 */

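/*
 * The rx-frames-low value given above is mapped onto the RXCS FIFO
 * threshold field in jme_set_coalesce() near the end of this file;
 * 16 selects RXCS_FIFOTHNP_16QW, and the other values presumably
 * select their matching RXCS_FIFOTHNP_*QW constants.
 */
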
/*
 * Timeline before release:
 *      Stage 4: Basic feature support.
 *      0.7:
 *      -  Implement Power Management related functions.
 *
 *      Stage 5: Advanced offloading support.
 *      0.8:
 *      -  Implement VLAN offloading.
 *      0.9:
 *      -  Implement scatter-gather offloading.
 *         Use pci_map_page on scattered sk_buff for HIGHMEM support.
 *      -  Implement TCP Segmentation offloading.
 *         Due to TX FIFO size, we should turn off TSO when mtu > 1500.
 *
 *      Stage 6: CPU Load balancing.
 *      1.0:
 *      -  Implement MSI-X.
 *         Along with multiple RX queues, for CPU load balancing.
 *
 *      Stage 7:
 *      -  Clean up/re-organize code, performance tuning (alignment etc...).
 *      -  Test and release 1.0.
 *
 *      Non-Critical:
 *      -  Use NAPI instead of rx_tasklet?
 *         PCC supports both packet-counter and timeout interrupts for
 *         receive and transmit completion; is NAPI really needed?
 *      -  Decode register dump for ethtool.
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include "jme.h"

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
static struct net_device_stats *
jme_get_stats(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        return &jme->stats;
}
#endif

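/*
 * SMI (MDIO) access: a request is posted to the JME_SMI register with the
 * PHY and register addresses encoded; the SMI_OP_REQ bit stays set until
 * the hardware completes the cycle, so both accessors below busy-poll it
 * with udelay(1) up to JME_PHY_TIMEOUT iterations before giving up.
 */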
static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int i, val;

        jwrite32(jme, JME_SMI, SMI_OP_REQ |
                                smi_phy_addr(phy) |
                                smi_reg_addr(reg));

        wmb();
        for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
                udelay(1);
                if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
                        break;
        }

        if (i == 0) {
                jeprintk(netdev->name, "phy read timeout : %d\n", reg);
                return 0;
        }

        return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

static void
jme_mdio_write(struct net_device *netdev,
                                int phy, int reg, int val)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int i;

        jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
                ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
                smi_phy_addr(phy) | smi_reg_addr(reg));

        wmb();
        for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
                udelay(1);
                if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
                        break;
        }

        if (i == 0)
                jeprintk(netdev->name, "phy write timeout : %d\n", reg);

        return;
}

__always_inline static void
jme_reset_phy_processor(struct jme_adapter *jme)
{
        __u32 val;

        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
                        MII_ADVERTISE, ADVERTISE_ALL |
                        ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
                        MII_CTRL1000,
                        ADVERTISE_1000FULL | ADVERTISE_1000HALF);

        val = jme_mdio_read(jme->dev,
                                jme->mii_if.phy_id,
                                MII_BMCR);

        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
                        MII_BMCR, val | BMCR_RESET);

        return;
}

__always_inline static void
jme_reset_mac_processor(struct jme_adapter *jme)
{
        jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
        udelay(2);
        jwrite32(jme, JME_GHC, jme->reg_ghc);
        jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
        jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
        jwrite32(jme, JME_WFODP, 0);
        jwrite32(jme, JME_WFOI, 0);
        jwrite32(jme, JME_GPREG0, GPREG0_DEFAULT);
        jwrite32(jme, JME_GPREG1, 0);
}

__always_inline static void
jme_clear_pm(struct jme_adapter *jme)
{
        jwrite32(jme, JME_PMCS, 0xFFFF0000);
        pci_set_power_state(jme->pdev, PCI_D0);
}

static int
jme_reload_eeprom(struct jme_adapter *jme)
{
        __u32 val;
        int i;

        val = jread32(jme, JME_SMBCSR);

        if(val & SMBCSR_EEPROMD)
        {
                val |= SMBCSR_CNACK;
                jwrite32(jme, JME_SMBCSR, val);
                val |= SMBCSR_RELOAD;
                jwrite32(jme, JME_SMBCSR, val);
                mdelay(12);

                for (i = JME_SMB_TIMEOUT; i > 0; --i)
                {
                        mdelay(1);
                        if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
                                break;
                }

                if(i == 0) {
                        jeprintk(jme->dev->name, "eeprom reload timeout\n");
                        return -EIO;
                }
        }
        else
                return -EIO;

        return 0;
}

static void
jme_load_macaddr(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        unsigned char macaddr[6];
        __u32 val;

        spin_lock(&jme->macaddr_lock);
        val = jread32(jme, JME_RXUMA_LO);
        macaddr[0] = (val >>  0) & 0xFF;
        macaddr[1] = (val >>  8) & 0xFF;
        macaddr[2] = (val >> 16) & 0xFF;
        macaddr[3] = (val >> 24) & 0xFF;
        val = jread32(jme, JME_RXUMA_HI);
        macaddr[4] = (val >>  0) & 0xFF;
        macaddr[5] = (val >>  8) & 0xFF;
        memcpy(netdev->dev_addr, macaddr, 6);
        spin_unlock(&jme->macaddr_lock);
}

__always_inline static void
jme_set_rx_pcc(struct jme_adapter *jme, int p)
{
        switch(p) {
        case PCC_P1:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        case PCC_P2:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        case PCC_P3:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        default:
                break;
        }

        dprintk(jme->dev->name, "Switched to PCC_P%d\n", p);
}

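/*
 * jme_start_irq() primes the dynamic PCC state at the lightest profile
 * (PCC_P1), programs the TX packet-completion coalescing timer/counter,
 * and finally unmasks the interrupts in INTR_ENABLE.
 */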
static void
jme_start_irq(struct jme_adapter *jme)
{
        register struct dynpcc_info *dpi = &(jme->dpi);

        jme_set_rx_pcc(jme, PCC_P1);
        dpi->cur                = PCC_P1;
        dpi->attempt            = PCC_P1;
        dpi->cnt                = 0;

        jwrite32(jme, JME_PCCTX,
                        ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
                        ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
                        PCCTXQ0_EN
                );

        /*
         * Enable Interrupts
         */
        jwrite32(jme, JME_IENS, INTR_ENABLE);
}

__always_inline static void
jme_stop_irq(struct jme_adapter *jme)
{
        /*
         * Disable Interrupts
         */
        jwrite32(jme, JME_IENC, INTR_ENABLE);
}

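/*
 * The shadow base address register points the NIC at a small DMA buffer
 * (jme->shadow_dma) into which it can post register snapshots; jme_msi()
 * below reads the interrupt status from shadow_regs[SHADOW_IEVE] instead
 * of issuing an MMIO read. The low 5 bits of the address are reused for
 * control flags, hence the ~0x1F masking with SHBA_POSTEN or'd in.
 */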
__always_inline static void
jme_enable_shadow(struct jme_adapter *jme)
{
        jwrite32(jme,
                 JME_SHBA_LO,
                 ((__u32)jme->shadow_dma & ~((__u32)0x1F)) | SHBA_POSTEN);
}

__always_inline static void
jme_disable_shadow(struct jme_adapter *jme)
{
        jwrite32(jme, JME_SHBA_LO, 0x0);
}

static int
jme_check_link(struct net_device *netdev, int testonly)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        __u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr;
        char linkmsg[64];
        int rc = 0;

        phylink = jread32(jme, JME_PHY_LINK);

        if (phylink & PHY_LINK_UP) {
                if(!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
                        /*
                         * If we did not enable AN,
                         * speed/duplex info should be obtained from SMI.
                         */
                        phylink = PHY_LINK_UP;

                        bmcr = jme_mdio_read(jme->dev,
                                                jme->mii_if.phy_id,
                                                MII_BMCR);

                        phylink |= ((bmcr & BMCR_SPEED1000) &&
                                        (bmcr & BMCR_SPEED100) == 0) ?
                                        PHY_LINK_SPEED_1000M :
                                        (bmcr & BMCR_SPEED100) ?
                                        PHY_LINK_SPEED_100M :
                                        PHY_LINK_SPEED_10M;

                        phylink |= (bmcr & BMCR_FULLDPLX) ?
                                         PHY_LINK_DUPLEX : 0;

                        strcpy(linkmsg, "Forced: ");
                }
                else {
                        /*
                         * Keep polling for speed/duplex resolve complete
                         */
                        while(!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
                                --cnt) {

                                udelay(1);
                                phylink = jread32(jme, JME_PHY_LINK);

                        }

                        if(!cnt)
                                jeprintk(netdev->name,
                                        "Waiting for speed/duplex resolution timed out.\n");

                        strcpy(linkmsg, "ANed: ");
                }

                if(jme->phylink == phylink) {
                        rc = 1;
                        goto out;
                }
                if(testonly)
                        goto out;

                jme->phylink = phylink;

                switch(phylink & PHY_LINK_SPEED_MASK) {
                        case PHY_LINK_SPEED_10M:
                                ghc = GHC_SPEED_10M;
                                strcat(linkmsg, "10 Mbps, ");
                                break;
                        case PHY_LINK_SPEED_100M:
                                ghc = GHC_SPEED_100M;
                                strcat(linkmsg, "100 Mbps, ");
                                break;
                        case PHY_LINK_SPEED_1000M:
                                ghc = GHC_SPEED_1000M;
                                strcat(linkmsg, "1000 Mbps, ");
                                break;
                        default:
                                ghc = 0;
                                break;
                }
                ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;

                strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
                                        "Full-Duplex, " :
                                        "Half-Duplex, ");

                if(phylink & PHY_LINK_MDI_STAT)
                        strcat(linkmsg, "MDI-X");
                else
                        strcat(linkmsg, "MDI");

                if(phylink & PHY_LINK_DUPLEX)
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
                else {
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
                                                TXMCS_BACKOFF |
                                                TXMCS_CARRIERSENSE |
                                                TXMCS_COLLISION);
                        jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
                                ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
                                TXTRHD_TXREN |
                                ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
                }

                jme->reg_ghc = ghc;
                jwrite32(jme, JME_GHC, ghc);

                jprintk(netdev->name, "Link is up at %s.\n", linkmsg);
                netif_carrier_on(netdev);
        }
        else {
                if(testonly)
                        goto out;

                jprintk(netdev->name, "Link is down.\n");
                jme->phylink = 0;
                netif_carrier_off(netdev);
        }

out:
        return rc;
}

static int
jme_alloc_txdesc(struct jme_adapter *jme,
                        int nr_alloc)
{
        struct jme_ring *txring = jme->txring;
        int idx;

        idx = txring->next_to_use;

        if(unlikely(atomic_read(&txring->nr_free) < nr_alloc))
                return -1;

        atomic_sub(nr_alloc, &txring->nr_free);

        if((txring->next_to_use += nr_alloc) >= RING_DESC_NR)
                txring->next_to_use -= RING_DESC_NR;

        return idx;
}

static void
jme_tx_csum(struct sk_buff *skb, unsigned mtu, __u8 *flags)
{
        if(skb->ip_summed == CHECKSUM_PARTIAL) {
                __u8 ip_proto;

                switch (skb->protocol) {
                case __constant_htons(ETH_P_IP):
                        ip_proto = ip_hdr(skb)->protocol;
                        break;
                case __constant_htons(ETH_P_IPV6):
                        ip_proto = ipv6_hdr(skb)->nexthdr;
                        break;
                default:
                        ip_proto = 0;
                        break;
                }

                switch(ip_proto) {
                case IPPROTO_TCP:
                        *flags |= TXFLAG_TCPCS;
                        break;
                case IPPROTO_UDP:
                        *flags |= TXFLAG_UDPCS;
                        break;
                default:
                        jeprintk("jme", "Unsupported upper-layer protocol.\n");
                        break;
                }
        }
}

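/*
 * Descriptor indices below wrap using "& (RING_DESC_NR - 1)", which is
 * only correct if RING_DESC_NR is a power of two; jme_alloc_txdesc()
 * above wraps by subtraction instead, and both schemes agree under that
 * assumption.
 */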
static int
jme_set_new_txdesc(struct jme_adapter *jme,
                        struct sk_buff *skb)
{
        struct jme_ring *txring = jme->txring;
        volatile struct txdesc *txdesc = txring->desc, *ctxdesc;
        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
        dma_addr_t dmaaddr;
        int i, idx, nr_desc;
        __u8 flags;

        nr_desc = 2;
        idx = jme_alloc_txdesc(jme, nr_desc);

        if(unlikely(idx < 0))
                return NETDEV_TX_BUSY;

        for(i = 1 ; i < nr_desc ; ++i) {
                ctxdesc = txdesc + ((idx + i) & (RING_DESC_NR-1));
                ctxbi = txbi + ((idx + i) & (RING_DESC_NR-1));

                dmaaddr = pci_map_single(jme->pdev,
                                         skb->data,
                                         skb->len,
                                         PCI_DMA_TODEVICE);

                pci_dma_sync_single_for_device(jme->pdev,
                                               dmaaddr,
                                               skb->len,
                                               PCI_DMA_TODEVICE);

                ctxdesc->dw[0] = 0;
                ctxdesc->dw[1] = 0;
                ctxdesc->desc2.flags    = TXFLAG_OWN;
                if(jme->dev->features & NETIF_F_HIGHDMA)
                        ctxdesc->desc2.flags |= TXFLAG_64BIT;
                ctxdesc->desc2.datalen  = cpu_to_le16(skb->len);
                ctxdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32);
                ctxdesc->desc2.bufaddrl = cpu_to_le32(
                                                (__u64)dmaaddr & 0xFFFFFFFFUL);

                ctxbi->mapping = dmaaddr;
                ctxbi->len = skb->len;
        }

        ctxdesc = txdesc + idx;
        ctxbi = txbi + idx;

        ctxdesc->dw[0] = 0;
        ctxdesc->dw[1] = 0;
        ctxdesc->dw[2] = 0;
        ctxdesc->dw[3] = 0;
        ctxdesc->desc1.pktsize = cpu_to_le16(skb->len);
        /*
         * Set the OWN bit last. If the kernel fills descriptors faster
         * than the NIC drains them, the NIC could otherwise start sending
         * this descriptor before we tell it to start sending this TX
         * queue. All other fields are already filled in correctly.
         */
        wmb();
        flags = TXFLAG_OWN | TXFLAG_INT;
        jme_tx_csum(skb, jme->dev->mtu, &flags);
        ctxdesc->desc1.flags = flags;
        /*
         * Set tx buffer info after telling the NIC to send,
         * for better tx_clean timing.
         */
        wmb();
        ctxbi->nr_desc = nr_desc;
        ctxbi->skb = skb;

        tx_dbg(jme->dev->name, "Xmit: %d+%d\n", idx, nr_desc);

        return 0;
}

static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
        struct jme_ring *txring = &(jme->txring[0]);

        txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                                           TX_RING_ALLOC_SIZE,
                                           &(txring->dmaalloc),
                                           GFP_ATOMIC);

        if(!txring->alloc) {
                txring->desc = NULL;
                txring->dmaalloc = 0;
                txring->dma = 0;
                return -ENOMEM;
        }

        /*
         * 16-byte alignment
         */
        txring->desc            = (void*)ALIGN((unsigned long)(txring->alloc),
                                                RING_DESC_ALIGN);
        txring->dma             = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
        txring->next_to_use     = 0;
        txring->next_to_clean   = 0;
        atomic_set(&txring->nr_free, RING_DESC_NR);

        /*
         * Initialize Transmit Descriptors
         */
        memset(txring->alloc, 0, TX_RING_ALLOC_SIZE);
        memset(txring->bufinf, 0,
                sizeof(struct jme_buffer_info) * RING_DESC_NR);

        return 0;
}

static void
jme_free_tx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *txring = &(jme->txring[0]);
        struct jme_buffer_info *txbi = txring->bufinf;

        if(txring->alloc) {
                for(i = 0 ; i < RING_DESC_NR ; ++i) {
                        txbi = txring->bufinf + i;
                        if(txbi->skb) {
                                dev_kfree_skb(txbi->skb);
                                txbi->skb = NULL;
                        }
                        txbi->mapping   = 0;
                        txbi->len       = 0;
                        txbi->nr_desc   = 0;
                }

                dma_free_coherent(&(jme->pdev->dev),
                                  TX_RING_ALLOC_SIZE,
                                  txring->alloc,
                                  txring->dmaalloc);

                txring->alloc           = NULL;
                txring->desc            = NULL;
                txring->dmaalloc        = 0;
                txring->dma             = 0;
        }
        txring->next_to_use     = 0;
        txring->next_to_clean   = 0;
        atomic_set(&txring->nr_free, 0);
}

__always_inline static void
jme_enable_tx_engine(struct jme_adapter *jme)
{
        /*
         * Select Queue 0
         */
        jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);

        /*
         * Setup TX Queue 0 DMA Base Address
         */
        jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
        jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
        jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

        /*
         * Setup TX Descriptor Count
         */
        jwrite32(jme, JME_TXQDC, RING_DESC_NR);

        /*
         * Enable TX Engine
         */
        wmb();
        jwrite32(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
                                TXCS_ENABLE);
}

__always_inline static void
jme_disable_tx_engine(struct jme_adapter *jme)
{
        int i;
        __u32 val;

        /*
         * Disable TX Engine
         */
        jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);

        val = jread32(jme, JME_TXCS);
        for(i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i)
        {
                mdelay(1);
                val = jread32(jme, JME_TXCS);
        }

        if(!i) {
                jeprintk(jme->dev->name, "Disable TX engine timeout.\n");
                jme_reset_mac_processor(jme);
        }
}

static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
        struct jme_ring *rxring = jme->rxring;
        register volatile struct rxdesc *rxdesc = rxring->desc;
        struct jme_buffer_info *rxbi = rxring->bufinf;
        rxdesc += i;
        rxbi += i;

        rxdesc->dw[0] = 0;
        rxdesc->dw[1] = 0;
        rxdesc->desc1.bufaddrh  = cpu_to_le32((__u64)rxbi->mapping >> 32);
        rxdesc->desc1.bufaddrl  = cpu_to_le32(
                                        (__u64)rxbi->mapping & 0xFFFFFFFFUL);
        rxdesc->desc1.datalen   = cpu_to_le16(rxbi->len);
        if(jme->dev->features & NETIF_F_HIGHDMA)
                rxdesc->desc1.flags = RXFLAG_64BIT;
        wmb();
        rxdesc->desc1.flags     |= RXFLAG_OWN | RXFLAG_INT;
}

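/*
 * Allocate one linear RX buffer: an skb sized for the current MTU plus
 * RX_EXTRA_LEN, with skb->data pushed up to the next RX_BUF_DMA_ALIGN
 * boundary before being DMA-mapped, since the RX engine presumably
 * requires aligned buffer addresses.
 */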
static int
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        struct jme_buffer_info *rxbi = rxring->bufinf;
        unsigned long offset;
        struct sk_buff *skb;

        skb = netdev_alloc_skb(jme->dev,
                jme->dev->mtu + RX_EXTRA_LEN);
        if(unlikely(!skb))
                return -ENOMEM;

        if(unlikely(skb_is_nonlinear(skb))) {
                dprintk(jme->dev->name,
                        "Allocated skb fragged(%d).\n",
                        skb_shinfo(skb)->nr_frags);
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        if(unlikely(offset =
                        (unsigned long)(skb->data)
                        & ((unsigned long)RX_BUF_DMA_ALIGN - 1)))
                skb_reserve(skb, RX_BUF_DMA_ALIGN - offset);

        rxbi += i;
        rxbi->skb = skb;
        rxbi->len = skb_tailroom(skb);
        rxbi->mapping = pci_map_single(jme->pdev,
                                       skb->data,
                                       rxbi->len,
                                       PCI_DMA_FROMDEVICE);

        return 0;
}

static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        struct jme_buffer_info *rxbi = rxring->bufinf;
        rxbi += i;

        if(rxbi->skb) {
                pci_unmap_single(jme->pdev,
                                 rxbi->mapping,
                                 rxbi->len,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb(rxbi->skb);
                rxbi->skb = NULL;
                rxbi->mapping = 0;
                rxbi->len = 0;
        }
}

static void
jme_free_rx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *rxring = &(jme->rxring[0]);

        if(rxring->alloc) {
                for(i = 0 ; i < RING_DESC_NR ; ++i)
                        jme_free_rx_buf(jme, i);

                dma_free_coherent(&(jme->pdev->dev),
                                  RX_RING_ALLOC_SIZE,
                                  rxring->alloc,
                                  rxring->dmaalloc);
                rxring->alloc    = NULL;
                rxring->desc     = NULL;
                rxring->dmaalloc = 0;
                rxring->dma      = 0;
        }
        rxring->next_to_use   = 0;
        rxring->next_to_clean = 0;
}

static int
jme_setup_rx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *rxring = &(jme->rxring[0]);

        rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                                           RX_RING_ALLOC_SIZE,
                                           &(rxring->dmaalloc),
                                           GFP_ATOMIC);
        if(!rxring->alloc) {
                rxring->desc = NULL;
                rxring->dmaalloc = 0;
                rxring->dma = 0;
                return -ENOMEM;
        }

        /*
         * 16-byte alignment
         */
        rxring->desc            = (void*)ALIGN((unsigned long)(rxring->alloc),
                                                RING_DESC_ALIGN);
        rxring->dma             = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
        rxring->next_to_use     = 0;
        rxring->next_to_clean   = 0;

        /*
         * Initialize Receive Descriptors
         */
        for(i = 0 ; i < RING_DESC_NR ; ++i) {
                if(unlikely(jme_make_new_rx_buf(jme, i))) {
                        jme_free_rx_resources(jme);
                        return -ENOMEM;
                }

                jme_set_clean_rxdesc(jme, i);
        }

        return 0;
}

__always_inline static void
jme_enable_rx_engine(struct jme_adapter *jme)
{
        /*
         * Setup RX DMA Base Address
         */
        jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
        jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
        jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);

        /*
         * Setup RX Descriptor Count
         */
        jwrite32(jme, JME_RXQDC, RING_DESC_NR);

        /*
         * Setup Unicast Filter
         */
        jme_set_multi(jme->dev);

        /*
         * Enable RX Engine
         */
        wmb();
        jwrite32(jme, JME_RXCS, jme->reg_rxcs |
                                RXCS_QUEUESEL_Q0 |
                                RXCS_ENABLE |
                                RXCS_QST);
}

__always_inline static void
jme_restart_rx_engine(struct jme_adapter *jme)
{
        /*
         * Start RX Engine
         */
        jwrite32(jme, JME_RXCS, jme->reg_rxcs |
                                RXCS_QUEUESEL_Q0 |
                                RXCS_ENABLE |
                                RXCS_QST);
}

__always_inline static void
jme_disable_rx_engine(struct jme_adapter *jme)
{
        int i;
        __u32 val;

        /*
         * Disable RX Engine
         */
        val = jread32(jme, JME_RXCS);
        val &= ~RXCS_ENABLE;
        jwrite32(jme, JME_RXCS, val);

        val = jread32(jme, JME_RXCS);
        for(i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i)
        {
                mdelay(100);
                val = jread32(jme, JME_RXCS);
        }

        if(!i)
                jeprintk(jme->dev->name, "Disable RX engine timeout.\n");
}

static void
jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx, int summed)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        volatile struct rxdesc *rxdesc = rxring->desc;
        struct jme_buffer_info *rxbi = rxring->bufinf;
        struct sk_buff *skb;
        int framesize;

        rxdesc += idx;
        rxbi += idx;

        skb = rxbi->skb;
        pci_dma_sync_single_for_cpu(jme->pdev,
                                        rxbi->mapping,
                                        rxbi->len,
                                        PCI_DMA_FROMDEVICE);

        if(unlikely(jme_make_new_rx_buf(jme, idx))) {
                pci_dma_sync_single_for_device(jme->pdev,
                                                rxbi->mapping,
                                                rxbi->len,
                                                PCI_DMA_FROMDEVICE);

                ++(NET_STAT(jme).rx_dropped);
        }
        else {
                framesize = le16_to_cpu(rxdesc->descwb.framesize)
                                - RX_PREPAD_SIZE;

                skb_reserve(skb, RX_PREPAD_SIZE);
                skb_put(skb, framesize);
                skb->protocol = eth_type_trans(skb, jme->dev);

                if(summed)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

                netif_rx(skb);

                if(le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST_MUL)
                        ++(NET_STAT(jme).multicast);

                jme->dev->last_rx = jiffies;
                NET_STAT(jme).rx_bytes += framesize;
                ++(NET_STAT(jme).rx_packets);
        }

        jme_set_clean_rxdesc(jme, idx);
}

static int
jme_rxsum_bad(struct jme_adapter *jme, __u16 flags)
{
        if(unlikely((flags & RXWBFLAG_TCPON) &&
        !(flags & RXWBFLAG_TCPCS))) {
                csum_dbg(jme->dev->name, "TCP Checksum error.\n");
                return 1;
        }
        else if(unlikely((flags & RXWBFLAG_UDPON) &&
        !(flags & RXWBFLAG_UDPCS))) {
                csum_dbg(jme->dev->name, "UDP Checksum error.\n");
                return 1;
        }
        else if(unlikely((flags & RXWBFLAG_IPV4) &&
        !(flags & RXWBFLAG_IPCS))) {
                csum_dbg(jme->dev->name, "IPV4 Checksum error.\n");
                return 1;
        }
        else {
                return 0;
        }
}

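/*
 * Main RX cleaning loop: walk the ring from next_to_clean, stopping at
 * the first descriptor the hardware still owns or has not finished
 * writing back. Error frames (or frames spanning several descriptors,
 * which this version does not support) only update the error counters
 * and recycle their descriptors; good frames are handed to the stack by
 * jme_alloc_and_feed_skb().
 */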
static int
jme_process_receive(struct jme_adapter *jme, int limit)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        volatile struct rxdesc *rxdesc = rxring->desc;
        int i, j, ccnt, desccnt;

        i = rxring->next_to_clean;
        while(limit-- > 0)
        {
                rxdesc = rxring->desc;
                rxdesc += i;

                if((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
                !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
                        goto out;

                desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

                rx_dbg(jme->dev->name, "RX: Cleaning %d\n", i);

                if(unlikely(desccnt > 1 ||
                rxdesc->descwb.errstat & RXWBERR_ALLERR ||
                jme_rxsum_bad(jme, rxdesc->descwb.flags))) {

                        if(rxdesc->descwb.errstat & RXWBERR_CRCERR)
                                ++(NET_STAT(jme).rx_crc_errors);
                        else if(rxdesc->descwb.errstat & RXWBERR_OVERUN)
                                ++(NET_STAT(jme).rx_fifo_errors);
                        else
                                ++(NET_STAT(jme).rx_errors);

                        if(desccnt > 1) {
                                rx_dbg(jme->dev->name,
                                        "RX: More than one(%d) descriptor, "
                                        "framelen=%d\n",
                                        desccnt, le16_to_cpu(rxdesc->descwb.framesize));
                                limit -= desccnt - 1;
                        }

                        for(j = i, ccnt = desccnt ; ccnt-- ; ) {
                                jme_set_clean_rxdesc(jme, j);

                                if(unlikely(++j == RING_DESC_NR))
                                        j = 0;
                        }
                }
                else {
                        jme_alloc_and_feed_skb(jme, i,
                                (rxdesc->descwb.flags &
                                        (RXWBFLAG_TCPON |
                                        RXWBFLAG_UDPON |
                                        RXWBFLAG_IPV4)));
                }

                if((i += desccnt) >= RING_DESC_NR)
                        i -= RING_DESC_NR;
        }

out:
        rx_dbg(jme->dev->name, "RX: Stop at %d\n", i);
        rx_dbg(jme->dev->name, "RX: RXNDA offset %d\n",
                (jread32(jme, JME_RXNDA) - jread32(jme, JME_RXDBA_LO))
                        >> 4);

        rxring->next_to_clean = i;

        return limit > 0 ? limit : 0;
}

static void
jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
{
        if(likely(atmp == dpi->cur))
                return;

        if(dpi->attempt == atmp) {
                ++(dpi->cnt);
        }
        else {
                dpi->attempt = atmp;
                dpi->cnt = 0;
        }
}

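/*
 * PCC profile selection heuristic: pick P3/P2/P1 from the RX byte count
 * accumulated since the last timer tick (and the interrupt count for P2),
 * and only commit a switch after the same profile has been attempted for
 * more than 20 consecutive ticks, to avoid flapping.
 */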
static void
jme_dynamic_pcc(struct jme_adapter *jme)
{
        register struct dynpcc_info *dpi = &(jme->dpi);

        if((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
                jme_attempt_pcc(dpi, PCC_P3);
        else if((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P2_THRESHOLD
        || dpi->intr_cnt > PCC_INTR_THRESHOLD)
                jme_attempt_pcc(dpi, PCC_P2);
        else
                jme_attempt_pcc(dpi, PCC_P1);

        if(unlikely(dpi->attempt != dpi->cur && dpi->cnt > 20)) {
                jme_set_rx_pcc(jme, dpi->attempt);
                dpi->cur = dpi->attempt;
                dpi->cnt = 0;
        }
}

static void
jme_start_pcc_timer(struct jme_adapter *jme)
{
        struct dynpcc_info *dpi = &(jme->dpi);
        dpi->last_bytes         = NET_STAT(jme).rx_bytes;
        dpi->last_pkts          = NET_STAT(jme).rx_packets;
        dpi->intr_cnt           = 0;
        jwrite32(jme, JME_TMCSR,
                TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
}

static void
jme_pcc_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter*)arg;
        struct net_device *netdev = jme->dev;

        if(netif_queue_stopped(netdev)) {
                jwrite32(jme, JME_TMCSR, 0);
                return;
        }
        jme_dynamic_pcc(jme);
        jme_start_pcc_timer(jme);
}

static void
jme_link_change_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter*)arg;
        struct net_device *netdev = jme->dev;
        int timeout = WAIT_TASKLET_TIMEOUT;
        int rc;

        if(!atomic_dec_and_test(&jme->link_changing))
                goto out;

        if(jme_check_link(netdev, 1) && jme->oldmtu == netdev->mtu)
                goto out;

        jme->oldmtu = netdev->mtu;
        netif_stop_queue(netdev);

        while(--timeout > 0 &&
                (
                atomic_read(&jme->rx_cleaning) != 1 ||
                atomic_read(&jme->tx_cleaning) != 1
                )) {

                mdelay(1);
        }

        if(netif_carrier_ok(netdev)) {
                jme_reset_mac_processor(jme);
                jme_free_rx_resources(jme);
                jme_free_tx_resources(jme);
        }

        jme_check_link(netdev, 0);
        if(netif_carrier_ok(netdev)) {
                rc = jme_setup_rx_resources(jme);
                if(rc) {
                        jeprintk(netdev->name,
                                "Failed to allocate RX resources"
                                ", device STOPPED!\n");
                        goto out;
                }

                rc = jme_setup_tx_resources(jme);
                if(rc) {
                        jeprintk(netdev->name,
                                "Failed to allocate TX resources"
                                ", device STOPPED!\n");
                        goto err_out_free_rx_resources;
                }

                jme_enable_rx_engine(jme);
                jme_enable_tx_engine(jme);

                netif_start_queue(netdev);
                jme_start_pcc_timer(jme);
        }

        goto out;

err_out_free_rx_resources:
        jme_free_rx_resources(jme);
out:
        atomic_inc(&jme->link_changing);
}

static void
jme_rx_clean_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter*)arg;
        struct dynpcc_info *dpi = &(jme->dpi);

        if(unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
                goto out;

        if(unlikely(atomic_read(&jme->link_changing) != 1))
                goto out;

        if(unlikely(netif_queue_stopped(jme->dev)))
                goto out;

        jme_process_receive(jme, RING_DESC_NR);
        ++(dpi->intr_cnt);

out:
        atomic_inc(&jme->rx_cleaning);
}

static void
jme_rx_empty_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter*)arg;

        if(unlikely(atomic_read(&jme->link_changing) != 1))
                return;

        if(unlikely(netif_queue_stopped(jme->dev)))
                return;

        jme_rx_clean_tasklet(arg);
        jme_restart_rx_engine(jme);
}

static void
jme_tx_clean_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter*)arg;
        struct jme_ring *txring = &(jme->txring[0]);
        volatile struct txdesc *txdesc = txring->desc;
        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
        int i, j, cnt = 0, max, err;

        if(unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
                goto out;

        if(unlikely(atomic_read(&jme->link_changing) != 1))
                goto out;

        if(unlikely(netif_queue_stopped(jme->dev)))
                goto out;

        max = RING_DESC_NR - atomic_read(&txring->nr_free);

        tx_dbg(jme->dev->name, "Tx Tasklet: In\n");

        for(i = txring->next_to_clean ; cnt < max ; ) {

                ctxbi = txbi + i;

                if(ctxbi->skb && !(txdesc[i].descwb.flags & TXWBFLAG_OWN)) {

                        err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;

                        tx_dbg(jme->dev->name,
                                "Tx Tasklet: Clean %d+%d\n",
                                i, ctxbi->nr_desc);

                        for(j = 1 ; j < ctxbi->nr_desc ; ++j) {
                                ttxbi = txbi + ((i + j) & (RING_DESC_NR - 1));
                                txdesc[(i+j)&(RING_DESC_NR-1)].dw[0] = 0;

                                pci_unmap_single(jme->pdev,
                                                 ttxbi->mapping,
                                                 ttxbi->len,
                                                 PCI_DMA_TODEVICE);

                                if(likely(!err))
                                        NET_STAT(jme).tx_bytes += ttxbi->len;

                                ttxbi->mapping = 0;
                                ttxbi->len = 0;
                        }

                        dev_kfree_skb(ctxbi->skb);
                        ctxbi->skb = NULL;

                        cnt += ctxbi->nr_desc;

                        if(unlikely(err))
                                ++(NET_STAT(jme).tx_carrier_errors);
                        else
                                ++(NET_STAT(jme).tx_packets);
                }
                else {
                        if(!ctxbi->skb)
                                tx_dbg(jme->dev->name,
                                        "Tx Tasklet:"
                                        " Stopped due to no skb.\n");
                        else
                                tx_dbg(jme->dev->name,
                                        "Tx Tasklet:"
                                        " Stopped due to descriptor not done.\n");
                        break;
                }

                if(unlikely((i += ctxbi->nr_desc) >= RING_DESC_NR))
                        i -= RING_DESC_NR;

                ctxbi->nr_desc = 0;
        }

        tx_dbg(jme->dev->name,
                "Tx Tasklet: Stop %d Jiffies %lu\n",
                i, jiffies);
        txring->next_to_clean = i;

        atomic_add(cnt, &txring->nr_free);

out:
        atomic_inc(&jme->tx_cleaning);
}

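/*
 * Common interrupt dispatch for both INTx and MSI paths: interrupts are
 * masked first, the work is deferred to the matching tasklets, then the
 * status bits are deasserted and the mask reopened. A link change skips
 * straight to deassert, since the link tasklet reinitializes everything.
 */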
static void
jme_intr_msi(struct jme_adapter *jme, __u32 intrstat)
{
        /*
         * Disable interrupt
         */
        jwrite32f(jme, JME_IENC, INTR_ENABLE);

        if(intrstat & (INTR_LINKCH | INTR_SWINTR)) {
                tasklet_schedule(&jme->linkch_task);
                goto out_deassert;
        }

        if(intrstat & INTR_TMINTR)
                tasklet_schedule(&jme->pcc_task);

        if(intrstat & INTR_RX0EMP)
                tasklet_schedule(&jme->rxempty_task);

        if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
                tasklet_schedule(&jme->rxclean_task);

        if(intrstat & (INTR_PCCTXTO | INTR_PCCTX))
                tasklet_schedule(&jme->txclean_task);

        if((intrstat & ~INTR_ENABLE) != 0) {
                /*
                 * Some interrupt not handled,
                 * but also not enabled (for debug).
                 */
        }

out_deassert:
        /*
         * Deassert interrupts
         */
        jwrite32f(jme, JME_IEVE, intrstat);

        /*
         * Re-enable interrupt
         */
        jwrite32f(jme, JME_IENS, INTR_ENABLE);
}

static irqreturn_t
jme_intr(int irq, void *dev_id)
{
        struct net_device *netdev = dev_id;
        struct jme_adapter *jme = netdev_priv(netdev);
        irqreturn_t rc = IRQ_HANDLED;
        __u32 intrstat;

        intrstat = jread32(jme, JME_IEVE);

        /*
         * Check if it's really an interrupt for us
         */
        if(unlikely(intrstat == 0)) {
                rc = IRQ_NONE;
                goto out;
        }

        /*
         * Check if the device still exists
         */
        if(unlikely(intrstat == ~((typeof(intrstat))0))) {
                rc = IRQ_NONE;
                goto out;
        }

        /*
         * Allow one interrupt handling at a time
         */
        if(unlikely(!atomic_dec_and_test(&jme->intr_sem)))
                goto out_inc;

        jme_intr_msi(jme, intrstat);

out_inc:
        /*
         * Enable next interrupt handling
         */
        atomic_inc(&jme->intr_sem);

out:
        return rc;
}

static irqreturn_t
jme_msi(int irq, void *dev_id)
{
        struct net_device *netdev = dev_id;
        struct jme_adapter *jme = netdev_priv(netdev);
        __u32 intrstat;

        pci_dma_sync_single_for_cpu(jme->pdev,
                                    jme->shadow_dma,
                                    sizeof(__u32) * SHADOW_REG_NR,
                                    PCI_DMA_FROMDEVICE);
        intrstat = jme->shadow_regs[SHADOW_IEVE];
        jme->shadow_regs[SHADOW_IEVE] = 0;

        jme_intr_msi(jme, intrstat);

        return IRQ_HANDLED;
}

static void
jme_reset_link(struct jme_adapter *jme)
{
        jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
}

static void
jme_restart_an(struct jme_adapter *jme)
{
        __u32 bmcr;
        unsigned long flags;

        spin_lock_irqsave(&jme->phy_lock, flags);
        bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
        bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
        spin_unlock_irqrestore(&jme->phy_lock, flags);
}

static int
jme_request_irq(struct jme_adapter *jme)
{
        int rc;
        struct net_device *netdev = jme->dev;
        irq_handler_t handler = jme_intr;
        int irq_flags = IRQF_SHARED;

        if (!pci_enable_msi(jme->pdev)) {
                jme->flags |= JME_FLAG_MSI;
                handler = jme_msi;
                irq_flags = 0;
        }

        rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
                          netdev);
        if(rc) {
                jeprintk(netdev->name,
                        "Unable to allocate %s interrupt (return: %d)\n",
                        jme->flags & JME_FLAG_MSI ? "MSI" : "INTx",
                        rc);

                if(jme->flags & JME_FLAG_MSI) {
                        pci_disable_msi(jme->pdev);
                        jme->flags &= ~JME_FLAG_MSI;
                }
        }
        else {
                netdev->irq = jme->pdev->irq;
        }

        return rc;
}

static void
jme_free_irq(struct jme_adapter *jme)
{
        free_irq(jme->pdev->irq, jme->dev);
        if (jme->flags & JME_FLAG_MSI) {
                pci_disable_msi(jme->pdev);
                jme->flags &= ~JME_FLAG_MSI;
                jme->dev->irq = jme->pdev->irq;
        }
}

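/*
 * Bring-up sequence: wait (up to ~1s) for any in-flight link/RX/TX
 * tasklets to drain, reset the MAC, hook the interrupt, enable shadow
 * register posting, unmask interrupts, and kick autonegotiation; the
 * link-change tasklet then allocates the rings once the link is up.
 */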
static int
jme_open(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int rc, timeout = 100;

        while(
                --timeout > 0 &&
                (
                atomic_read(&jme->link_changing) != 1 ||
                atomic_read(&jme->rx_cleaning) != 1 ||
                atomic_read(&jme->tx_cleaning) != 1
                )
        )
                msleep(10);

        if(!timeout) {
                rc = -EBUSY;
                goto err_out;
        }

        jme_reset_mac_processor(jme);

        rc = jme_request_irq(jme);
        if(rc)
                goto err_out;

        jme_enable_shadow(jme);
        jme_start_irq(jme);
        jme_restart_an(jme);

        return 0;

err_out:
        netif_stop_queue(netdev);
        netif_carrier_off(netdev);
        return rc;
}

static int
jme_close(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);

        netif_stop_queue(netdev);
        netif_carrier_off(netdev);

        jme_stop_irq(jme);
        jme_disable_shadow(jme);
        jme_free_irq(jme);

        tasklet_kill(&jme->linkch_task);
        tasklet_kill(&jme->txclean_task);
        tasklet_kill(&jme->rxclean_task);
        tasklet_kill(&jme->rxempty_task);

        jme_reset_mac_processor(jme);
        jme_free_rx_resources(jme);
        jme_free_tx_resources(jme);

        return 0;
}

/*
 * This function is already protected by netif_tx_lock()
 */
static int
jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int rc;

        if(unlikely(netif_queue_stopped(jme->dev)))
                return NETDEV_TX_BUSY;

#if 0
/*Testing*/
        dprintk("jme", "Frags: %d Headlen: %d Len: %d Sum:%d\n",
                skb_shinfo(skb)->nr_frags,
                skb_headlen(skb),
                skb->len,
                skb->ip_summed);
/*********/
#endif

        rc = jme_set_new_txdesc(jme, skb);

        if(unlikely(rc != NETDEV_TX_OK))
                return rc;

        jwrite32(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
                                TXCS_QUEUE0S |
                                TXCS_ENABLE);
        netdev->trans_start = jiffies;

        return NETDEV_TX_OK;
}

static int
jme_set_macaddr(struct net_device *netdev, void *p)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        struct sockaddr *addr = p;
        __u32 val;

        if(netif_running(netdev))
                return -EBUSY;

        spin_lock(&jme->macaddr_lock);
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        val = addr->sa_data[3] << 24 |
              addr->sa_data[2] << 16 |
              addr->sa_data[1] <<  8 |
              addr->sa_data[0];
        jwrite32(jme, JME_RXUMA_LO, val);
        val = addr->sa_data[5] << 8 |
              addr->sa_data[4];
        jwrite32(jme, JME_RXUMA_HI, val);
        spin_unlock(&jme->macaddr_lock);

        return 0;
}

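/*
 * Receive filter setup: unicast/broadcast always pass; IFF_PROMISC and
 * IFF_ALLMULTI map onto the corresponding RXMCS bits, and otherwise
 * multicast is filtered through a 64-bin hash (ether_crc & 0x3F) spread
 * across the two RXMCHT registers.
 */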
static void
jme_set_multi(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        u32 mc_hash[2] = {};
        int i;
        unsigned long flags;

        spin_lock_irqsave(&jme->rxmcs_lock, flags);

        jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;

        if (netdev->flags & IFF_PROMISC) {
                jme->reg_rxmcs |= RXMCS_ALLFRAME;
        }
        else if (netdev->flags & IFF_ALLMULTI) {
                jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
        }
        else if(netdev->flags & IFF_MULTICAST) {
                struct dev_mc_list *mclist;
                int bit_nr;

                jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
                for (i = 0, mclist = netdev->mc_list;
                        mclist && i < netdev->mc_count;
                        ++i, mclist = mclist->next) {

                        bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
                        mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
                }

                jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
                jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
        }

        wmb();
        jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);

        spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
}

static int
jme_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct jme_adapter *jme = netdev_priv(netdev);

        if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
                ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
                return -EINVAL;

        if(new_mtu > 4000) {
                jme->reg_rxcs &= ~RXCS_FIFOTHNP;
                jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
                jme_restart_rx_engine(jme);
        }
        else {
                jme->reg_rxcs &= ~RXCS_FIFOTHNP;
                jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
                jme_restart_rx_engine(jme);
        }

        if(new_mtu > 1900) {
                netdev->features &= ~NETIF_F_HW_CSUM;
        }
        else {
                netdev->features |= NETIF_F_HW_CSUM;
        }

        netdev->mtu = new_mtu;
        jme_reset_link(jme);

        return 0;
}

1677 static void
1678 jme_tx_timeout(struct net_device *netdev)
1679 {
1680         struct jme_adapter *jme = netdev_priv(netdev);
1681
1682         /*
1683          * Reset the link.
1684          * The resulting link change will reinitialize all RX/TX resources.
1685          */
1686         jme_restart_an(jme);
1687 }
1688
1689 static void
1690 jme_get_drvinfo(struct net_device *netdev,
1691                      struct ethtool_drvinfo *info)
1692 {
1693         struct jme_adapter *jme = netdev_priv(netdev);
1694
1695         strcpy(info->driver, DRV_NAME);
1696         strcpy(info->version, DRV_VERSION);
1697         strcpy(info->bus_info, pci_name(jme->pdev));
1698 }
1699
1700 static int
1701 jme_get_regs_len(struct net_device *netdev)
1702 {
1703         return 0x400;
1704 }
1705
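/*
 * Copy len bytes of memory-mapped registers, starting at reg, into
 * the buffer p, one 32-bit read at a time.
 */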
1706 static void
1707 mmapio_memcpy(struct jme_adapter *jme, __u32 *p, __u32 reg, int len)
1708 {
1709         int i;
1710
1711         for (i = 0 ; i < len ; i += 4)
1712                 p[i >> 2] = jread32(jme, reg + i);
1713
1714 }
1715
1716 static void
1717 jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
1718 {
1719         struct jme_adapter *jme = netdev_priv(netdev);
1720         __u32 *p32 = (__u32 *)p;
1721
1722         memset(p, 0, 0x400);
1723
1724         regs->version = 1;
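        /*
         * Dump the four 256-byte register blocks (MAC, PHY, MISC,
         * RSS) back to back into the caller's buffer.
         */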
1725         mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);
1726
1727         p32 += 0x100 >> 2;
1728         mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);
1729
1730         p32 += 0x100 >> 2;
1731         mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);
1732
1733         p32 += 0x100 >> 2;
1734         mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);
1735
1736 }
1737
1738 static int
1739 jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
1740 {
1741         struct jme_adapter *jme = netdev_priv(netdev);
1742
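        /*
         * RX coalescing is adaptive; report the parameters of the
         * PCC profile (P1-P3) that is currently active.
         */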
1743         ecmd->use_adaptive_rx_coalesce = true;
1744         ecmd->tx_coalesce_usecs = PCC_TX_TO;
1745         ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
1746
1747         switch (jme->dpi.cur) {
1748         case PCC_P1:
1749                 ecmd->rx_coalesce_usecs = PCC_P1_TO;
1750                 ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
1751                 break;
1752         case PCC_P2:
1753                 ecmd->rx_coalesce_usecs = PCC_P2_TO;
1754                 ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
1755                 break;
1756         case PCC_P3:
1757                 ecmd->rx_coalesce_usecs = PCC_P3_TO;
1758                 ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
1759                 break;
1760         default:
1761                 break;
1762         }
1763
1764         return 0;
1765 }
1766
1767 /*
1768  * This is not really coalescing configuration: it changes an
1769  * internal FIFO-related setting, for testing purposes.
1770  */
1771 static int
1772 jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
1773 {
1774         struct jme_adapter *jme = netdev_priv(netdev);
1775
1776         if (ecmd->use_adaptive_rx_coalesce &&
1777             ecmd->use_adaptive_tx_coalesce &&
1778             ecmd->rx_coalesce_usecs == 250 &&
1779             (ecmd->rx_max_coalesced_frames_low == 16 ||
1780              ecmd->rx_max_coalesced_frames_low == 32 ||
1781              ecmd->rx_max_coalesced_frames_low == 64 ||
1782              ecmd->rx_max_coalesced_frames_low == 128)) {
1783                 jme->reg_rxcs &= ~RXCS_FIFOTHNP;
1784                 switch (ecmd->rx_max_coalesced_frames_low) {
1785                 case 16:
1786                         jme->reg_rxcs |= RXCS_FIFOTHNP_16QW;
1787                         break;
1788                 case 32:
1789                         jme->reg_rxcs |= RXCS_FIFOTHNP_32QW;
1790                         break;
1791                 case 64:
1792                         jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
1793                         break;
1794                 case 128:
1795                 default:
1796                         jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
1797                 }
1798                 jme_restart_rx_engine(jme);
1799         }
1800         else {
1801                 return -EINVAL;
1802         }
1803
1804         return 0;
1805 }
1806
1807 static void
1808 jme_get_pauseparam(struct net_device *netdev,
1809                         struct ethtool_pauseparam *ecmd)
1810 {
1811         struct jme_adapter *jme = netdev_priv(netdev);
1812         unsigned long flags;
1813         __u32 val;
1814
1815         ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
1816         ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;
1817
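        /*
         * Pause autonegotiation is reported as enabled when any
         * pause capability is currently advertised by the PHY.
         */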
1818         spin_lock_irqsave(&jme->phy_lock, flags);
1819         val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
1820         spin_unlock_irqrestore(&jme->phy_lock, flags);
1821         ecmd->autoneg = (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
1822 }
1823
1824 static int
1825 jme_set_pauseparam(struct net_device *netdev,
1826                         struct ethtool_pauseparam *ecmd)
1827 {
1828         struct jme_adapter *jme = netdev_priv(netdev);
1829         unsigned long flags;
1830         __u32 val;
1831
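        /*
         * TX flow control, RX flow control, and the advertised pause
         * capability are updated independently; the hardware is only
         * touched when a setting actually changes.
         */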
1832         if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) !=
1833             (ecmd->tx_pause != 0)) {
1834
1835                 if (ecmd->tx_pause)
1836                         jme->reg_txpfc |= TXPFC_PF_EN;
1837                 else
1838                         jme->reg_txpfc &= ~TXPFC_PF_EN;
1839
1840                 jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
1841         }
1842
1843         spin_lock_irqsave(&jme->rxmcs_lock, flags);
1844         if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) !=
1845             (ecmd->rx_pause != 0)) {
1846
1847                 if (ecmd->rx_pause)
1848                         jme->reg_rxmcs |= RXMCS_FLOWCTRL;
1849                 else
1850                         jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;
1851
1852                 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
1853         }
1854         spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
1855
1856         spin_lock_irqsave(&jme->phy_lock, flags);
1857         val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
1858         if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) !=
1859             (ecmd->autoneg != 0)) {
1860
1861                 if (ecmd->autoneg)
1862                         val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1863                 else
1864                         val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1865
1866                 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE, val);
1867         }
1868         spin_unlock_irqrestore(&jme->phy_lock, flags);
1869
1870         return 0;
1871 }
1872
1873 static int
1874 jme_get_settings(struct net_device *netdev,
1875                      struct ethtool_cmd *ecmd)
1876 {
1877         struct jme_adapter *jme = netdev_priv(netdev);
1878         int rc;
1879         unsigned long flags;
1880
1881         spin_lock_irqsave(&jme->phy_lock, flags);
1882         rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
1883         spin_unlock_irqrestore(&jme->phy_lock, flags);
1884         return rc;
1885 }
1886
1887 static int
1888 jme_set_settings(struct net_device *netdev,
1889                      struct ethtool_cmd *ecmd)
1890 {
1891         struct jme_adapter *jme = netdev_priv(netdev);
1892         int rc, fdc = 0;
1893         unsigned long flags;
1894
1895         if (ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
1896                 return -EINVAL;
1897
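        /*
         * A duplex change on forced media does not generate a
         * link-change event, so note it here and reset the link
         * manually once the new settings have been applied.
         */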
1898         if (jme->mii_if.force_media &&
1899             ecmd->autoneg != AUTONEG_ENABLE &&
1900             (jme->mii_if.full_duplex != ecmd->duplex))
1901                 fdc = 1;
1902
1903         spin_lock_irqsave(&jme->phy_lock, flags);
1904         rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
1905         spin_unlock_irqrestore(&jme->phy_lock, flags);
1906
1907         if (!rc && fdc)
1908                 jme_reset_link(jme);
1909
1910         return rc;
1911 }
1912
1913 static __u32
1914 jme_get_link(struct net_device *netdev)
1915 {
1916         struct jme_adapter *jme = netdev_priv(netdev);
1917         return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
1918 }
1919
1920 static u32
1921 jme_get_rx_csum(struct net_device *netdev)
1922 {
1923         struct jme_adapter *jme = netdev_priv(netdev);
1924
1925         return jme->reg_rxmcs & RXMCS_CHECKSUM;
1926 }
1927
1928 static int
1929 jme_set_rx_csum(struct net_device *netdev, u32 on)
1930 {
1931         struct jme_adapter *jme = netdev_priv(netdev);
1932         unsigned long flags;
1933
1934         spin_lock_irqsave(&jme->rxmcs_lock, flags);
1935         if (on)
1936                 jme->reg_rxmcs |= RXMCS_CHECKSUM;
1937         else
1938                 jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
1939         jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
1940         spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
1941
1942         return 0;
1943 }
1944
1945 static int
1946 jme_set_tx_csum(struct net_device *netdev, u32 on)
1947 {
1948         if (on)
1949                 netdev->features |= NETIF_F_HW_CSUM;
1950         else
1951                 netdev->features &= ~NETIF_F_HW_CSUM;
1952
1953         return 0;
1954 }
1955
1956 static int
1957 jme_nway_reset(struct net_device *netdev)
1958 {
1959         struct jme_adapter *jme = netdev_priv(netdev);
1960         jme_restart_an(jme);
1961         return 0;
1962 }
1963
1964 static const struct ethtool_ops jme_ethtool_ops = {
1965         .get_drvinfo            = jme_get_drvinfo,
1966         .get_regs_len           = jme_get_regs_len,
1967         .get_regs               = jme_get_regs,
1968         .get_coalesce           = jme_get_coalesce,
1969         .set_coalesce           = jme_set_coalesce,
1970         .get_pauseparam         = jme_get_pauseparam,
1971         .set_pauseparam         = jme_set_pauseparam,
1972         .get_settings           = jme_get_settings,
1973         .set_settings           = jme_set_settings,
1974         .get_link               = jme_get_link,
1975         .get_rx_csum            = jme_get_rx_csum,
1976         .set_rx_csum            = jme_set_rx_csum,
1977         .set_tx_csum            = jme_set_tx_csum,
1978         .nway_reset             = jme_nway_reset,
1979 };
1980
1981 static int
1982 jme_pci_dma64(struct pci_dev *pdev)
1983 {
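        /*
         * Try DMA masks from widest to narrowest: return 1 if 64-bit
         * or 40-bit addressing is usable (DAC), 0 for plain 32-bit,
         * or -1 if no mask can be set.
         */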
1984         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
1985                 if (!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
1986                         return 1;
1987
1988         if (!pci_set_dma_mask(pdev, DMA_40BIT_MASK))
1989                 if (!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK))
1990                         return 1;
1991
1992         if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
1993                 if (!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
1994                         return 0;
1995
1996         return -1;
1997 }
1998
1999 static int __devinit
2000 jme_init_one(struct pci_dev *pdev,
2001              const struct pci_device_id *ent)
2002 {
2003         int rc = 0, using_dac;
2004         struct net_device *netdev;
2005         struct jme_adapter *jme;
2006
2007         /*
2008          * set up PCI device basics
2009          */
2010         rc = pci_enable_device(pdev);
2011         if (rc) {
2012                 printk(KERN_ERR PFX "Cannot enable PCI device.\n");
2013                 goto err_out;
2014         }
2015
2016         using_dac = jme_pci_dma64(pdev);
2017         if (using_dac < 0) {
2018                 printk(KERN_ERR PFX "Cannot set PCI DMA Mask.\n");
2019                 rc = -EIO;
2020                 goto err_out_disable_pdev;
2021         }
2022
2023         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2024                 printk(KERN_ERR PFX "No PCI resource region found.\n");
2025                 rc = -ENOMEM;
2026                 goto err_out_disable_pdev;
2027         }
2028
2029         rc = pci_request_regions(pdev, DRV_NAME);
2030         if (rc) {
2031                 printk(KERN_ERR PFX "Cannot obtain PCI resource region.\n");
2032                 goto err_out_disable_pdev;
2033         }
2034
2035         pci_set_master(pdev);
2036
2037         /*
2038          * alloc and init net device
2039          */
2040         netdev = alloc_etherdev(sizeof(*jme));
2041         if (!netdev) {
2042                 rc = -ENOMEM;
2043                 goto err_out_release_regions;
2044         }
2045         netdev->open                    = jme_open;
2046         netdev->stop                    = jme_close;
2047         netdev->hard_start_xmit         = jme_start_xmit;
2048         netdev->set_mac_address         = jme_set_macaddr;
2049         netdev->set_multicast_list      = jme_set_multi;
2050         netdev->change_mtu              = jme_change_mtu;
2051         netdev->ethtool_ops             = &jme_ethtool_ops;
2052         netdev->tx_timeout              = jme_tx_timeout;
2053         netdev->watchdog_timeo          = TX_TIMEOUT;
2054         NETDEV_GET_STATS(netdev, &jme_get_stats);
2055         netdev->features                = NETIF_F_HW_CSUM;
2056         if (using_dac)
2057                 netdev->features        |= NETIF_F_HIGHDMA;
2058
2059         SET_NETDEV_DEV(netdev, &pdev->dev);
2060         pci_set_drvdata(pdev, netdev);
2061
2062         /*
2063          * init adapter info
2064          */
2065         jme = netdev_priv(netdev);
2066         jme->pdev = pdev;
2067         jme->dev = netdev;
2068         jme->oldmtu = netdev->mtu = 1500;
2069         jme->phylink = 0;
2070         jme->regs = ioremap(pci_resource_start(pdev, 0),
2071                              pci_resource_len(pdev, 0));
2072         if (!(jme->regs)) {
2073                 rc = -ENOMEM;
2074                 goto err_out_free_netdev;
2075         }
2076         jme->shadow_regs = pci_alloc_consistent(pdev,
2077                                                 sizeof(__u32) * SHADOW_REG_NR,
2078                                                 &(jme->shadow_dma));
2079         if (!(jme->shadow_regs)) {
2080                 rc = -ENOMEM;
2081                 goto err_out_unmap;
2082         }
2083
2084         spin_lock_init(&jme->phy_lock);
2085         spin_lock_init(&jme->macaddr_lock);
2086         spin_lock_init(&jme->rxmcs_lock);
2087
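        /*
         * These atomics are used elsewhere in the driver as
         * semaphore-style busy flags; initialize them to 1
         * (available).
         */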
2088         atomic_set(&jme->intr_sem, 1);
2089         atomic_set(&jme->link_changing, 1);
2090         atomic_set(&jme->rx_cleaning, 1);
2091         atomic_set(&jme->tx_cleaning, 1);
2092
2093         tasklet_init(&jme->pcc_task,
2094                      &jme_pcc_tasklet,
2095                      (unsigned long) jme);
2096         tasklet_init(&jme->linkch_task,
2097                      &jme_link_change_tasklet,
2098                      (unsigned long) jme);
2099         tasklet_init(&jme->txclean_task,
2100                      &jme_tx_clean_tasklet,
2101                      (unsigned long) jme);
2102         tasklet_init(&jme->rxclean_task,
2103                      &jme_rx_clean_tasklet,
2104                      (unsigned long) jme);
2105         tasklet_init(&jme->rxempty_task,
2106                      &jme_rx_empty_tasklet,
2107                      (unsigned long) jme);
2108         jme->mii_if.dev = netdev;
2109         jme->mii_if.phy_id = 1;
2110         jme->mii_if.supports_gmii = 1;
2111         jme->mii_if.mdio_read = jme_mdio_read;
2112         jme->mii_if.mdio_write = jme_mdio_write;
2113
2114         jme->dpi.cur = PCC_P1;
2115
2116         jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
2117         jme->reg_rxcs = RXCS_DEFAULT;
2118         jme->reg_rxmcs = RXMCS_DEFAULT;
2119         jme->reg_txpfc = 0;
2120         /*
2121          * Get Max Read Req Size from PCI Config Space
2122          */
2123         pci_read_config_byte(pdev, PCI_CONF_DCSR_MRRS, &jme->mrrs);
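        /*
         * Match the TX DMA burst size to the PCIe Max Read Request
         * Size, so the device never requests more data per transfer
         * than the fabric allows.
         */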
2124         switch (jme->mrrs) {
2125         case MRRS_128B:
2126                 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
2127                 break;
2128         case MRRS_256B:
2129                 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
2130                 break;
2131         default:
2132                 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
2133                 break;
2134         }
2135
2136
2137         /*
2138          * Reset MAC processor and reload EEPROM for MAC Address
2139          */
2140         jme_clear_pm(jme);
2141         jme_reset_phy_processor(jme);
2142         jme_reset_mac_processor(jme);
2143         rc = jme_reload_eeprom(jme);
2144         if (rc) {
2145                 printk(KERN_ERR PFX
2146                         "Failed to reload EEPROM for reading the MAC address.\n");
2147                 goto err_out_free_shadow;
2148         }
2149         jme_load_macaddr(netdev);
2150
2151
2152         /*
2153          * Tell stack that we are not ready to work until open()
2154          */
2155         netif_carrier_off(netdev);
2156         netif_stop_queue(netdev);
2157
2158         /*
2159          * Register netdev
2160          */
2161         rc = register_netdev(netdev);
2162         if (rc) {
2163                 printk(KERN_ERR PFX "Cannot register net device.\n");
2164                 goto err_out_free_shadow;
2165         }
2166
2167         jprintk(netdev->name,
2168                 "JMC250 gigabit eth %02x:%02x:%02x:%02x:%02x:%02x\n",
2169                 netdev->dev_addr[0],
2170                 netdev->dev_addr[1],
2171                 netdev->dev_addr[2],
2172                 netdev->dev_addr[3],
2173                 netdev->dev_addr[4],
2174                 netdev->dev_addr[5]);
2175
2176         return 0;
2177
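/*
 * Error unwind: release resources in the reverse order of their
 * acquisition above.
 */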
2178 err_out_free_shadow:
2179         pci_free_consistent(pdev,
2180                             sizeof(__u32) * SHADOW_REG_NR,
2181                             jme->shadow_regs,
2182                             jme->shadow_dma);
2183 err_out_unmap:
2184         iounmap(jme->regs);
2185 err_out_free_netdev:
2186         pci_set_drvdata(pdev, NULL);
2187         free_netdev(netdev);
2188 err_out_release_regions:
2189         pci_release_regions(pdev);
2190 err_out_disable_pdev:
2191         pci_disable_device(pdev);
2192 err_out:
2193         return rc;
2194 }
2195
2196 static void __devexit
2197 jme_remove_one(struct pci_dev *pdev)
2198 {
2199         struct net_device *netdev = pci_get_drvdata(pdev);
2200         struct jme_adapter *jme = netdev_priv(netdev);
2201
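        /*
         * Tear down in the reverse order of jme_init_one():
         * unregister the netdev, free the shadow registers, unmap
         * MMIO, free the netdev, then release and disable the
         * PCI device.
         */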
2202         unregister_netdev(netdev);
2203         pci_free_consistent(pdev,
2204                             sizeof(__u32) * SHADOW_REG_NR,
2205                             jme->shadow_regs,
2206                             jme->shadow_dma);
2207         iounmap(jme->regs);
2208         pci_set_drvdata(pdev, NULL);
2209         free_netdev(netdev);
2210         pci_release_regions(pdev);
2211         pci_disable_device(pdev);
2212
2213 }
2214
2215 static struct pci_device_id jme_pci_tbl[] = {
2216         { PCI_VDEVICE(JMICRON, 0x250) },
2217         { }
2218 };
2219
2220 static struct pci_driver jme_driver = {
2221         .name           = DRV_NAME,
2222         .id_table       = jme_pci_tbl,
2223         .probe          = jme_init_one,
2224         .remove         = __devexit_p(jme_remove_one),
2225 #if 0
2226 #ifdef CONFIG_PM
2227         .suspend        = jme_suspend,
2228         .resume         = jme_resume,
2229 #endif /* CONFIG_PM */
2230 #endif
2231 };
2232
2233 static int __init
2234 jme_init_module(void)
2235 {
2236         printk(KERN_INFO PFX "JMicron JMC250 gigabit ethernet "
2237                "driver version %s\n", DRV_VERSION);
2238         return pci_register_driver(&jme_driver);
2239 }
2240
2241 static void __exit
2242 jme_cleanup_module(void)
2243 {
2244         pci_unregister_driver(&jme_driver);
2245 }
2246
2247 module_init(jme_init_module);
2248 module_exit(jme_cleanup_module);
2249
2250 MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
2251 MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
2252 MODULE_LICENSE("GPL");
2253 MODULE_VERSION(DRV_VERSION);
2254 MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
2255