/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * http://www.jmicron.com/
 *
 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

/*
 * Timeline before release:
 *      Stage 4: Basic feature support.
 *      -  Implement scatter-gather offloading.
 *         Use pci_map_page on scattered sk_buff for HIGHMEM support.
 *      -  Implement power management related functions.
 *      -  Implement jumbo frame support.
 *      -  Implement MSI.
 *
 *      Stage 5: Advanced offloading support.
 *      -  Implement VLAN offloading.
 *      -  Implement TCP Segmentation Offloading.
 *
 *      Stage 6: CPU load balancing.
 *      -  Implement MSI-X.
 *         Along with multiple RX queues, for CPU load balancing.
 *
 *      Stage 7:
 *      -  Clean up/reorganize code, performance tuning (alignment etc...).
 *      -  Test and release 1.0.
 *
 *      Non-Critical:
 *      -  Use NAPI instead of rx_tasklet?
 *              PCC supports both packet-counter and timeout interrupts
 *              for receive and transmit completion; is NAPI really needed?
 *      -  Decode register dump for ethtool.
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/ip.h>
#include "jme.h"

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
static struct net_device_stats *
jme_get_stats(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        return &jme->stats;
}
#endif

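/*
 * Read a PHY register through the SMI (MDIO) interface: post the read
 * request, then busy-poll until the hardware clears SMI_OP_REQ or the
 * poll budget (JME_PHY_TIMEOUT microseconds) runs out.
 */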
static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int i, val;

        jwrite32(jme, JME_SMI, SMI_OP_REQ |
                                smi_phy_addr(phy) |
                                smi_reg_addr(reg));

        wmb();
        for (i = JME_PHY_TIMEOUT; i > 0; --i) {
                udelay(1);
                if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
                        break;
        }

        if (i == 0) {
                jeprintk(netdev->name, "phy read timeout : %d\n", reg);
                return 0;
        }

        return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

static void
jme_mdio_write(struct net_device *netdev,
                                int phy, int reg, int val)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int i;

        jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
                ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
                smi_phy_addr(phy) | smi_reg_addr(reg));

        wmb();
        for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
                udelay(1);
                if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
                        break;
        }

        if (i == 0)
                jeprintk(netdev->name, "phy write timeout : %d\n", reg);

        return;
}

static __always_inline void
jme_reset_phy_processor(struct jme_adapter *jme)
{
        __u32 val;

        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
                        MII_ADVERTISE, ADVERTISE_ALL |
                        ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
                        MII_CTRL1000,
                        ADVERTISE_1000FULL | ADVERTISE_1000HALF);

        val = jme_mdio_read(jme->dev,
                                jme->mii_if.phy_id,
                                MII_BMCR);

        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
                        MII_BMCR, val | BMCR_RESET);

        return;
}


static __always_inline void
jme_reset_mac_processor(struct jme_adapter *jme)
{
        jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
        udelay(2);
        jwrite32(jme, JME_GHC, jme->reg_ghc);
        jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
        jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
        jwrite32(jme, JME_WFODP, 0);
        jwrite32(jme, JME_WFOI, 0);
        jwrite32(jme, JME_GPREG0, GPREG0_DEFAULT);
        jwrite32(jme, JME_GPREG1, 0);
}

static __always_inline void
jme_clear_pm(struct jme_adapter *jme)
{
        jwrite32(jme, JME_PMCS, 0xFFFF0000);
        pci_set_power_state(jme->pdev, PCI_D0);
}

static int
jme_reload_eeprom(struct jme_adapter *jme)
{
        __u32 val;
        int i;

        val = jread32(jme, JME_SMBCSR);

        if (val & SMBCSR_EEPROMD) {
                val |= SMBCSR_CNACK;
                jwrite32(jme, JME_SMBCSR, val);
                val |= SMBCSR_RELOAD;
                jwrite32(jme, JME_SMBCSR, val);
                mdelay(12);

                for (i = JME_SMB_TIMEOUT; i > 0; --i) {
                        mdelay(1);
                        if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
                                break;
                }

                if (i == 0) {
                        jeprintk(jme->dev->name, "eeprom reload timeout\n");
                        return -EIO;
                }
        } else {
                return -EIO;
        }

        return 0;
}

static void
jme_load_macaddr(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        unsigned char macaddr[6];
        __u32 val;

        spin_lock(&jme->macaddr_lock);
        val = jread32(jme, JME_RXUMA_LO);
        macaddr[0] = (val >>  0) & 0xFF;
        macaddr[1] = (val >>  8) & 0xFF;
        macaddr[2] = (val >> 16) & 0xFF;
        macaddr[3] = (val >> 24) & 0xFF;
        val = jread32(jme, JME_RXUMA_HI);
        macaddr[4] = (val >>  0) & 0xFF;
        macaddr[5] = (val >>  8) & 0xFF;
        memcpy(netdev->dev_addr, macaddr, 6);
        spin_unlock(&jme->macaddr_lock);
}

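/*
 * Program one of the three RX PCC presets into PCCRX0.  Each preset
 * pairs a timeout (PCC_Px_TO) with a packet count (PCC_Px_CNT);
 * presumably the interrupt fires when either threshold is reached first.
 */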
static __always_inline void
jme_set_rx_pcc(struct jme_adapter *jme, int p)
{
        switch (p) {
        case PCC_P1:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        case PCC_P2:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        case PCC_P3:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        default:
                break;
        }

        dprintk(jme->dev->name, "Switched to PCC_P%d\n", p);
}

static void
jme_start_irq(struct jme_adapter *jme)
{
        register struct dynpcc_info *dpi = &(jme->dpi);

        jme_set_rx_pcc(jme, PCC_P1);

        dpi->check_point        = jiffies + PCC_INTERVAL;
        dpi->last_bytes         = NET_STAT(jme).rx_bytes;
        dpi->last_pkts          = NET_STAT(jme).rx_packets;
        dpi->cur                = PCC_P1;
        dpi->attempt            = PCC_P1;
        dpi->cnt                = 0;

        jwrite32(jme, JME_PCCTX,
                        ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
                        ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
                        PCCTXQ0_EN
                );

        /*
         * Enable Interrupts
         */
        jwrite32(jme, JME_IENS, INTR_ENABLE);
}

static __always_inline void
jme_stop_irq(struct jme_adapter *jme)
{
        /*
         * Disable Interrupts
         */
        jwrite32(jme, JME_IENC, INTR_ENABLE);
}


static __always_inline void
jme_enable_shadow(struct jme_adapter *jme)
{
        jwrite32(jme,
                 JME_SHBA_LO,
                 ((__u32)jme->shadow_dma & ~((__u32)0x1F)) | SHBA_POSTEN);
}

static __always_inline void
jme_disable_shadow(struct jme_adapter *jme)
{
        jwrite32(jme, JME_SHBA_LO, 0x0);
}

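/*
 * Check PHY link state and, unless testonly is set, reprogram GHC and
 * TXMCS to match the resolved speed/duplex.  Returns 1 when the link is
 * up and unchanged since the last call, 0 otherwise, so callers can use
 * it as a cheap "anything to do?" test.
 */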
static int
jme_check_link(struct net_device *netdev, int testonly)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        __u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr;
        char linkmsg[32];
        int rc = 0;

        phylink = jread32(jme, JME_PHY_LINK);

        if (phylink & PHY_LINK_UP) {
                if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
                        /*
                         * If we did not enable autonegotiation,
                         * speed/duplex info has to be obtained from SMI.
                         */
                        phylink = PHY_LINK_UP;

                        bmcr = jme_mdio_read(jme->dev,
                                                jme->mii_if.phy_id,
                                                MII_BMCR);

                        phylink |= ((bmcr & BMCR_SPEED1000) &&
                                        (bmcr & BMCR_SPEED100) == 0) ?
                                        PHY_LINK_SPEED_1000M :
                                        (bmcr & BMCR_SPEED100) ?
                                        PHY_LINK_SPEED_100M :
                                        PHY_LINK_SPEED_10M;

                        phylink |= (bmcr & BMCR_FULLDPLX) ?
                                         PHY_LINK_DUPLEX : 0;
                } else {
                        /*
                         * Keep polling until speed/duplex resolution
                         * completes.
                         */
                        while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
                                --cnt) {
                                udelay(1);
                                phylink = jread32(jme, JME_PHY_LINK);
                        }

                        if (!cnt)
                                jeprintk(netdev->name,
                                        "Waiting for speed resolve timed out.\n");
                }

                if (jme->phylink == phylink) {
                        rc = 1;
                        goto out;
                }
                if (testonly)
                        goto out;

                jme->phylink = phylink;

                switch (phylink & PHY_LINK_SPEED_MASK) {
                case PHY_LINK_SPEED_10M:
                        ghc = GHC_SPEED_10M;
                        strcpy(linkmsg, "10 Mbps, ");
                        break;
                case PHY_LINK_SPEED_100M:
                        ghc = GHC_SPEED_100M;
                        strcpy(linkmsg, "100 Mbps, ");
                        break;
                case PHY_LINK_SPEED_1000M:
                        ghc = GHC_SPEED_1000M;
                        strcpy(linkmsg, "1000 Mbps, ");
                        break;
                default:
                        ghc = 0;
                        break;
                }
                ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;

                strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
                                        "Full-Duplex, " :
                                        "Half-Duplex, ");

                if (phylink & PHY_LINK_MDI_STAT)
                        strcat(linkmsg, "MDI-X");
                else
                        strcat(linkmsg, "MDI");

                if (phylink & PHY_LINK_DUPLEX)
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
                else {
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
                                                TXMCS_BACKOFF |
                                                TXMCS_CARRIERSENSE |
                                                TXMCS_COLLISION);
                        jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
                                ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
                                TXTRHD_TXREN |
                                ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
                }

                jme->reg_ghc = ghc;
                jwrite32(jme, JME_GHC, ghc);

                jprintk(netdev->name, "Link is up at %s.\n", linkmsg);
                netif_carrier_on(netdev);
        } else {
                if (testonly)
                        goto out;

                jprintk(netdev->name, "Link is down.\n");
                jme->phylink = 0;
                netif_carrier_off(netdev);
        }

out:
        return rc;
}


static int
jme_alloc_txdesc(struct jme_adapter *jme,
                        int nr_alloc)
{
        struct jme_ring *txring = jme->txring;
        int idx;

        /*
         * Check the free count and advance next_to_use under tx_lock,
         * so concurrent allocators cannot hand out the same slots.
         */
        spin_lock(&jme->tx_lock);

        if (unlikely(txring->nr_free < nr_alloc)) {
                spin_unlock(&jme->tx_lock);
                return -1;
        }

        idx = txring->next_to_use;
        txring->nr_free -= nr_alloc;

        if ((txring->next_to_use += nr_alloc) >= RING_DESC_NR)
                txring->next_to_use -= RING_DESC_NR;

        spin_unlock(&jme->tx_lock);

        return idx;
}

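/*
 * Queue one skb for transmission.  Two descriptors are used: the first
 * carries the per-packet flags and total size, the second points at the
 * DMA-mapped skb data.  The OWN bit of the first descriptor is set last,
 * so the NIC cannot start on the packet before it is fully described.
 */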
static int
jme_set_new_txdesc(struct jme_adapter *jme,
                        struct sk_buff *skb)
{
        struct jme_ring *txring = jme->txring;
        volatile struct txdesc *txdesc = txring->desc, *ctxdesc;
        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
        dma_addr_t dmaaddr;
        int i, idx, nr_desc;
        __u8 flags;

        nr_desc = 2;
        idx = jme_alloc_txdesc(jme, nr_desc);

        if (unlikely(idx < 0))
                return NETDEV_TX_BUSY;

        for (i = 1 ; i < nr_desc ; ++i) {
                ctxdesc = txdesc + ((idx + i) & (RING_DESC_NR - 1));
                ctxbi = txbi + ((idx + i) & (RING_DESC_NR - 1));

                dmaaddr = pci_map_single(jme->pdev,
                                         skb->data,
                                         skb->len,
                                         PCI_DMA_TODEVICE);

                pci_dma_sync_single_for_device(jme->pdev,
                                               dmaaddr,
                                               skb->len,
                                               PCI_DMA_TODEVICE);

                ctxdesc->dw[0] = 0;
                ctxdesc->dw[1] = 0;
                ctxdesc->desc2.flags    = TXFLAG_OWN;
                if (jme->dev->features & NETIF_F_HIGHDMA)
                        ctxdesc->desc2.flags |= TXFLAG_64BIT;
                ctxdesc->desc2.datalen  = cpu_to_le16(skb->len);
                ctxdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32);
                ctxdesc->desc2.bufaddrl = cpu_to_le32(
                                                (__u64)dmaaddr & 0xFFFFFFFFUL);

                ctxbi->mapping = dmaaddr;
                ctxbi->len = skb->len;
        }

        ctxdesc = txdesc + idx;
        ctxbi = txbi + idx;

        ctxdesc->dw[0] = 0;
        ctxdesc->dw[1] = 0;
        ctxdesc->dw[2] = 0;
        ctxdesc->dw[3] = 0;
        ctxdesc->desc1.pktsize = cpu_to_le16(skb->len);
        /*
         * Set the OWN bit last.  If the kernel queues packets faster
         * than the NIC sends them, the NIC might otherwise try to send
         * this descriptor before we have finished filling it in.
         * All other fields are already set correctly at this point.
         */
        wmb();
        flags = TXFLAG_OWN | TXFLAG_INT;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                /* flags |= TXFLAG_IPCS; */

                switch (ip_hdr(skb)->protocol) {
                case IPPROTO_TCP:
                        flags |= TXFLAG_TCPCS;
                        break;
                case IPPROTO_UDP:
                        flags |= TXFLAG_UDPCS;
                        break;
                default:
                        break;
                }
        }
        ctxdesc->desc1.flags = flags;
        /*
         * Set the tx buffer info after telling the NIC to send,
         * for better tx_clean timing.
         */
        wmb();
        ctxbi->nr_desc = nr_desc;
        ctxbi->skb = skb;

        tx_dbg(jme->dev->name, "Xmit: %d+%d\n", idx, nr_desc);

        return 0;
}


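/*
 * Allocate the TX descriptor ring.  TX_RING_ALLOC_SIZE presumably
 * includes slack beyond the descriptors themselves, since the base
 * address is rounded up to RING_DESC_ALIGN (16 bytes) below.
 */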
static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
        struct jme_ring *txring = &(jme->txring[0]);

        txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                                           TX_RING_ALLOC_SIZE,
                                           &(txring->dmaalloc),
                                           GFP_ATOMIC);

        if (!txring->alloc) {
                txring->desc = NULL;
                txring->dmaalloc = 0;
                txring->dma = 0;
                return -ENOMEM;
        }

        /*
         * 16-byte alignment
         */
        txring->desc            = (void *)ALIGN((unsigned long)(txring->alloc),
                                                RING_DESC_ALIGN);
        txring->dma             = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
        txring->next_to_use     = 0;
        txring->next_to_clean   = 0;
        txring->nr_free         = RING_DESC_NR;

        /*
         * Initialize Transmit Descriptors
         */
        memset(txring->alloc, 0, TX_RING_ALLOC_SIZE);
        memset(txring->bufinf, 0,
                sizeof(struct jme_buffer_info) * RING_DESC_NR);

        return 0;
}

static void
jme_free_tx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *txring = &(jme->txring[0]);
        struct jme_buffer_info *txbi = txring->bufinf;

        if (txring->alloc) {
                for (i = 0 ; i < RING_DESC_NR ; ++i) {
                        txbi = txring->bufinf + i;
                        if (txbi->skb) {
                                dev_kfree_skb(txbi->skb);
                                txbi->skb = NULL;
                        }
                        txbi->mapping   = 0;
                        txbi->len       = 0;
                        txbi->nr_desc   = 0;
                }

                dma_free_coherent(&(jme->pdev->dev),
                                  TX_RING_ALLOC_SIZE,
                                  txring->alloc,
                                  txring->dmaalloc);

                txring->alloc           = NULL;
                txring->desc            = NULL;
                txring->dmaalloc        = 0;
                txring->dma             = 0;
        }
        txring->next_to_use     = 0;
        txring->next_to_clean   = 0;
        txring->nr_free         = 0;
}

static __always_inline void
jme_enable_tx_engine(struct jme_adapter *jme)
{
        /*
         * Select Queue 0
         */
        jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);

        /*
         * Setup TX Queue 0 DMA Base Address
         */
        jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
        jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
        jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

        /*
         * Setup TX Descriptor Count
         */
        jwrite32(jme, JME_TXQDC, RING_DESC_NR);

        /*
         * Enable TX Engine
         */
        wmb();
        jwrite32(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
                                TXCS_ENABLE);
}

static __always_inline void
jme_disable_tx_engine(struct jme_adapter *jme)
{
        int i;
        __u32 val;

        /*
         * Disable TX Engine
         */
        jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);

        val = jread32(jme, JME_TXCS);
        for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
                mdelay(1);
                val = jread32(jme, JME_TXCS);
        }

        if (!i) {
                jeprintk(jme->dev->name, "Disable TX engine timeout.\n");
                jme_reset_mac_processor(jme);
        }
}

static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
        struct jme_ring *rxring = jme->rxring;
        register volatile struct rxdesc *rxdesc = rxring->desc;
        struct jme_buffer_info *rxbi = rxring->bufinf;
        rxdesc += i;
        rxbi += i;

        rxdesc->dw[0] = 0;
        rxdesc->dw[1] = 0;
        rxdesc->desc1.bufaddrh  = cpu_to_le32((__u64)rxbi->mapping >> 32);
        rxdesc->desc1.bufaddrl  = cpu_to_le32(
                                        (__u64)rxbi->mapping & 0xFFFFFFFFUL);
        rxdesc->desc1.datalen   = cpu_to_le16(rxbi->len);
        if (jme->dev->features & NETIF_F_HIGHDMA)
                rxdesc->desc1.flags = RXFLAG_64BIT;
        wmb();
        rxdesc->desc1.flags     |= RXFLAG_OWN | RXFLAG_INT;
}

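/*
 * Allocate a fresh RX skb for ring slot i and DMA-map it.  The data
 * pointer is pushed up to the next RX_BUF_DMA_ALIGN boundary,
 * presumably a hardware DMA alignment requirement.
 */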
static int
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        struct jme_buffer_info *rxbi = rxring->bufinf;
        unsigned long offset;
        struct sk_buff *skb;

        skb = netdev_alloc_skb(jme->dev, RX_BUF_ALLOC_SIZE);
        if (unlikely(!skb))
                return -ENOMEM;

        if (unlikely(skb_is_nonlinear(skb))) {
                dprintk(jme->dev->name,
                        "Allocated skb is fragmented (%d).\n",
                        skb_shinfo(skb)->nr_frags);
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        offset = (unsigned long)(skb->data)
                        & (unsigned long)(RX_BUF_DMA_ALIGN - 1);
        if (unlikely(offset))
                skb_reserve(skb, RX_BUF_DMA_ALIGN - offset);

        rxbi += i;
        rxbi->skb = skb;
        rxbi->len = skb_tailroom(skb);
        rxbi->mapping = pci_map_single(jme->pdev,
                                       skb->data,
                                       rxbi->len,
                                       PCI_DMA_FROMDEVICE);

        return 0;
}

static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        struct jme_buffer_info *rxbi = rxring->bufinf;
        rxbi += i;

        if (rxbi->skb) {
                pci_unmap_single(jme->pdev,
                                 rxbi->mapping,
                                 rxbi->len,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb(rxbi->skb);
                rxbi->skb = NULL;
                rxbi->mapping = 0;
                rxbi->len = 0;
        }
}

static void
jme_free_rx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *rxring = &(jme->rxring[0]);

        if (rxring->alloc) {
                for (i = 0 ; i < RING_DESC_NR ; ++i)
                        jme_free_rx_buf(jme, i);

                dma_free_coherent(&(jme->pdev->dev),
                                  RX_RING_ALLOC_SIZE,
                                  rxring->alloc,
                                  rxring->dmaalloc);
                rxring->alloc    = NULL;
                rxring->desc     = NULL;
                rxring->dmaalloc = 0;
                rxring->dma      = 0;
        }
        rxring->next_to_use   = 0;
        rxring->next_to_clean = 0;
}

static int
jme_setup_rx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *rxring = &(jme->rxring[0]);

        rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                                           RX_RING_ALLOC_SIZE,
                                           &(rxring->dmaalloc),
                                           GFP_ATOMIC);
        if (!rxring->alloc) {
                rxring->desc = NULL;
                rxring->dmaalloc = 0;
                rxring->dma = 0;
                return -ENOMEM;
        }

        /*
         * 16-byte alignment
         */
        rxring->desc            = (void *)ALIGN((unsigned long)(rxring->alloc),
                                                RING_DESC_ALIGN);
        rxring->dma             = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
        rxring->next_to_use     = 0;
        rxring->next_to_clean   = 0;

        /*
         * Initialize Receive Descriptors
         */
        for (i = 0 ; i < RING_DESC_NR ; ++i) {
                if (unlikely(jme_make_new_rx_buf(jme, i))) {
                        jme_free_rx_resources(jme);
                        return -ENOMEM;
                }

                jme_set_clean_rxdesc(jme, i);
        }

        return 0;
}

static __always_inline void
jme_enable_rx_engine(struct jme_adapter *jme)
{
        /*
         * Setup RX DMA Base Address
         */
        jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
        jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
        jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);

        /*
         * Setup RX Descriptor Count
         */
        jwrite32(jme, JME_RXQDC, RING_DESC_NR);

        /*
         * Setup Unicast Filter
         */
        jme_set_multi(jme->dev);

        /*
         * Enable RX Engine
         */
        wmb();
        jwrite32(jme, JME_RXCS, RXCS_DEFAULT |
                                RXCS_QUEUESEL_Q0 |
                                RXCS_ENABLE |
                                RXCS_QST);
}

static __always_inline void
jme_restart_rx_engine(struct jme_adapter *jme)
{
        /*
         * Start RX Engine
         */
        jwrite32(jme, JME_RXCS, RXCS_DEFAULT |
                                RXCS_QUEUESEL_Q0 |
                                RXCS_ENABLE |
                                RXCS_QST);
}

static __always_inline void
jme_disable_rx_engine(struct jme_adapter *jme)
{
        int i;
        __u32 val;

        /*
         * Disable RX Engine
         */
        val = jread32(jme, JME_RXCS);
        val &= ~RXCS_ENABLE;
        jwrite32(jme, JME_RXCS, val);

        val = jread32(jme, JME_RXCS);
        for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
                mdelay(100);
                val = jread32(jme, JME_RXCS);
        }

        if (!i)
                jeprintk(jme->dev->name, "Disable RX engine timeout.\n");
}

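/*
 * Adaptive interrupt coalescing: jme_dynamic_pcc() samples the RX byte
 * count once per PCC_INTERVAL and votes for a PCC preset through
 * jme_attempt_pcc().  Only after the same preset wins more than five
 * consecutive votes is the hardware actually reprogrammed, which keeps
 * the coalescing level from flapping under bursty traffic.
 */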
static void
jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
{
        if (dpi->attempt == atmp) {
                ++(dpi->cnt);
        } else {
                dpi->attempt = atmp;
                dpi->cnt = 0;
        }
}

static void
jme_dynamic_pcc(struct jme_adapter *jme)
{
        register struct dynpcc_info *dpi = &(jme->dpi);

        if (jiffies >= dpi->check_point) {
                if (jiffies > (dpi->check_point + PCC_INTERVAL))
                        jme_attempt_pcc(dpi, PCC_P1);
                else if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) >
                                                        PCC_P3_THRESHOLD)
                        jme_attempt_pcc(dpi, PCC_P3);
                else if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) >
                                                        PCC_P2_THRESHOLD)
                        jme_attempt_pcc(dpi, PCC_P2);
                else
                        jme_attempt_pcc(dpi, PCC_P1);

                if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
                        jme_set_rx_pcc(jme, dpi->attempt);
                        dpi->cur = dpi->attempt;
                        dpi->cnt = 0;
                }

                dpi->last_bytes = NET_STAT(jme).rx_bytes;
                dpi->last_pkts  = NET_STAT(jme).rx_packets;
                dpi->check_point = jiffies + PCC_INTERVAL;
        }
}

static void
jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        volatile struct rxdesc *rxdesc = rxring->desc;
        struct jme_buffer_info *rxbi = rxring->bufinf;
        struct sk_buff *skb;
        int framesize;

        rxdesc += idx;
        rxbi += idx;

        skb = rxbi->skb;
        pci_dma_sync_single_for_cpu(jme->pdev,
                                        rxbi->mapping,
                                        rxbi->len,
                                        PCI_DMA_FROMDEVICE);

        if (unlikely(jme_make_new_rx_buf(jme, idx))) {
                pci_dma_sync_single_for_device(jme->pdev,
                                                rxbi->mapping,
                                                rxbi->len,
                                                PCI_DMA_FROMDEVICE);

                ++(NET_STAT(jme).rx_dropped);
        } else {
                framesize = le16_to_cpu(rxdesc->descwb.framesize)
                                - RX_PREPAD_SIZE;

                skb_reserve(skb, RX_PREPAD_SIZE);
                skb_put(skb, framesize);
                skb->protocol = eth_type_trans(skb, jme->dev);

                if (jme->reg_rxmcs & RXMCS_CHECKSUM)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

                netif_rx(skb);

                if (le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST_MUL)
                        ++(NET_STAT(jme).multicast);

                jme->dev->last_rx = jiffies;
                NET_STAT(jme).rx_bytes += framesize;
                ++(NET_STAT(jme).rx_packets);
        }

        jme_set_clean_rxdesc(jme, idx);

        jme_dynamic_pcc(jme);
}

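/*
 * Returns nonzero when RX checksum offload is enabled and the write-back
 * flags report a protocol (IPv4/IPv6/TCP/UDP) whose checksum-good bit is
 * not set, i.e. hardware checksum verification failed.
 */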
static int
jme_rxsum_bad(struct jme_adapter *jme, __u16 flags)
{
        if (jme->reg_rxmcs & RXMCS_CHECKSUM) {
                return  ((flags & RXWBFLAG_IPV4) &&
                                !(flags & RXWBFLAG_IPCS)) ||
                        ((flags & RXWBFLAG_IPV6) &&
                                !(flags & RXWBFLAG_IPCS)) ||
                        ((flags & RXWBFLAG_TCPON) &&
                                !(flags & RXWBFLAG_TCPCS)) ||
                        ((flags & RXWBFLAG_UDPON) &&
                                !(flags & RXWBFLAG_UDPCS));
        } else {
                return 0;
        }
}

static int
jme_process_receive(struct jme_adapter *jme, int limit)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        volatile struct rxdesc *rxdesc = rxring->desc;
        int i, j, ccnt, desccnt;

        i = rxring->next_to_clean;
        while (limit-- > 0) {
                rxdesc = rxring->desc;
                rxdesc += i;

                if ((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
                !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
                        goto out;

                desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

                rx_dbg(jme->dev->name, "RX: Cleaning %d\n", i);

                if (unlikely(desccnt > 1 ||
                rxdesc->descwb.errstat & RXWBERR_ALLERR ||
                jme_rxsum_bad(jme, rxdesc->descwb.flags))) {

                        if (rxdesc->descwb.errstat & RXWBERR_CRCERR)
                                ++(NET_STAT(jme).rx_crc_errors);
                        else if (rxdesc->descwb.errstat & RXWBERR_OVERUN)
                                ++(NET_STAT(jme).rx_fifo_errors);
                        else
                                ++(NET_STAT(jme).rx_errors);

                        if (desccnt > 1)
                                limit -= desccnt - 1;

                        for (j = i, ccnt = desccnt ; ccnt-- ; ) {
                                jme_set_clean_rxdesc(jme, j);

                                if (unlikely(++j == RING_DESC_NR))
                                        j = 0;
                        }
                } else {
                        jme_alloc_and_feed_skb(jme, i);
                }

                if ((i += desccnt) >= RING_DESC_NR)
                        i -= RING_DESC_NR;
        }

out:
        rx_dbg(jme->dev->name, "RX: Stop at %d\n", i);
        rx_dbg(jme->dev->name, "RX: RXNDA offset %d\n",
                (jread32(jme, JME_RXNDA) - jread32(jme, JME_RXDBA_LO))
                        >> 4);

        rxring->next_to_clean = i;

        return limit > 0 ? limit : 0;
}

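/*
 * On a link-change interrupt: if the link state really changed, stop the
 * queue, wait for the RX/TX cleaning tasklets to go idle, tear down and
 * rebuild the rings, then re-enable both engines for the new link.  The
 * link_changing atomic keeps this from racing with the cleaning tasklets.
 */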
static void
jme_link_change_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter *)arg;
        struct net_device *netdev = jme->dev;
        int timeout = WAIT_TASKLET_TIMEOUT;
        int rc;

        if (!atomic_dec_and_test(&jme->link_changing))
                goto out;

        if (jme_check_link(netdev, 1))
                goto out;

        netif_stop_queue(netdev);

        while (--timeout > 0 &&
                (
                atomic_read(&jme->rx_cleaning) != 1 ||
                atomic_read(&jme->tx_cleaning) != 1
                )) {
                mdelay(1);
        }

        if (netif_carrier_ok(netdev)) {
                jme_reset_mac_processor(jme);
                jme_free_rx_resources(jme);
                jme_free_tx_resources(jme);
        }

        jme_check_link(netdev, 0);
        if (netif_carrier_ok(netdev)) {
                rc = jme_setup_rx_resources(jme);
                if (rc) {
                        jeprintk(netdev->name,
                                "Error allocating resources for RX"
                                ", device STOPPED!\n");
                        goto out;
                }

                rc = jme_setup_tx_resources(jme);
                if (rc) {
                        jeprintk(netdev->name,
                                "Error allocating resources for TX"
                                ", device STOPPED!\n");
                        goto err_out_free_rx_resources;
                }

                jme_enable_rx_engine(jme);
                jme_enable_tx_engine(jme);

                netif_start_queue(netdev);
        }

        goto out;

err_out_free_rx_resources:
        jme_free_rx_resources(jme);
out:
        atomic_inc(&jme->link_changing);
}

static void
jme_rx_clean_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter *)arg;

        if (!atomic_dec_and_test(&jme->rx_cleaning))
                goto out;

        if (atomic_read(&jme->link_changing) != 1)
                goto out;

        if (unlikely(netif_queue_stopped(jme->dev)))
                goto out;

        jme_process_receive(jme, RING_DESC_NR);

out:
        atomic_inc(&jme->rx_cleaning);
}

static void
jme_rx_empty_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter *)arg;

        if (atomic_read(&jme->link_changing) != 1)
                return;

        if (unlikely(netif_queue_stopped(jme->dev)))
                return;

        jme_rx_clean_tasklet(arg);
        jme_restart_rx_engine(jme);
}

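/*
 * Reclaim completed TX descriptors: for each sent packet, unmap the DMA
 * buffers, free the skb, account the statistics, and return the
 * descriptors to the free pool under tx_lock.
 */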
static void
jme_tx_clean_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter *)arg;
        struct jme_ring *txring = &(jme->txring[0]);
        volatile struct txdesc *txdesc = txring->desc;
        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
        int i, j, cnt = 0, max, err;

        if (!atomic_dec_and_test(&jme->tx_cleaning))
                goto out;

        if (atomic_read(&jme->link_changing) != 1)
                goto out;

        if (unlikely(netif_queue_stopped(jme->dev)))
                goto out;

        spin_lock(&jme->tx_lock);
        max = RING_DESC_NR - txring->nr_free;
        spin_unlock(&jme->tx_lock);

        tx_dbg(jme->dev->name, "Tx Tasklet: In\n");

        for (i = txring->next_to_clean ; cnt < max ; ) {

                ctxbi = txbi + i;

                if (ctxbi->skb && !(txdesc[i].descwb.flags & TXWBFLAG_OWN)) {

                        err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;

                        tx_dbg(jme->dev->name,
                                "Tx Tasklet: Clean %d+%d\n",
                                i, ctxbi->nr_desc);

                        for (j = 1 ; j < ctxbi->nr_desc ; ++j) {
                                ttxbi = txbi + ((i + j) & (RING_DESC_NR - 1));
                                txdesc[(i + j) & (RING_DESC_NR - 1)].dw[0] = 0;

                                pci_unmap_single(jme->pdev,
                                                 ttxbi->mapping,
                                                 ttxbi->len,
                                                 PCI_DMA_TODEVICE);

                                if (likely(!err))
                                        NET_STAT(jme).tx_bytes += ttxbi->len;

                                ttxbi->mapping = 0;
                                ttxbi->len = 0;
                        }

                        dev_kfree_skb(ctxbi->skb);
                        ctxbi->skb = NULL;

                        cnt += ctxbi->nr_desc;

                        if (unlikely(err))
                                ++(NET_STAT(jme).tx_carrier_errors);
                        else
                                ++(NET_STAT(jme).tx_packets);
                } else {
                        if (!ctxbi->skb)
                                tx_dbg(jme->dev->name,
                                        "Tx Tasklet:"
                                        " Stopped: no skb.\n");
                        else
                                tx_dbg(jme->dev->name,
                                        "Tx Tasklet:"
                                        " Stopped: descriptor not done.\n");
                        break;
                }

                if (unlikely((i += ctxbi->nr_desc) >= RING_DESC_NR))
                        i -= RING_DESC_NR;

                ctxbi->nr_desc = 0;
        }

        tx_dbg(jme->dev->name,
                "Tx Tasklet: Stop %d Jiffies %lu\n",
                i, jiffies);
        txring->next_to_clean = i;

        spin_lock(&jme->tx_lock);
        txring->nr_free += cnt;
        spin_unlock(&jme->tx_lock);

out:
        atomic_inc(&jme->tx_cleaning);
}

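/*
 * Interrupt handler.  The interrupt status is read either from the
 * DMA shadow register block (USE_IEVE_SHADOW) or directly from IEVE.
 * Real work is deferred to tasklets; intr_sem serializes handling so
 * only one invocation processes events at a time.
 */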
static irqreturn_t
jme_intr(int irq, void *dev_id)
{
        struct net_device *netdev = dev_id;
        struct jme_adapter *jme = netdev_priv(netdev);
        irqreturn_t rc = IRQ_HANDLED;
        __u32 intrstat;

#if USE_IEVE_SHADOW
        pci_dma_sync_single_for_cpu(jme->pdev,
                                    jme->shadow_dma,
                                    sizeof(__u32) * SHADOW_REG_NR,
                                    PCI_DMA_FROMDEVICE);
        intrstat = jme->shadow_regs[SHADOW_IEVE];
        jme->shadow_regs[SHADOW_IEVE] = 0;
#else
        intrstat = jread32(jme, JME_IEVE);
#endif

        /*
         * Check if it's really an interrupt for us
         */
        if (intrstat == 0) {
                rc = IRQ_NONE;
                goto out;
        }

        /*
         * Check if the device still exists
         */
        if (unlikely(intrstat == ~((typeof(intrstat))0))) {
                rc = IRQ_NONE;
                goto out;
        }

        /*
         * Allow only one interrupt handling at a time
         */
        if (unlikely(!atomic_dec_and_test(&jme->intr_sem)))
                goto out_inc;

        /*
         * Disable interrupt
         */
        jwrite32f(jme, JME_IENC, INTR_ENABLE);

        if (intrstat & INTR_LINKCH) {
                tasklet_schedule(&jme->linkch_task);
                goto out_deassert;
        }

        if (intrstat & INTR_RX0EMP)
                tasklet_schedule(&jme->rxempty_task);

        if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
                tasklet_schedule(&jme->rxclean_task);

        if (intrstat & (INTR_PCCTXTO | INTR_PCCTX))
                tasklet_schedule(&jme->txclean_task);

        if ((intrstat & ~INTR_ENABLE) != 0) {
                /*
                 * Some interrupts were not handled,
                 * and are not enabled either (placeholder for debugging).
                 */
        }

out_deassert:
        /*
         * Deassert interrupts
         */
        jwrite32f(jme, JME_IEVE, intrstat);

        /*
         * Re-enable interrupt
         */
        jwrite32f(jme, JME_IENS, INTR_ENABLE);

out_inc:
        /*
         * Enable next interrupt handling
         */
        atomic_inc(&jme->intr_sem);

out:
        return rc;
}

static void
jme_restart_an(struct jme_adapter *jme)
{
        __u32 bmcr;

        bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
        bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
}

static int
jme_open(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int rc, timeout = 100;

        while (--timeout > 0 &&
                (
                atomic_read(&jme->link_changing) != 1 ||
                atomic_read(&jme->rx_cleaning) != 1 ||
                atomic_read(&jme->tx_cleaning) != 1
                ))
                msleep(10);

        jme_reset_mac_processor(jme);

        rc = request_irq(jme->pdev->irq, jme_intr,
                         IRQF_SHARED, netdev->name, netdev);
        if (rc) {
                printk(KERN_ERR PFX "Requesting IRQ error.\n");
                goto err_out;
        }
        jme_enable_shadow(jme);
        jme_start_irq(jme);
        jme_restart_an(jme);

        return 0;

err_out:
        netif_stop_queue(netdev);
        netif_carrier_off(netdev);
        return rc;
}

static int
jme_close(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);

        netif_stop_queue(netdev);
        netif_carrier_off(netdev);

        jme_stop_irq(jme);
        jme_disable_shadow(jme);
        free_irq(jme->pdev->irq, jme->dev);

        tasklet_kill(&jme->linkch_task);
        tasklet_kill(&jme->txclean_task);
        tasklet_kill(&jme->rxclean_task);
        tasklet_kill(&jme->rxempty_task);

        jme_reset_mac_processor(jme);
        jme_free_rx_resources(jme);
        jme_free_tx_resources(jme);

        return 0;
}

/*
 * This function is already protected by netif_tx_lock()
 */
static int
jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int rc;

        if (unlikely(netif_queue_stopped(jme->dev)))
                return NETDEV_TX_BUSY;

        rc = jme_set_new_txdesc(jme, skb);

        if (unlikely(rc != NETDEV_TX_OK))
                return rc;

        jwrite32(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
                                TXCS_QUEUE0S |
                                TXCS_ENABLE);
        netdev->trans_start = jiffies;

        return NETDEV_TX_OK;
}

static int
jme_set_macaddr(struct net_device *netdev, void *p)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        struct sockaddr *addr = p;
        __u32 val;

        if (netif_running(netdev))
                return -EBUSY;

        spin_lock(&jme->macaddr_lock);
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        val = addr->sa_data[3] << 24 |
              addr->sa_data[2] << 16 |
              addr->sa_data[1] <<  8 |
              addr->sa_data[0];
        jwrite32(jme, JME_RXUMA_LO, val);
        val = addr->sa_data[5] << 8 |
              addr->sa_data[4];
        jwrite32(jme, JME_RXUMA_HI, val);
        spin_unlock(&jme->macaddr_lock);

        return 0;
}

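/*
 * Program the RX frame filter.  In the plain multicast case each group
 * address is hashed (low 6 bits of its CRC) into a 64-bin table held in
 * the RXMCHT_LO/HI register pair, so the hardware accepts only multicast
 * frames whose hash bit is set (RXMCS_MULFILTERED).
 */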
static void
jme_set_multi(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        u32 mc_hash[2] = {};
        int i;
        unsigned long flags;

        spin_lock_irqsave(&jme->rxmcs_lock, flags);

        jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;

        if (netdev->flags & IFF_PROMISC) {
                jme->reg_rxmcs |= RXMCS_ALLFRAME;
        } else if (netdev->flags & IFF_ALLMULTI) {
                jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
        } else if (netdev->flags & IFF_MULTICAST) {
                struct dev_mc_list *mclist;
                int bit_nr;

                jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
                for (i = 0, mclist = netdev->mc_list;
                        mclist && i < netdev->mc_count;
                        ++i, mclist = mclist->next) {

                        bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
                        mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
                }

                jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
                jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
        }

        wmb();
        jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);

        spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
}

static int
jme_change_mtu(struct net_device *netdev, int new_mtu)
{
        /*
         * Not supporting MTU change for now.
         */
        return -EINVAL;
}

static void
jme_tx_timeout(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);

        /*
         * Reset the link; the link change will reinitialize
         * all RX/TX resources.
         */
        jme_restart_an(jme);
}

static void
jme_get_drvinfo(struct net_device *netdev,
                     struct ethtool_drvinfo *info)
{
        struct jme_adapter *jme = netdev_priv(netdev);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(jme->pdev));
}

static int
jme_get_regs_len(struct net_device *netdev)
{
        return 0x400;
}

static void
mmapio_memcpy(struct jme_adapter *jme, __u32 *p, __u32 reg, int len)
{
        int i;

        for (i = 0 ; i < len ; i += 4)
                p[i >> 2] = jread32(jme, reg + i);
}

static void
jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        __u32 *p32 = (__u32 *)p;

        memset(p, 0, 0x400);

        regs->version = 1;
        mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);

        p32 += 0x100 >> 2;
        mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);

        p32 += 0x100 >> 2;
        mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);

        p32 += 0x100 >> 2;
        mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);
}

static int
jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
{
        struct jme_adapter *jme = netdev_priv(netdev);

        ecmd->use_adaptive_rx_coalesce = true;
        ecmd->tx_coalesce_usecs = PCC_TX_TO;
        ecmd->tx_max_coalesced_frames = PCC_TX_CNT;

        switch (jme->dpi.cur) {
        case PCC_P1:
                ecmd->rx_coalesce_usecs = PCC_P1_TO;
                ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
                break;
        case PCC_P2:
                ecmd->rx_coalesce_usecs = PCC_P2_TO;
                ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
                break;
        case PCC_P3:
                ecmd->rx_coalesce_usecs = PCC_P3_TO;
                ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
                break;
        default:
                break;
        }

        return 0;
}

static void
jme_get_pauseparam(struct net_device *netdev,
                        struct ethtool_pauseparam *ecmd)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        unsigned long flags;
        __u32 val;

        ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
        ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;

        spin_lock_irqsave(&jme->phy_lock, flags);
        val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
        spin_unlock_irqrestore(&jme->phy_lock, flags);
        ecmd->autoneg = (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
}

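/*
 * Apply pause-frame settings: TX pause via TXPFC, RX pause via RXMCS
 * flow control, and pause autonegotiation via the PHY's advertisement
 * register.  Each setting is only rewritten when the requested state
 * actually differs from the current one.
 */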
static int
jme_set_pauseparam(struct net_device *netdev,
                        struct ethtool_pauseparam *ecmd)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        unsigned long flags;
        __u32 val;

        if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) !=
                (ecmd->tx_pause != 0)) {

                if (ecmd->tx_pause)
                        jme->reg_txpfc |= TXPFC_PF_EN;
                else
                        jme->reg_txpfc &= ~TXPFC_PF_EN;

                jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
        }

        spin_lock_irqsave(&jme->rxmcs_lock, flags);
        if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) !=
                (ecmd->rx_pause != 0)) {

                if (ecmd->rx_pause)
                        jme->reg_rxmcs |= RXMCS_FLOWCTRL;
                else
                        jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;

                jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
        }
        spin_unlock_irqrestore(&jme->rxmcs_lock, flags);

        spin_lock_irqsave(&jme->phy_lock, flags);
        val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
        if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) !=
                (ecmd->autoneg != 0)) {

                if (ecmd->autoneg)
                        val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
                else
                        val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

                jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE, val);
        }
        spin_unlock_irqrestore(&jme->phy_lock, flags);

        return 0;
}
1653
static int
jme_get_settings(struct net_device *netdev,
                     struct ethtool_cmd *ecmd)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        unsigned long flags;
        int rc;

        /*
         * Take phy_lock with IRQs disabled, matching every other user of
         * this lock; a plain spin_lock here could deadlock against the
         * tasklets that also acquire it.
         */
        spin_lock_irqsave(&jme->phy_lock, flags);
        rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
        spin_unlock_irqrestore(&jme->phy_lock, flags);
        return rc;
}

static int
jme_set_settings(struct net_device *netdev,
                     struct ethtool_cmd *ecmd)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int rc;
        unsigned long flags;

        /* Gigabit speed may not be forced; it requires autonegotiation. */
        if(ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
                return -EINVAL;

        spin_lock_irqsave(&jme->phy_lock, flags);
        rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
        spin_unlock_irqrestore(&jme->phy_lock, flags);

        return rc;
}

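/*
 * get/set_settings back "ethtool eth0" and "ethtool -s eth0 ...";
 * the generic MII library (mii_ethtool_gset/mii_ethtool_sset) does
 * the actual PHY register access through our mdio callbacks.
 */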
static u32
jme_get_link(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
}

static u32
jme_get_rx_csum(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);

        return jme->reg_rxmcs & RXMCS_CHECKSUM;
}

static int
jme_set_rx_csum(struct net_device *netdev, u32 on)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        unsigned long flags;

        spin_lock_irqsave(&jme->rxmcs_lock, flags);
        if(on)
                jme->reg_rxmcs |= RXMCS_CHECKSUM;
        else
                jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
        jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
        spin_unlock_irqrestore(&jme->rxmcs_lock, flags);

        return 0;
}

static int
jme_set_tx_csum(struct net_device *netdev, u32 on)
{
        if(on)
                netdev->features |= NETIF_F_HW_CSUM;
        else
                netdev->features &= ~NETIF_F_HW_CSUM;

        return 0;
}

static int
jme_nway_reset(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        jme_restart_an(jme);
        return 0;
}

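/*
 * The checksum handlers back "ethtool -K eth0 rx on|off tx on|off",
 * and nway_reset backs "ethtool -r eth0" (restart autonegotiation).
 */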
static const struct ethtool_ops jme_ethtool_ops = {
        .get_drvinfo            = jme_get_drvinfo,
        .get_regs_len           = jme_get_regs_len,
        .get_regs               = jme_get_regs,
        .get_coalesce           = jme_get_coalesce,
        .get_pauseparam         = jme_get_pauseparam,
        .set_pauseparam         = jme_set_pauseparam,
        .get_settings           = jme_get_settings,
        .set_settings           = jme_set_settings,
        .get_link               = jme_get_link,
        .get_rx_csum            = jme_get_rx_csum,
        .set_rx_csum            = jme_set_rx_csum,
        .set_tx_csum            = jme_set_tx_csum,
        .nway_reset             = jme_nway_reset,
};

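/*
 * Pick the widest usable DMA mask.  Returns 1 when a 64- or 40-bit
 * mask was accepted (DAC usable, so NETIF_F_HIGHDMA can be set),
 * 0 when only 32-bit DMA is available, and -1 when no mask works.
 */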
static int
jme_pci_dma64(struct pci_dev *pdev)
{
        if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
                if(!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
                        return 1;

        if(!pci_set_dma_mask(pdev, DMA_40BIT_MASK))
                if(!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK))
                        return 1;

        if(!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
                if(!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
                        return 0;

        return -1;
}

static int __devinit
jme_init_one(struct pci_dev *pdev,
             const struct pci_device_id *ent)
{
        int rc = 0, using_dac;
        struct net_device *netdev;
        struct jme_adapter *jme;

        /*
         * set up PCI device basics
         */
        rc = pci_enable_device(pdev);
        if(rc) {
                printk(KERN_ERR PFX "Cannot enable PCI device.\n");
                goto err_out;
        }

        using_dac = jme_pci_dma64(pdev);
        if(using_dac < 0) {
                printk(KERN_ERR PFX "Cannot set PCI DMA Mask.\n");
                rc = -EIO;
                goto err_out_disable_pdev;
        }

        if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                printk(KERN_ERR PFX "No PCI resource region found.\n");
                rc = -ENOMEM;
                goto err_out_disable_pdev;
        }

        rc = pci_request_regions(pdev, DRV_NAME);
        if(rc) {
                printk(KERN_ERR PFX "Cannot obtain PCI resource region.\n");
                goto err_out_disable_pdev;
        }

        pci_set_master(pdev);

        /*
         * alloc and init net device
         */
        netdev = alloc_etherdev(sizeof(*jme));
        if(!netdev) {
                rc = -ENOMEM;
                goto err_out_release_regions;
        }
        netdev->open                    = jme_open;
        netdev->stop                    = jme_close;
        netdev->hard_start_xmit         = jme_start_xmit;
        netdev->irq                     = pdev->irq;
        netdev->set_mac_address         = jme_set_macaddr;
        netdev->set_multicast_list      = jme_set_multi;
        netdev->change_mtu              = jme_change_mtu;
        netdev->ethtool_ops             = &jme_ethtool_ops;
        netdev->tx_timeout              = jme_tx_timeout;
        netdev->watchdog_timeo          = TX_TIMEOUT;
        NETDEV_GET_STATS(netdev, &jme_get_stats);
        netdev->features                = NETIF_F_HW_CSUM;
        if(using_dac)
                netdev->features        |= NETIF_F_HIGHDMA;

        SET_NETDEV_DEV(netdev, &pdev->dev);
        pci_set_drvdata(pdev, netdev);

        /*
         * init adapter info
         */
        jme = netdev_priv(netdev);
        jme->pdev = pdev;
        jme->dev = netdev;
        jme->phylink = 0;
        jme->regs = ioremap(pci_resource_start(pdev, 0),
                             pci_resource_len(pdev, 0));
        if(!(jme->regs)) {
                rc = -ENOMEM;
                goto err_out_free_netdev;
        }
        jme->shadow_regs = pci_alloc_consistent(pdev,
                                                sizeof(__u32) * SHADOW_REG_NR,
                                                &(jme->shadow_dma));
        if(!(jme->shadow_regs)) {
                rc = -ENOMEM;
                goto err_out_unmap;
        }

        spin_lock_init(&jme->tx_lock);
        spin_lock_init(&jme->phy_lock);
        spin_lock_init(&jme->macaddr_lock);
        spin_lock_init(&jme->rxmcs_lock);

        atomic_set(&jme->intr_sem, 1);
        atomic_set(&jme->link_changing, 1);
        atomic_set(&jme->rx_cleaning, 1);
        atomic_set(&jme->tx_cleaning, 1);

        tasklet_init(&jme->linkch_task,
                     &jme_link_change_tasklet,
                     (unsigned long) jme);
        tasklet_init(&jme->txclean_task,
                     &jme_tx_clean_tasklet,
                     (unsigned long) jme);
        tasklet_init(&jme->rxclean_task,
                     &jme_rx_clean_tasklet,
                     (unsigned long) jme);
        tasklet_init(&jme->rxempty_task,
                     &jme_rx_empty_tasklet,
                     (unsigned long) jme);
        jme->mii_if.dev = netdev;
        jme->mii_if.phy_id = 1;
        jme->mii_if.supports_gmii = 1;
        jme->mii_if.mdio_read = jme_mdio_read;
        jme->mii_if.mdio_write = jme_mdio_write;

        jme->dpi.cur = PCC_P1;

        jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
        jme->reg_rxmcs = RXMCS_DEFAULT;
        jme->reg_txpfc = 0;
        /*
         * Get Max Read Request Size from PCI config space, and size the
         * TX DMA bursts to match.
         */
        pci_read_config_byte(pdev, PCI_CONF_DCSR_MRRS, &jme->mrrs);
        switch(jme->mrrs) {
                case MRRS_128B:
                        jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
                        break;
                case MRRS_256B:
                        jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
                        break;
                default:
                        jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
                        break;
        }

        /*
         * Reset MAC processor and reload EEPROM for MAC Address
         */
        jme_clear_pm(jme);
        jme_reset_phy_processor(jme);
        jme_reset_mac_processor(jme);
        rc = jme_reload_eeprom(jme);
        if(rc) {
                printk(KERN_ERR PFX
                        "Failed to reload EEPROM for reading the MAC address.\n");
                goto err_out_free_shadow;
        }
        jme_load_macaddr(netdev);

        /*
         * Tell stack that we are not ready to work until open()
         */
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        /*
         * Register netdev
         */
        rc = register_netdev(netdev);
        if(rc) {
                printk(KERN_ERR PFX "Cannot register net device.\n");
                goto err_out_free_shadow;
        }

        jprintk(netdev->name,
                "JMC250 gigabit eth %02x:%02x:%02x:%02x:%02x:%02x\n",
                netdev->dev_addr[0],
                netdev->dev_addr[1],
                netdev->dev_addr[2],
                netdev->dev_addr[3],
                netdev->dev_addr[4],
                netdev->dev_addr[5]);

        return 0;

err_out_free_shadow:
        pci_free_consistent(pdev,
                            sizeof(__u32) * SHADOW_REG_NR,
                            jme->shadow_regs,
                            jme->shadow_dma);
err_out_unmap:
        iounmap(jme->regs);
err_out_free_netdev:
        pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
err_out_release_regions:
        pci_release_regions(pdev);
err_out_disable_pdev:
        pci_disable_device(pdev);
err_out:
        return rc;
}

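/*
 * Tear down in roughly the reverse order of jme_init_one().
 */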
static void __devexit
jme_remove_one(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct jme_adapter *jme = netdev_priv(netdev);

        unregister_netdev(netdev);
        pci_free_consistent(pdev,
                            sizeof(__u32) * SHADOW_REG_NR,
                            jme->shadow_regs,
                            jme->shadow_dma);
        iounmap(jme->regs);
        pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
}

static struct pci_device_id jme_pci_tbl[] = {
        { PCI_VDEVICE(JMICRON, 0x250) },
        { }
};

static struct pci_driver jme_driver = {
        .name           = DRV_NAME,
        .id_table       = jme_pci_tbl,
        .probe          = jme_init_one,
        .remove         = __devexit_p(jme_remove_one),
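        /*
         * Suspend/resume hooks are compiled out until the planned
         * power-management work lands.
         */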
#if 0
#ifdef CONFIG_PM
        .suspend        = jme_suspend,
        .resume         = jme_resume,
#endif /* CONFIG_PM */
#endif
};

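/*
 * Module entry points: on load, print a banner and register with the
 * PCI core, which then probes jme_init_one() for each matching device.
 */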
static int __init
jme_init_module(void)
{
        printk(KERN_INFO PFX "JMicron JMC250 gigabit ethernet "
               "driver version %s\n", DRV_VERSION);
        return pci_register_driver(&jme_driver);
}

static void __exit
jme_cleanup_module(void)
{
        pci_unregister_driver(&jme_driver);
}

module_init(jme_init_module);
module_exit(jme_cleanup_module);

MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);