/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * http://www.jmicron.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

/*
 * TODO before release:
 *      1. Use sk_buff for DMA buffers with pci_map_single,
 *         and handle scattered sk_buffs (reduce memory copies)
 *      2. Try setting up 64-bit DMA with pci_set[_consistent]_dma_mask
 *         and set the netdev feature flag.
 *      3. Implement Power Management related functions.
 *      4. Implement checksum offloading, VLAN offloading,
 *         TCP Segmentation Offloading.
 *      5. Implement jumbo frame support.
 *      6. Implement NAPI option for the user.
 *      7. Implement MSI / MSI-X.
 *      8. Implement PCC.
 *      9. Implement QoS according to the "priority" attribute in sk_buff,
 *         using the 8 TX priority queues provided by the hardware.
 *      10. Clean up / reorganize code, performance tuning (alignment etc...).
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include "jme.h"

/*
 * Forward declaration: jme_enable_rx_engine() calls jme_set_multi()
 * before its definition below.
 */
static void jme_set_multi(struct net_device *netdev);

static int jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int i, val;

        jwrite32(jme, JME_SMI, SMI_OP_REQ |
                                 smi_phy_addr(phy) |
                                 smi_reg_addr(reg));

        wmb();
        for (i = JME_PHY_TIMEOUT; i > 0; --i) {
                udelay(1);
                if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
                        break;
        }

        if (i == 0) {
                dprintk("phy read timeout : %d\n", reg);
                return 0;
        }

        return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
}

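/*
 * Note: these two accessors are wired into jme->mii_if.mdio_read /
 * .mdio_write in jme_init_one(), so the generic mii library (used by
 * jme_get_settings()/jme_set_settings() below) performs all of its PHY
 * register access through this SMI request/poll sequence.
 */
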
static void jme_mdio_write(struct net_device *netdev, int phy, int reg, int val)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int i;

        jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
            ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
            smi_phy_addr(phy) | smi_reg_addr(reg));

        wmb();
        for (i = JME_PHY_TIMEOUT; i > 0; --i) {
                udelay(1);
                if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
                        break;
        }

        if (i == 0)
                dprintk("phy write timeout : %d\n", reg);
}

static void jme_reset_mac_processor(struct jme_adapter *jme)
{
        __u32 val;

        val = jread32(jme, JME_GHC);
        val |= GHC_SWRST;
        jwrite32(jme, JME_GHC, val);
        udelay(2);
        val &= ~GHC_SWRST;
        jwrite32(jme, JME_GHC, val);
        jwrite32(jme, JME_RXMCHT, 0x00000000);
        jwrite32(jme, JME_RXMCHT+4, 0x00000000);
        jwrite32(jme, JME_WFODP, 0);
        jwrite32(jme, JME_WFOI, 0);
}

__always_inline static void jme_clear_pm(struct jme_adapter *jme)
{
        jwrite32(jme, JME_PMCS, 0xFFFF0000);
}

static int jme_reload_eeprom(struct jme_adapter *jme)
{
        __u32 val;
        int i;

        val = jread32(jme, JME_SMBCSR);

        if (!(val & SMBCSR_EEPROMD))
                return -EIO;

        val |= SMBCSR_CNACK;
        jwrite32(jme, JME_SMBCSR, val);
        val |= SMBCSR_RELOAD;
        jwrite32(jme, JME_SMBCSR, val);
        mdelay(12);

        for (i = JME_SMB_TIMEOUT; i > 0; --i) {
                mdelay(1);
                if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
                        break;
        }

        if (i == 0) {
                dprintk("eeprom reload timeout\n");
                return -EIO;
        }

        return 0;
}

__always_inline static void jme_load_macaddr(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        unsigned char macaddr[6];
        __u32 val;

        val = jread32(jme, JME_RXUMA);
        macaddr[0] = (val >>  0) & 0xFF;
        macaddr[1] = (val >>  8) & 0xFF;
        macaddr[2] = (val >> 16) & 0xFF;
        macaddr[3] = (val >> 24) & 0xFF;
        val = jread32(jme, JME_RXUMA+4);
        macaddr[4] = (val >>  0) & 0xFF;
        macaddr[5] = (val >>  8) & 0xFF;
        memcpy(netdev->dev_addr, macaddr, 6);
}

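/*
 * Note: the two JME_RXUMA words hold the station address in
 * little-endian byte order (byte 0 of the MAC in the low byte of the
 * first word); jme_set_macaddr() below writes them back in exactly the
 * same layout.
 */
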
__always_inline static void jme_start_irq(struct jme_adapter *jme)
{
        /*
         * Enable Interrupts
         */
        jwrite32(jme, JME_IENS, INTR_ENABLE);
}

__always_inline static void jme_stop_irq(struct jme_adapter *jme)
{
        /*
         * Disable Interrupts
         */
        jwrite32(jme, JME_IENC, INTR_ENABLE);
}

__always_inline static void jme_check_link(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        __u32 phylink, ghc, cnt = JME_AUTONEG_TIMEOUT;
        char linkmsg[32];

        phylink = jread32(jme, JME_PHY_LINK);

        if (phylink & PHY_LINK_UP) {
                /*
                 * Keep polling until autoneg completes
                 */
                while (!(phylink & PHY_LINK_AUTONEG_COMPLETE) && --cnt > 0) {
                        mdelay(1);
                        phylink = jread32(jme, JME_PHY_LINK);
                }

                if (!cnt)
                        printk(KERN_ERR "Timed out waiting for autoneg to complete.\n");

                switch (phylink & PHY_LINK_SPEED_MASK) {
                        case PHY_LINK_SPEED_10M:
                                ghc = GHC_SPEED_10M;
                                strcpy(linkmsg, "10 Mbps, ");
                                break;
                        case PHY_LINK_SPEED_100M:
                                ghc = GHC_SPEED_100M;
                                strcpy(linkmsg, "100 Mbps, ");
                                break;
                        case PHY_LINK_SPEED_1000M:
                                ghc = GHC_SPEED_1000M;
                                strcpy(linkmsg, "1000 Mbps, ");
                                break;
                        default:
                                ghc = 0;
                                /* keep linkmsg initialized for the strcat below */
                                strcpy(linkmsg, "Unknown speed, ");
                                break;
                }
                ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;
                jwrite32(jme, JME_GHC, ghc);
                strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
                                        "Full-Duplex" :
                                        "Half-Duplex");

                if (phylink & PHY_LINK_DUPLEX)
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
                else
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
                                                   TXMCS_BACKOFF |
                                                   TXMCS_CARRIERSENSE |
                                                   TXMCS_COLLISION);

                jprintk("Link is up at %s.\n", linkmsg);
                netif_carrier_on(netdev);
        } else {
                jprintk("Link is down.\n");
                netif_carrier_off(netdev);
        }
}

__always_inline static void jme_set_new_txdesc(struct jme_adapter *jme,
                                                int i, int framesize)
{
        struct jme_ring *txring = jme->txring;
        struct TxDesc *txdesc = txring->desc;

        memset(txdesc + i, 0, TX_DESC_SIZE);
        txdesc[i].desc1.bufaddr = cpu_to_le32(ALIGN(txring->buf_dma[i], 8));
        txdesc[i].desc1.datalen = cpu_to_le16(TX_BUF_SIZE);
        txdesc[i].desc1.pktsize = cpu_to_le16(framesize);
        /*
         * Set the OWN bit last. If the kernel queues packets faster than
         * the NIC drains them, the NIC may try to fetch this descriptor
         * before we tell it to restart this TX queue, so every other
         * field must already be filled in when OWN becomes visible; the
         * wmb() keeps the hardware from observing OWN before the writes
         * above have completed.
         */
        wmb();
        txdesc[i].desc1.flags = TXFLAG_OWN | TXFLAG_INT;

        dprintk("TX Ring Buf Address(%08x,%08x,%d).\n",
                txring->buf_dma[i],
                (txdesc[i].all[12] <<  0) |
                (txdesc[i].all[13] <<  8) |
                (txdesc[i].all[14] << 16) |
                (txdesc[i].all[15] << 24),
                (txdesc[i].all[4]  <<  0) |
                (txdesc[i].all[5]  <<  8));
}

__always_inline static int jme_setup_tx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *txring = &(jme->txring[0]);

        txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                                           TX_RING_ALLOC_SIZE,
                                           &(txring->dmaalloc),
                                           GFP_KERNEL);
        if (!txring->alloc)
                return -ENOMEM;

        /*
         * 16-byte alignment
         */
        txring->desc            = (void *)ALIGN((unsigned long)(txring->alloc), 16);
        txring->dma             = ALIGN(txring->dmaalloc, 16);
        txring->next_to_use     = 0;
        txring->next_to_clean   = 0;

        dprintk("TX Ring Base Address(%08x,%08x).\n",
                (__u32)txring->desc,
                txring->dma);

        /*
         * Initialize Transmit Descriptors
         */
        memset(txring->alloc, 0, TX_RING_ALLOC_SIZE);
        for (i = 0 ; i < RING_DESC_NR ; ++i) {
                txring->buf_virt[i] = dma_alloc_coherent(&(jme->pdev->dev),
                                                         TX_BUF_ALLOC_SIZE,
                                                         &(txring->buf_dma[i]),
                                                         GFP_KERNEL);
                if (!txring->buf_virt[i])
                        break;
        }

        /*
         * Clean up allocated memory on error
         */
        if (i != RING_DESC_NR) {
                for (--i ; i >= 0 ; --i) {
                        dma_free_coherent(&(jme->pdev->dev),
                                          TX_BUF_ALLOC_SIZE,
                                          txring->buf_virt[i],
                                          txring->buf_dma[i]);
                }
                dma_free_coherent(&(jme->pdev->dev),
                                  TX_RING_ALLOC_SIZE,
                                  txring->alloc,
                                  txring->dmaalloc);
                txring->alloc    = NULL;
                txring->desc     = NULL;
                txring->dmaalloc = 0;
                txring->dma      = 0;
                return -ENOMEM;
        }

        return 0;
}

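/*
 * Note (assumption): the *_RING_ALLOC_SIZE / *_BUF_ALLOC_SIZE constants
 * from jme.h are presumably sized with enough slack that the ALIGN()
 * adjustments above and in jme_set_new_txdesc()/jme_set_clean_rxdesc()
 * (16-byte ring alignment, 8-byte buffer alignment) cannot run past the
 * end of the allocations.
 */
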
__always_inline static void jme_free_tx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *txring = &(jme->txring[0]);

        if (txring->alloc) {
                for (i = 0 ; i < RING_DESC_NR ; ++i) {
                        if (txring->buf_virt[i]) {
                                dma_free_coherent(&(jme->pdev->dev),
                                                  TX_BUF_ALLOC_SIZE,
                                                  txring->buf_virt[i],
                                                  txring->buf_dma[i]);
                        }
                }

                dma_free_coherent(&(jme->pdev->dev),
                                  TX_RING_ALLOC_SIZE,
                                  txring->alloc,
                                  txring->dmaalloc);
                txring->alloc    = NULL;
                txring->desc     = NULL;
                txring->dmaalloc = 0;
                txring->dma      = 0;
        }
        txring->next_to_use   = 0;
        txring->next_to_clean = 0;
}

__always_inline static void jme_enable_tx_engine(struct jme_adapter *jme)
{
        /*
         * Select Queue 0
         */
        jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);

        /*
         * Setup TX Queue 0 DMA Base Address
         */
        jwrite32(jme, JME_TXDBA, jme->txring[0].dma);
        jwrite32(jme, JME_TXNDA, jme->txring[0].dma);

        /*
         * Setup TX Descriptor Count
         */
        jwrite32(jme, JME_TXQDC, RING_DESC_NR);

        /*
         * Enable TX Engine
         */
        wmb();
        jwrite32(jme, JME_TXCS, TXCS_DEFAULT |
                                  TXCS_SELECT_QUEUE0 |
                                  TXCS_ENABLE);
}

__always_inline static void jme_disable_tx_engine(struct jme_adapter *jme)
{
        int i;
        __u32 val;

        /*
         * Disable TX Engine
         */
        jwrite32(jme, JME_TXCS, TXCS_DEFAULT);

        val = jread32(jme, JME_TXCS);
        for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
                udelay(1);
                val = jread32(jme, JME_TXCS);
        }

        if (!i)
                printk(KERN_ERR "Disabling TX engine timed out.\n");
}

__always_inline static void jme_set_clean_rxdesc(struct jme_adapter *jme,
                                                  int i)
{
        struct jme_ring *rxring = jme->rxring;
        struct RxDesc *rxdesc = rxring->desc;

        memset(rxdesc + i, 0, RX_DESC_SIZE);
        rxdesc[i].desc1.bufaddrl = cpu_to_le32(ALIGN(rxring->buf_dma[i], 8));
        rxdesc[i].desc1.datalen = cpu_to_le16(RX_BUF_SIZE);
        wmb();
        rxdesc[i].desc1.flags = RXFLAG_OWN | RXFLAG_INT;

#ifdef RX_QUEUE_DEBUG
        dprintk("RX Ring Buf Address(%08x,%08x,%d).\n",
                rxring->buf_dma[i],
                (rxdesc[i].all[12] <<  0) |
                (rxdesc[i].all[13] <<  8) |
                (rxdesc[i].all[14] << 16) |
                (rxdesc[i].all[15] << 24),
                (rxdesc[i].all[4]  <<  0) |
                (rxdesc[i].all[5]  <<  8));
#endif
}

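/*
 * Note: jme_set_clean_rxdesc() serves double duty. It initializes each
 * RX descriptor when the ring is first set up in
 * jme_setup_rx_resources(), and it hands a descriptor (and its buffer)
 * back to the hardware after jme_process_receive() has copied the frame
 * out, by re-arming RXFLAG_OWN behind a wmb().
 */
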
__always_inline static int jme_setup_rx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *rxring = &(jme->rxring[0]);

        rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                                           RX_RING_ALLOC_SIZE,
                                           &(rxring->dmaalloc),
                                           GFP_KERNEL);
        if (!rxring->alloc)
                return -ENOMEM;

        /*
         * 16-byte alignment
         */
        rxring->desc            = (void *)ALIGN((unsigned long)(rxring->alloc), 16);
        rxring->dma             = ALIGN(rxring->dmaalloc, 16);
        rxring->next_to_use     = 0;
        rxring->next_to_clean   = 0;

#ifdef RX_QUEUE_DEBUG
        dprintk("RX Ring Base Address(%08x,%08x).\n",
                (__u32)rxring->desc,
                rxring->dma);
#endif

        /*
         * Initialize Receive Descriptors
         */
        for (i = 0 ; i < RING_DESC_NR ; ++i) {
                rxring->buf_virt[i] = dma_alloc_coherent(&(jme->pdev->dev),
                                                         RX_BUF_ALLOC_SIZE,
                                                         &(rxring->buf_dma[i]),
                                                         GFP_KERNEL);
                if (!rxring->buf_virt[i])
                        break;

                jme_set_clean_rxdesc(jme, i);
        }

        /*
         * Clean up allocated memory on error
         */
        if (i != RING_DESC_NR) {
                for (--i ; i >= 0 ; --i) {
                        dma_free_coherent(&(jme->pdev->dev),
                                          RX_BUF_ALLOC_SIZE,
                                          rxring->buf_virt[i],
                                          rxring->buf_dma[i]);
                }
                dma_free_coherent(&(jme->pdev->dev),
                                  RX_RING_ALLOC_SIZE,
                                  rxring->alloc,
                                  rxring->dmaalloc);
                rxring->alloc    = NULL;
                rxring->desc     = NULL;
                rxring->dmaalloc = 0;
                rxring->dma      = 0;
                return -ENOMEM;
        }

        return 0;
}

__always_inline static void jme_free_rx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *rxring = &(jme->rxring[0]);

        if (rxring->alloc) {
                for (i = 0 ; i < RING_DESC_NR ; ++i) {
                        if (rxring->buf_virt[i]) {
                                dma_free_coherent(&(jme->pdev->dev),
                                                  RX_BUF_ALLOC_SIZE,
                                                  rxring->buf_virt[i],
                                                  rxring->buf_dma[i]);
                        }
                }

                dma_free_coherent(&(jme->pdev->dev),
                                  RX_RING_ALLOC_SIZE,
                                  rxring->alloc,
                                  rxring->dmaalloc);
                rxring->alloc    = NULL;
                rxring->desc     = NULL;
                rxring->dmaalloc = 0;
                rxring->dma      = 0;
        }
        rxring->next_to_use   = 0;
        rxring->next_to_clean = 0;
}

__always_inline static void jme_enable_rx_engine(struct jme_adapter *jme)
{
        __u32 val;

        /*
         * Setup RX DMA Base Address
         */
        jwrite32(jme, JME_RXDBA, jme->rxring[0].dma);
        jwrite32(jme, JME_RXNDA, jme->rxring[0].dma);

        /*
         * Setup RX Descriptor Count
         */
        jwrite32(jme, JME_RXQDC, RING_DESC_NR);

        /*
         * Setup RX Filter (unicast/multicast modes)
         */
        jme_set_multi(jme->dev);

        /*
         * Enable RX Engine
         */
        wmb();
        val = jread32(jme, JME_RXCS);
        val |= RXCS_ENABLE | RXCS_QST;
        jwrite32(jme, JME_RXCS, val);
}

__always_inline static void jme_disable_rx_engine(struct jme_adapter *jme)
{
        int i;
        __u32 val;

        /*
         * Disable RX Engine
         */
        val = jread32(jme, JME_RXCS);
        val &= ~RXCS_ENABLE;
        jwrite32(jme, JME_RXCS, val);

        val = jread32(jme, JME_RXCS);
        for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
                udelay(1);
                val = jread32(jme, JME_RXCS);
        }

        if (!i)
                printk(KERN_ERR "Disabling RX engine timed out.\n");
}

__always_inline static void jme_process_tx_complete(struct net_device *netdev)
{
        /*
         * Clear the sk_buff here in the future
         * (allowing the NIC to DMA directly from the sk_buff the kernel
         * asked us to send)
         */
}

__always_inline static void jme_process_receive(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        struct jme_ring *rxring = &(jme->rxring[0]);
        struct RxDesc *rxdesc;
        __u8 *rxbuf;
        struct sk_buff *skb;
        int i, start, cnt;
        int framesize, desccnt;

        /*
         * Assume one descriptor per frame for now.
         * Should be fixed in the future
         * (or not, if the buffer is already large enough to store the
         * entire packet).
         */

        rxdesc = rxring->desc;

        spin_lock(&jme->recv_lock);
        i = start = rxring->next_to_clean;
        /*
         * Decide how many descriptors need to be processed.
         * In the worst case we have to process the entire queue.
         */
        for (cnt = 0 ; cnt < RING_DESC_NR ; ++cnt) {
                if (rxdesc[i].descwb.flags & RXWBFLAG_OWN) {
                        rxring->next_to_clean = i;
                        break;
                }

                if (unlikely(++i == RING_DESC_NR))
                        i = 0;
        }
        spin_unlock(&jme->recv_lock);

        /*
         * Process descriptors independently across CPUs
         *      --- saved for multiple-CPU handling
         */
        for (i = start ; cnt-- ; ) {
                /*
                 * Pass received packet to kernel
                 */
                rxbuf = (void *)ALIGN((unsigned long)(rxring->buf_virt[i]), 8);
                desccnt = rxdesc[i].descwb.desccnt & RXWBDCNT_DCNT;
                framesize = le16_to_cpu(rxdesc[i].descwb.framesize);
                skb = dev_alloc_skb(framesize);
                if (!skb) {
                        printk(KERN_ERR PFX "Out of memory.\n");
                        ++(netdev->stats.rx_dropped);
                } else {
                        skb_put(skb, framesize);
                        skb_copy_to_linear_data(skb, rxbuf, framesize);
                        skb->protocol = eth_type_trans(skb, netdev);
                        netif_rx(skb);

                        netdev->last_rx = jiffies;
                        netdev->stats.rx_bytes += framesize;
                        ++(netdev->stats.rx_packets);
                }

                dprintk("DESCCNT: %u, FSIZE: %u, ADDRH: %08x, "
                        "ADDRL: %08x, FLAGS: %04x, STAT: %02x, "
                        "DST:%02x:%02x:%02x:%02x:%02x:%02x, "
                        "DSTCRC: %d\n",
                        desccnt,
                        framesize,
                        le32_to_cpu(rxdesc[i].dw[2]),
                        le32_to_cpu(rxdesc[i].dw[3]),
                        le16_to_cpu(rxdesc[i].descwb.flags),
                        rxdesc[i].descwb.stat,
                        rxbuf[0], rxbuf[1], rxbuf[2],
                        rxbuf[3], rxbuf[4], rxbuf[5],
                        ether_crc(ETH_ALEN, rxbuf) & 0x3F);

                /*
                 * Clean up descriptor for next receive
                 */
                jme_set_clean_rxdesc(jme, i);

                if (unlikely(++i == RING_DESC_NR))
                        i = 0;
        }
}

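/*
 * Note on locking: recv_lock only guards the next_to_clean bookkeeping;
 * the claimed range [start, start + cnt) is then drained outside the
 * lock. As the "saved for multiple-CPU handling" comment hints, the
 * intent appears to be letting concurrent invocations process disjoint
 * descriptor ranges in parallel.
 */
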
static irqreturn_t jme_intr(int irq, void *dev_id)
{
        struct net_device *netdev = dev_id;
        struct jme_adapter *jme = netdev_priv(netdev);
        irqreturn_t rc = IRQ_HANDLED;
        __u32 intrstat = jread32(jme, JME_IEVE);
#ifdef RX_QUEUE_DEBUG
        __u32 val;
#endif

#if 0
        /*
         * Don't disable interrupts; the driver should work fine
         * handling multiple interrupts at the same time
         * (e.g. on multi-core CPUs).
         */

        /*
         * Temporarily disable all interrupts from our NIC
         */
        jwrite32(jme, JME_IENC, INTR_ENABLE);
        wmb();
#endif

        dprintk("Interrupt received(%08x).\n", intrstat);

        /*
         * Check if it's really an interrupt for us,
         * and if the device still exists
         */
        if ((intrstat & INTR_ENABLE) == 0 || intrstat == ~0) {
                rc = IRQ_NONE;
                goto out;
        }

        if (intrstat & INTR_LINKCH) {
                /*
                 * Process link status change event
                 */
                jme_check_link(netdev);

                /*
                 * Write 1 to clear the link status change interrupt
                 */
                jwrite32(jme, JME_IEVE, INTR_LINKCH);
        }

        if (intrstat & INTR_RX0) {
                /*
                 * Process event
                 */
                jme_process_receive(netdev);

                /*
                 * Write 1 to clear the interrupt
                 */
                jwrite32(jme, JME_IEVE, INTR_RX0);

                dprintk("Received From Queue 0.\n");

#ifdef RX_QUEUE_DEBUG
                /* Poll out the Receive Queue Next Descriptor Address/Status */
                val = jread32(jme, JME_RXCS);
                val |= RXCS_QST;
                jwrite32(jme, JME_RXCS, val);
                wmb();
                val = jread32(jme, JME_RXNDA);
                dprintk("NEXT_RX_DESC.(%08x)\n", val);
#endif
        }

        if (intrstat & INTR_RX0EMP) {
                /*
                 * Write 1 to clear the interrupt
                 */
                jwrite32(jme, JME_IEVE, INTR_RX0EMP);

                dprintk("Receive Queue 0 is running out of descriptors.\n");
        }

        if (intrstat & INTR_TX0) {
                /*
                 * Process event
                 */
                jme_process_tx_complete(netdev);

                /*
                 * Write 1 to clear the interrupt
                 */
                jwrite32(jme, JME_IEVE, INTR_TX0);

                dprintk("Queue 0 transmit complete.\n");
        }

out:
#if 0
        /*
         * Re-enable interrupts
         */
        wmb();
        jwrite32(jme, JME_IENS, INTR_ENABLE);
#endif
        return rc;
}

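/*
 * Note: the screening test above is the usual shared-IRQ idiom.
 * intrstat == ~0 means reads from our BAR return all-ones, which is
 * what happens after surprise removal of the PCI device, while a status
 * with none of the INTR_ENABLE bits set means another device sharing
 * the line raised the interrupt; both cases return IRQ_NONE.
 */
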
static int jme_open(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int CHECK_VAR;

        CHECK_AND_GOTO(request_irq(jme->pdev->irq, jme_intr, IRQF_SHARED,
                                   netdev->name, netdev),
                       err_out,
                       "Requesting IRQ error.")

        CHECK_AND_GOTO(jme_setup_rx_resources(jme),
                       err_out_free_irq,
                       "Error allocating resources for RX.")

        CHECK_AND_GOTO(jme_setup_tx_resources(jme),
                       err_out_free_rx_resources,
                       "Error allocating resources for TX.")

        jme_reset_mac_processor(jme);
        jme_check_link(netdev);
        jme_start_irq(jme);
        jme_enable_rx_engine(jme);
        jme_enable_tx_engine(jme);
        netif_start_queue(netdev);

        return 0;

err_out_free_rx_resources:
        jme_free_rx_resources(jme);
err_out_free_irq:
        free_irq(jme->pdev->irq, jme->dev);
err_out:
        netif_stop_queue(netdev);
        netif_carrier_off(netdev);
        return CHECK_VAR;
}

static int jme_close(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);

        netif_stop_queue(netdev);
        netif_carrier_off(netdev);

        jme_stop_irq(jme);
        free_irq(jme->pdev->irq, jme->dev);

        jme_disable_rx_engine(jme);
        jme_disable_tx_engine(jme);
        jme_free_rx_resources(jme);
        jme_free_tx_resources(jme);

        return 0;
}

static int jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        struct jme_ring *txring = &(jme->txring[0]);
        struct TxDesc *txdesc = txring->desc;
        unsigned int len = skb->len;
        int idx;

        /*
         * Check if the transmit queue is already full,
         * and claim one descriptor to use
         */
        spin_lock(&jme->xmit_lock);
        idx = txring->next_to_use;
        if (unlikely(txdesc[idx].desc1.flags & TXFLAG_OWN)) {
                spin_unlock(&jme->xmit_lock);
                return NETDEV_TX_BUSY;
        }
        if (unlikely(++(txring->next_to_use) == RING_DESC_NR))
                txring->next_to_use = 0;
        spin_unlock(&jme->xmit_lock);

        /*
         * Fill up TX descriptors
         */
        skb_copy_from_linear_data(skb,
                                  (void *)ALIGN((unsigned long)(txring->buf_virt[idx]), 8),
                                  len);
        jme_set_new_txdesc(jme, idx, len);

        /*
         * Since we are still copying, the skb can be freed right here.
         * (The frame length was saved above, before the free.)
         */
        dev_kfree_skb(skb);

        /*
         * Tell the MAC HW to send
         */
        jwrite32(jme, JME_TXCS, TXCS_QUEUE0S |
                                  TXCS_DEFAULT |
                                  TXCS_SELECT_QUEUE0 |
                                  TXCS_ENABLE);

        netdev->stats.tx_bytes += len;
        ++(netdev->stats.tx_packets);
        netdev->trans_start = jiffies;

        return NETDEV_TX_OK;
}

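/*
 * Note: this is the copy-based TX path. The frame is memcpy'd into a
 * preallocated coherent DMA buffer, so the skb can be freed before the
 * hardware has even started sending; switching to zero-copy via
 * pci_map_single() is item 1 on the TODO list at the top of this file.
 */
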
static int jme_set_macaddr(struct net_device *netdev, void *p)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        struct sockaddr *addr = p;
        __u32 val;

        if (netif_running(netdev))
                return -EBUSY;

        spin_lock(&jme->macaddr_lock);
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        /*
         * Cast through __u8: sa_data is plain char, and a sign-extended
         * byte would smear 1-bits across the whole register value.
         */
        val = (__u8)addr->sa_data[3] << 24 |
              (__u8)addr->sa_data[2] << 16 |
              (__u8)addr->sa_data[1] <<  8 |
              (__u8)addr->sa_data[0];
        jwrite32(jme, JME_RXUMA, val);
        val = (__u8)addr->sa_data[5] << 8 |
              (__u8)addr->sa_data[4];
        jwrite32(jme, JME_RXUMA+4, val);
        spin_unlock(&jme->macaddr_lock);

        return 0;
}

static void jme_set_multi(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        u32 mc_hash[2] = {};
        __u32 val;
        int i;

        spin_lock(&jme->macaddr_lock);
        val = RXMCS_BRDFRAME | RXMCS_UNIFRAME;

        if (netdev->flags & IFF_PROMISC)
                val |= RXMCS_ALLFRAME;
        else if (netdev->flags & IFF_ALLMULTI)
                val |= RXMCS_ALLMULFRAME;
        else if (netdev->flags & IFF_MULTICAST) {
                struct dev_mc_list *mclist;
                int bit_nr;

                val |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
                for (i = 0, mclist = netdev->mc_list;
                     mclist && i < netdev->mc_count;
                     ++i, mclist = mclist->next) {
                        bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
                        mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
                        dprintk("Adding MCAddr: "
                                "%02x:%02x:%02x:%02x:%02x:%02x (%d)\n",
                                mclist->dmi_addr[0],
                                mclist->dmi_addr[1],
                                mclist->dmi_addr[2],
                                mclist->dmi_addr[3],
                                mclist->dmi_addr[4],
                                mclist->dmi_addr[5],
                                bit_nr);
                }

                jwrite32(jme, JME_RXMCHT, mc_hash[0]);
                jwrite32(jme, JME_RXMCHT+4, mc_hash[1]);
        }

        wmb();
        jwrite32(jme, JME_RXMCS, val);
        spin_unlock(&jme->macaddr_lock);

        dprintk("RX Mode changed: %08x\n", val);
}

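/*
 * Worked example of the multicast hash above: ether_crc() & 0x3F maps
 * each multicast MAC onto one of 64 hash bits, spread over the two
 * 32-bit JME_RXMCHT registers. If, say, the CRC yields bit_nr = 0x2A
 * (42), then 42 >> 5 = 1 selects mc_hash[1] and 42 & 0x1F = 10 selects
 * bit 10 within it. RXMCS_MULFILTERED then presumably tells the MAC to
 * drop multicast frames whose hash bit is clear.
 */
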
static int jme_change_mtu(struct net_device *dev, int new_mtu)
{
        /*
         * MTU changes are not supported for now.
         */
        return -EINVAL;
}

static void jme_get_drvinfo(struct net_device *netdev,
                             struct ethtool_drvinfo *info)
{
        struct jme_adapter *jme = netdev_priv(netdev);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(jme->pdev));
}

static int jme_get_settings(struct net_device *netdev,
                             struct ethtool_cmd *ecmd)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int rc;

        spin_lock(&jme->phy_lock);
        rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
        spin_unlock(&jme->phy_lock);
        return rc;
}

static int jme_set_settings(struct net_device *netdev,
                             struct ethtool_cmd *ecmd)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int rc;

        spin_lock(&jme->phy_lock);
        rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
        spin_unlock(&jme->phy_lock);
        return rc;
}

static u32 jme_get_link(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
}

static const struct ethtool_ops jme_ethtool_ops = {
        .get_drvinfo            = jme_get_drvinfo,
        .get_settings           = jme_get_settings,
        .set_settings           = jme_set_settings,
        .get_link               = jme_get_link,
};

static int __devinit jme_init_one(struct pci_dev *pdev,
                                     const struct pci_device_id *ent)
{
        int CHECK_VAR = 0;
        struct net_device *netdev;
        struct jme_adapter *jme;
        DECLARE_MAC_BUF(mac);

        /*
         * set up PCI device basics
         */
        CHECK_AND_GOTO(pci_enable_device(pdev),
                       err_out,
                       "Cannot enable PCI device.")

        CHECK_AND_GOTO(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM),
                       err_out_disable_pdev,
                       "No PCI resource region found.")

        CHECK_AND_GOTO(pci_request_regions(pdev, DRV_NAME),
                       err_out_disable_pdev,
                       "Cannot obtain PCI resource region.")

        pci_set_master(pdev);

        /*
         * alloc and init net device
         */
        netdev = alloc_etherdev(sizeof(struct jme_adapter));
        if (!netdev) {
                CHECK_VAR = -ENOMEM;
                goto err_out_release_regions;
        }
        netdev->open                    = jme_open;
        netdev->stop                    = jme_close;
        netdev->hard_start_xmit         = jme_start_xmit;
        netdev->irq                     = pdev->irq;
        netdev->set_mac_address         = jme_set_macaddr;
        netdev->set_multicast_list      = jme_set_multi;
        netdev->change_mtu              = jme_change_mtu;
        netdev->ethtool_ops             = &jme_ethtool_ops;

        SET_NETDEV_DEV(netdev, &pdev->dev);
        pci_set_drvdata(pdev, netdev);

        /*
         * init adapter info
         */
        jme = netdev_priv(netdev);
        jme->pdev = pdev;
        jme->dev = netdev;
        jme->regs = ioremap(pci_resource_start(pdev, 0),
                             pci_resource_len(pdev, 0));
        if (!jme->regs) {
                CHECK_VAR = -ENOMEM;
                goto err_out_free_netdev;
        }
        spin_lock_init(&jme->xmit_lock);
        spin_lock_init(&jme->recv_lock);
        spin_lock_init(&jme->macaddr_lock);
        spin_lock_init(&jme->phy_lock);
        jme->mii_if.dev = netdev;
        jme->mii_if.phy_id = 1;
        jme->mii_if.supports_gmii = 1;
        jme->mii_if.mdio_read = jme_mdio_read;
        jme->mii_if.mdio_write = jme_mdio_write;

        /*
         * Reset MAC processor and reload EEPROM for MAC Address
         */
        jme_clear_pm(jme);
        jme_reset_mac_processor(jme);
        CHECK_AND_GOTO(jme_reload_eeprom(jme),
                       err_out_unmap,
                       "Reloading eeprom for reading MAC Address error.");
        jme_load_macaddr(netdev);

        /*
         * Tell the stack that we are not ready to work until open()
         */
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        /*
         * Register netdev
         */
        CHECK_AND_GOTO(register_netdev(netdev),
                       err_out_unmap,
                       "Cannot register net device.")

        printk(KERN_INFO "%s: JMC250 gigabit eth at %llx, %s, IRQ %d\n",
               netdev->name,
               (unsigned long long) pci_resource_start(pdev, 0),
               print_mac(mac, netdev->dev_addr),
               pdev->irq);

        return 0;

err_out_unmap:
        iounmap(jme->regs);
err_out_free_netdev:
        pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
err_out_release_regions:
        pci_release_regions(pdev);
err_out_disable_pdev:
        pci_disable_device(pdev);
err_out:
        return CHECK_VAR;
}

static void __devexit jme_remove_one(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct jme_adapter *jme = netdev_priv(netdev);

        unregister_netdev(netdev);
        iounmap(jme->regs);
        pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
}

static struct pci_device_id jme_pci_tbl[] = {
        { PCI_VDEVICE(JMICRON, 0x250) },
        { }
};

static struct pci_driver jme_driver = {
        .name           = DRV_NAME,
        .id_table       = jme_pci_tbl,
        .probe          = jme_init_one,
        .remove         = __devexit_p(jme_remove_one),
#if 0
#ifdef CONFIG_PM
        .suspend        = jme_suspend,
        .resume         = jme_resume,
#endif /* CONFIG_PM */
#endif
};

static int __init jme_init_module(void)
{
        printk(KERN_INFO "jme: JMicron JMC250 gigabit ethernet "
                         "driver version %s\n", DRV_VERSION);
        return pci_register_driver(&jme_driver);
}

static void __exit jme_cleanup_module(void)
{
        pci_unregister_driver(&jme_driver);
}

module_init(jme_init_module);
module_exit(jme_cleanup_module);

MODULE_AUTHOR("David Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);