1 /*
2  * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
3  *
4  * Copyright 2008 JMicron Technology Corporation
5  * http://www.jmicron.com/
6  *
7  * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  */
23
24 /*
25  * TODO:
26  *      -  Decode register dump for ethtool.
27  */
28
29 #include <linux/version.h>
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/pci.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/mii.h>
37 #include <linux/crc32.h>
38 #include <linux/delay.h>
39 #include <linux/spinlock.h>
40 #include <linux/in.h>
41 #include <linux/ip.h>
42 #include <linux/ipv6.h>
43 #include <linux/tcp.h>
44 #include <linux/udp.h>
45 #include <linux/if_vlan.h>
46 #include "jme.h"
47
48 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
49 static struct net_device_stats *
50 jme_get_stats(struct net_device *netdev)
51 {
52         struct jme_adapter *jme = netdev_priv(netdev);
53         return &jme->stats;
54 }
55 #endif
56
57 static int
58 jme_mdio_read(struct net_device *netdev, int phy, int reg)
59 {
60         struct jme_adapter *jme = netdev_priv(netdev);
61         int i, val, again = (reg == MII_BMSR)?1:0;
62
63 read_again:
64         jwrite32(jme, JME_SMI, SMI_OP_REQ |
65                                 smi_phy_addr(phy) |
66                                 smi_reg_addr(reg));
67
68         wmb();
69         for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
70                 udelay(20);
71                 val = jread32(jme, JME_SMI);
72                 if ((val & SMI_OP_REQ) == 0)
73                         break;
74         }
75
76         if (i == 0) {
77                 jeprintk("jme", "phy(%d) read timeout: reg %d\n", phy, reg);
78                 return 0;
79         }
80
81         if(again--)
82                 goto read_again;
83
84         return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
85 }
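        /*
         * The SMI read above works by posting a read request (SMI_OP_REQ with
         * the PHY and register addresses encoded in) and polling until the
         * hardware clears SMI_OP_REQ, then extracting the data field via
         * SMI_DATA_MASK/SMI_DATA_SHIFT. MII_BMSR is read twice (again = 1)
         * because its link-status bit is latched: only the second read
         * reflects the current state. A minimal usage sketch, assuming a jme
         * netdev (MII_PHYSID1/2 are the standard <linux/mii.h> registers):
         *
         *	u32 id = (jme_mdio_read(netdev, jme->mii_if.phy_id, MII_PHYSID1) << 16)
         *	       |  jme_mdio_read(netdev, jme->mii_if.phy_id, MII_PHYSID2);
         */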
86
87 static void
88 jme_mdio_write(struct net_device *netdev,
89                                 int phy, int reg, int val)
90 {
91         struct jme_adapter *jme = netdev_priv(netdev);
92         int i;
93
94         jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
95                 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
96                 smi_phy_addr(phy) | smi_reg_addr(reg));
97
98         wmb();
99         for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
100                 udelay(20);
101                 if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
102                         break;
103         }
104
105         if (i == 0)
106                 jeprintk("jme", "phy(%d) write timeout: reg %d\n", phy, reg);
107
108         return;
109 }
110
111 __always_inline static void
112 jme_reset_phy_processor(struct jme_adapter *jme)
113 {
114         __u32 val;
115
116         jme_mdio_write(jme->dev,
117                         jme->mii_if.phy_id,
118                         MII_ADVERTISE, ADVERTISE_ALL |
119                         ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
120
121         if(jme->pdev->device == JME_GE_DEVICE)
122                 jme_mdio_write(jme->dev,
123                                 jme->mii_if.phy_id,
124                                 MII_CTRL1000,
125                                 ADVERTISE_1000FULL | ADVERTISE_1000HALF);
126
127         val = jme_mdio_read(jme->dev,
128                                 jme->mii_if.phy_id,
129                                 MII_BMCR);
130
131         jme_mdio_write(jme->dev,
132                         jme->mii_if.phy_id,
133                         MII_BMCR, val | BMCR_RESET);
134
135         return;
136 }
137
138 static void
139 jme_setup_wakeup_frame(struct jme_adapter *jme,
140                 __u32 *mask, __u32 crc, int fnr)
141 {
142         int i;
143
144         /*
145          * Setup CRC pattern
146          */
147         jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
148         wmb();
149         jwrite32(jme, JME_WFODP, crc);
150         wmb();
151
152         /*
153          * Setup Mask
154          */
155         for(i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
156                 jwrite32(jme, JME_WFOI,
157                                 ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
158                                 (fnr & WFOI_FRAME_SEL));
159                 wmb();
160                 jwrite32(jme, JME_WFODP, mask[i]);
161                 wmb();
162         }
163 }
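        /*
         * Wakeup frames are programmed through an indirect register pair:
         * JME_WFOI selects which slot of which frame to write (the CRC word
         * or one of the WAKEUP_FRAME_MASK_DWNR mask dwords), and JME_WFODP
         * carries the data for the selected slot. The wmb() between the two
         * writes keeps the select/data ordering visible to the device.
         */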
164
165 __always_inline static void
166 jme_reset_mac_processor(struct jme_adapter *jme)
167 {
168         __u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0,0,0,0};
169         __u32 crc = 0xCDCDCDCD;
170         __u32 gpreg0;
171         int i;
172
173         jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
174         udelay(2);
175         jwrite32(jme, JME_GHC, jme->reg_ghc);
176         jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
177         jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
178         for(i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
179                 jme_setup_wakeup_frame(jme, mask, crc, i);
180         if(jme->fpgaver)
181                 gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL;
182         else
183                 gpreg0 = GPREG0_DEFAULT;
184         jwrite32(jme, JME_GPREG0, gpreg0);
185         jwrite32(jme, JME_GPREG1, 0);
186 }
187
188 __always_inline static void
189 jme_clear_pm(struct jme_adapter *jme)
190 {
191         jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
192         pci_set_power_state(jme->pdev, PCI_D0);
193         pci_enable_wake(jme->pdev, PCI_D0, false);
194 }
195
196 static int
197 jme_reload_eeprom(struct jme_adapter *jme)
198 {
199         __u32 val;
200         int i;
201
202         val = jread32(jme, JME_SMBCSR);
203
204         if(val & SMBCSR_EEPROMD)
205         {
206                 val |= SMBCSR_CNACK;
207                 jwrite32(jme, JME_SMBCSR, val);
208                 val |= SMBCSR_RELOAD;
209                 jwrite32(jme, JME_SMBCSR, val);
210                 mdelay(12);
211
212                 for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i)
213                 {
214                         mdelay(1);
215                         if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
216                                 break;
217                 }
218
219                 if(i == 0) {
220                         jeprintk("jme", "eeprom reload timeout\n");
221                         return -EIO;
222                 }
223         }
224
225         return 0;
226 }
227
228 static void
229 jme_load_macaddr(struct net_device *netdev)
230 {
231         struct jme_adapter *jme = netdev_priv(netdev);
232         unsigned char macaddr[6];
233         __u32 val;
234
235         spin_lock(&jme->macaddr_lock);
236         val = jread32(jme, JME_RXUMA_LO);
237         macaddr[0] = (val >>  0) & 0xFF;
238         macaddr[1] = (val >>  8) & 0xFF;
239         macaddr[2] = (val >> 16) & 0xFF;
240         macaddr[3] = (val >> 24) & 0xFF;
241         val = jread32(jme, JME_RXUMA_HI);
242         macaddr[4] = (val >>  0) & 0xFF;
243         macaddr[5] = (val >>  8) & 0xFF;
244         memcpy(netdev->dev_addr, macaddr, 6);
245         spin_unlock(&jme->macaddr_lock);
246 }
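        /*
         * The unicast MAC address registers are little-endian with respect
         * to the address bytes: RXUMA_LO carries bytes 0-3 and the low 16
         * bits of RXUMA_HI carry bytes 4-5. For example (illustrative values
         * only), the address 00:11:22:33:44:55 would read back as
         * RXUMA_LO == 0x33221100 and RXUMA_HI == 0x00005544.
         */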
247
248 __always_inline static void
249 jme_set_rx_pcc(struct jme_adapter *jme, int p)
250 {
251         switch(p) {
252         case PCC_OFF:
253                 jwrite32(jme, JME_PCCRX0,
254                         ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
255                         ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
256                 break;
257         case PCC_P1:
258                 jwrite32(jme, JME_PCCRX0,
259                         ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
260                         ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
261                 break;
262         case PCC_P2:
263                 jwrite32(jme, JME_PCCRX0,
264                         ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
265                         ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
266                 break;
267         case PCC_P3:
268                 jwrite32(jme, JME_PCCRX0,
269                         ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
270                         ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
271                 break;
272         default:
273                 break;
274         }
275         wmb();
276
277         if(!(jme->flags & JME_FLAG_POLL))
278                 dprintk(jme->dev->name, "Switched to PCC_P%d\n", p);
279 }
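        /*
         * PCC (packet completion coalescing) trades interrupt rate for
         * latency: each PCC_Px level packs a timeout and a packet-count
         * threshold into PCCRX0, and the RX interrupt fires when either is
         * reached. PCC_OFF disables coalescing entirely, which is what the
         * NAPI polling path wants (see jme_polling_mode() below).
         */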
280
281 static void
282 jme_start_irq(struct jme_adapter *jme)
283 {
284         register struct dynpcc_info *dpi = &(jme->dpi);
285
286         jme_set_rx_pcc(jme, PCC_P1);
287         dpi->cur                = PCC_P1;
288         dpi->attempt            = PCC_P1;
289         dpi->cnt                = 0;
290
291         jwrite32(jme, JME_PCCTX,
292                         ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
293                         ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
294                         PCCTXQ0_EN
295                 );
296
297         /*
298          * Enable Interrupts
299          */
300         jwrite32(jme, JME_IENS, INTR_ENABLE);
301 }
302
303 __always_inline static void
304 jme_stop_irq(struct jme_adapter *jme)
305 {
306         /*
307          * Disable Interrupts
308          */
309         jwrite32(jme, JME_IENC, INTR_ENABLE);
310 }
311
312
313 __always_inline static void
314 jme_enable_shadow(struct jme_adapter *jme)
315 {
316         jwrite32(jme,
317                  JME_SHBA_LO,
318                  ((__u32)jme->shadow_dma & ~((__u32)0x1F)) | SHBA_POSTEN);
319 }
320
321 __always_inline static void
322 jme_disable_shadow(struct jme_adapter *jme)
323 {
324         jwrite32(jme, JME_SHBA_LO, 0x0);
325 }
326
327 static __u32
328 jme_linkstat_from_phy(struct jme_adapter *jme)
329 {
330         __u32 phylink, bmsr;
331
332         phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17);
333         bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
334         if(bmsr & BMSR_ANCOMP)
335                 phylink |= PHY_LINK_AUTONEG_COMPLETE;
336
337         return phylink;
338 }
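        /*
         * On FPGA prototypes the link state is taken from PHY register 17,
         * which is presumed here to mirror the PHY_LINK_* bit layout of the
         * JME_PHY_LINK MAC register; only the autoneg-complete bit has to
         * be merged in from BMSR by hand.
         */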
339
340 static int
341 jme_check_link(struct net_device *netdev, int testonly)
342 {
343         struct jme_adapter *jme = netdev_priv(netdev);
344         __u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr;
345         char linkmsg[64];
346         int rc = 0;
347
348         linkmsg[0] = '\0';
349
350         if(jme->fpgaver)
351                 phylink = jme_linkstat_from_phy(jme);
352         else
353                 phylink = jread32(jme, JME_PHY_LINK);
354
355         if (phylink & PHY_LINK_UP) {
356                 if(!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
357                         /*
358                          * If autonegotiation was not enabled,
359                          * speed/duplex info must be read from the PHY via SMI
360                          */
361                         phylink = PHY_LINK_UP;
362
363                         bmcr = jme_mdio_read(jme->dev,
364                                                 jme->mii_if.phy_id,
365                                                 MII_BMCR);
366
367
368                         phylink |= ((bmcr & BMCR_SPEED1000) &&
369                                         (bmcr & BMCR_SPEED100) == 0) ?
370                                         PHY_LINK_SPEED_1000M :
371                                         (bmcr & BMCR_SPEED100) ?
372                                         PHY_LINK_SPEED_100M :
373                                         PHY_LINK_SPEED_10M;
374
375                         phylink |= (bmcr & BMCR_FULLDPLX) ?
376                                          PHY_LINK_DUPLEX : 0;
377
378                         strcat(linkmsg, "Forced: ");
379                 }
380                 else {
381                         /*
382                          * Keep polling until speed/duplex resolution completes
383                          */
384                         while(!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
385                                 --cnt) {
386
387                                 udelay(1);
388
389                                 if(jme->fpgaver)
390                                         phylink = jme_linkstat_from_phy(jme);
391                                 else
392                                         phylink = jread32(jme, JME_PHY_LINK);
393                         }
394
395                         if(!cnt)
396                                 jeprintk(netdev->name,
397                                         "Timed out waiting for speed/duplex resolve.\n");
398
399                         strcat(linkmsg, "ANed: ");
400                 }
401
402                 if(jme->phylink == phylink) {
403                         rc = 1;
404                         goto out;
405                 }
406                 if(testonly)
407                         goto out;
408
409                 jme->phylink = phylink;
410
411                 ghc = jme->reg_ghc & ~(GHC_SPEED_10M |
412                                         GHC_SPEED_100M |
413                                         GHC_SPEED_1000M |
414                                         GHC_DPX);
415                 switch(phylink & PHY_LINK_SPEED_MASK) {
416                         case PHY_LINK_SPEED_10M:
417                                 ghc |= GHC_SPEED_10M;
418                                 strcat(linkmsg, "10 Mbps, ");
419                                 break;
420                         case PHY_LINK_SPEED_100M:
421                                 ghc |= GHC_SPEED_100M;
422                                 strcat(linkmsg, "100 Mbps, ");
423                                 break;
424                         case PHY_LINK_SPEED_1000M:
425                                 ghc |= GHC_SPEED_1000M;
426                                 strcat(linkmsg, "1000 Mbps, ");
427                                 break;
428                         default:
429                                 break;
430                 }
431                 ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;
432
433                 strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
434                                         "Full-Duplex, " :
435                                         "Half-Duplex, ");
436
437                 if(phylink & PHY_LINK_MDI_STAT)
438                         strcat(linkmsg, "MDI-X");
439                 else
440                         strcat(linkmsg, "MDI");
441
442                 if(phylink & PHY_LINK_DUPLEX)
443                         jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
444                 else {
445                         jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
446                                                 TXMCS_BACKOFF |
447                                                 TXMCS_CARRIERSENSE |
448                                                 TXMCS_COLLISION);
449                         jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
450                                 ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
451                                 TXTRHD_TXREN |
452                                 ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
453                 }
454
455                 jme->reg_ghc = ghc;
456                 jwrite32(jme, JME_GHC, ghc);
457
458                 jprintk(netdev->name, "Link is up at %s.\n", linkmsg);
459                 netif_carrier_on(netdev);
460         }
461         else {
462                 if(testonly)
463                         goto out;
464
465                 jprintk(netdev->name, "Link is down.\n");
466                 jme->phylink = 0;
467                 netif_carrier_off(netdev);
468         }
469
470 out:
471         return rc;
472 }
473
474 static int
475 jme_setup_tx_resources(struct jme_adapter *jme)
476 {
477         struct jme_ring *txring = &(jme->txring[0]);
478
479         txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
480                                    TX_RING_ALLOC_SIZE(jme->tx_ring_size),
481                                    &(txring->dmaalloc),
482                                    GFP_ATOMIC);
483
484         if(!txring->alloc) {
485                 txring->desc = NULL;
486                 txring->dmaalloc = 0;
487                 txring->dma = 0;
488                 return -ENOMEM;
489         }
490
491         /*
492          * 16-byte alignment
493          */
494         txring->desc            = (void*)ALIGN((unsigned long)(txring->alloc),
495                                                 RING_DESC_ALIGN);
496         txring->dma             = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
497         txring->next_to_use     = 0;
498         atomic_set(&txring->next_to_clean, 0);
499         atomic_set(&txring->nr_free, jme->tx_ring_size);
500
501         /*
502          * Initialize Transmit Descriptors
503          */
504         memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
505         memset(txring->bufinf, 0,
506                 sizeof(struct jme_buffer_info) * jme->tx_ring_size);
507
508         return 0;
509 }
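        /*
         * TX_RING_ALLOC_SIZE() is expected to over-allocate enough slack
         * that both the CPU pointer and the DMA handle can be rounded up to
         * RING_DESC_ALIGN. That is safe because dma_alloc_coherent() returns
         * memory that is at least page aligned, so the CPU and bus addresses
         * share their low-order bits and the two ALIGN() calls advance by
         * the same amount.
         */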
510
511 static void
512 jme_free_tx_resources(struct jme_adapter *jme)
513 {
514         int i;
515         struct jme_ring *txring = &(jme->txring[0]);
516         struct jme_buffer_info *txbi = txring->bufinf;
517
518         if(txring->alloc) {
519                 for(i = 0 ; i < jme->tx_ring_size ; ++i) {
520                         txbi = txring->bufinf + i;
521                         if(txbi->skb) {
522                                 dev_kfree_skb(txbi->skb);
523                                 txbi->skb = NULL;
524                         }
525                         txbi->mapping           = 0;
526                         txbi->len               = 0;
527                         txbi->nr_desc           = 0;
528                         txbi->start_xmit        = 0;
529                 }
530
531                 dma_free_coherent(&(jme->pdev->dev),
532                                   TX_RING_ALLOC_SIZE(jme->tx_ring_size),
533                                   txring->alloc,
534                                   txring->dmaalloc);
535
536                 txring->alloc           = NULL;
537                 txring->desc            = NULL;
538                 txring->dmaalloc        = 0;
539                 txring->dma             = 0;
540         }
541         txring->next_to_use     = 0;
542         atomic_set(&txring->next_to_clean, 0);
543         atomic_set(&txring->nr_free, 0);
544
545 }
546
547 __always_inline static void
548 jme_enable_tx_engine(struct jme_adapter *jme)
549 {
550         /*
551          * Select Queue 0
552          */
553         jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
554
555         /*
556          * Setup TX Queue 0 DMA Base Address
557          */
558         jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
559         jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
560         jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
561
562         /*
563          * Setup TX Descriptor Count
564          */
565         jwrite32(jme, JME_TXQDC, jme->tx_ring_size);
566
567         /*
568          * Enable TX Engine
569          */
570         wmb();
571         jwrite32(jme, JME_TXCS, jme->reg_txcs |
572                                 TXCS_SELECT_QUEUE0 |
573                                 TXCS_ENABLE);
574
575 }
576
577 __always_inline static void
578 jme_restart_tx_engine(struct jme_adapter *jme)
579 {
580         /*
581          * Restart TX Engine
582          */
583         jwrite32(jme, JME_TXCS, jme->reg_txcs |
584                                 TXCS_SELECT_QUEUE0 |
585                                 TXCS_ENABLE);
586 }
587
588 __always_inline static void
589 jme_disable_tx_engine(struct jme_adapter *jme)
590 {
591         int i;
592         __u32 val;
593
594         /*
595          * Disable TX Engine
596          */
597         jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
598
599         val = jread32(jme, JME_TXCS);
600         for(i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i)
601         {
602                 mdelay(1);
603                 val = jread32(jme, JME_TXCS);
604         }
605
606         if(!i) {
607                 jeprintk(jme->dev->name, "Disable TX engine timeout.\n");
608                 jme_reset_mac_processor(jme);
609         }
610
611
612 }
613
614 static void
615 jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
616 {
617         struct jme_ring *rxring = jme->rxring;
618         register volatile struct rxdesc* rxdesc = rxring->desc;
619         struct jme_buffer_info *rxbi = rxring->bufinf;
620         rxdesc += i;
621         rxbi += i;
622
623         rxdesc->dw[0] = 0;
624         rxdesc->dw[1] = 0;
625         rxdesc->desc1.bufaddrh  = cpu_to_le32((__u64)rxbi->mapping >> 32);
626         rxdesc->desc1.bufaddrl  = cpu_to_le32(
627                                         (__u64)rxbi->mapping & 0xFFFFFFFFUL);
628         rxdesc->desc1.datalen   = cpu_to_le16(rxbi->len);
629         if(jme->dev->features & NETIF_F_HIGHDMA)
630                 rxdesc->desc1.flags = RXFLAG_64BIT;
631         wmb();
632         rxdesc->desc1.flags     |= RXFLAG_OWN | RXFLAG_INT;
633 }
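        /*
         * Ordering matters when recycling a descriptor: every field is
         * filled in first, then the wmb() guarantees those stores are
         * visible before RXFLAG_OWN hands the descriptor back to the NIC.
         * Setting OWN first could let the hardware DMA into a
         * half-initialized descriptor.
         */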
634
635 static int
636 jme_make_new_rx_buf(struct jme_adapter *jme, int i)
637 {
638         struct jme_ring *rxring = &(jme->rxring[0]);
639         struct jme_buffer_info *rxbi = rxring->bufinf + i;
640         unsigned long offset;
641         struct sk_buff* skb;
642
643         skb = netdev_alloc_skb(jme->dev,
644                 jme->dev->mtu + RX_EXTRA_LEN);
645         if(unlikely(!skb))
646                 return -ENOMEM;
647
648         if(unlikely(offset =
649                         (unsigned long)(skb->data)
650                         & ((unsigned long)RX_BUF_DMA_ALIGN - 1)))
651                 skb_reserve(skb, RX_BUF_DMA_ALIGN - offset);
652
653         rxbi->skb = skb;
654         rxbi->len = skb_tailroom(skb);
655         rxbi->mapping = pci_map_page(jme->pdev,
656                                         virt_to_page(skb->data),
657                                         offset_in_page(skb->data),
658                                         rxbi->len,
659                                         PCI_DMA_FROMDEVICE);
660
661         return 0;
662 }
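        /*
         * netdev_alloc_skb() gives no alignment promise beyond the kernel's
         * default, so skb_reserve() is used above to push skb->data up to
         * the next RX_BUF_DMA_ALIGN boundary before the page is DMA-mapped;
         * the buffer length recorded in rxbi->len is whatever tailroom
         * remains after that adjustment.
         */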
663
664 static void
665 jme_free_rx_buf(struct jme_adapter *jme, int i)
666 {
667         struct jme_ring *rxring = &(jme->rxring[0]);
668         struct jme_buffer_info *rxbi = rxring->bufinf;
669         rxbi += i;
670
671         if(rxbi->skb) {
672                 pci_unmap_page(jme->pdev,
673                                  rxbi->mapping,
674                                  rxbi->len,
675                                  PCI_DMA_FROMDEVICE);
676                 dev_kfree_skb(rxbi->skb);
677                 rxbi->skb = NULL;
678                 rxbi->mapping = 0;
679                 rxbi->len = 0;
680         }
681 }
682
683 static void
684 jme_free_rx_resources(struct jme_adapter *jme)
685 {
686         int i;
687         struct jme_ring *rxring = &(jme->rxring[0]);
688
689         if(rxring->alloc) {
690                 for(i = 0 ; i < jme->rx_ring_size ; ++i)
691                         jme_free_rx_buf(jme, i);
692
693                 dma_free_coherent(&(jme->pdev->dev),
694                                   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
695                                   rxring->alloc,
696                                   rxring->dmaalloc);
697                 rxring->alloc    = NULL;
698                 rxring->desc     = NULL;
699                 rxring->dmaalloc = 0;
700                 rxring->dma      = 0;
701         }
702         rxring->next_to_use   = 0;
703         atomic_set(&rxring->next_to_clean, 0);
704 }
705
706 static int
707 jme_setup_rx_resources(struct jme_adapter *jme)
708 {
709         int i;
710         struct jme_ring *rxring = &(jme->rxring[0]);
711
712         rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
713                                    RX_RING_ALLOC_SIZE(jme->rx_ring_size),
714                                    &(rxring->dmaalloc),
715                                    GFP_ATOMIC);
716         if(!rxring->alloc) {
717                 rxring->desc = NULL;
718                 rxring->dmaalloc = 0;
719                 rxring->dma = 0;
720                 return -ENOMEM;
721         }
722
723         /*
724          * 16-byte alignment
725          */
726         rxring->desc            = (void*)ALIGN((unsigned long)(rxring->alloc),
727                                                 RING_DESC_ALIGN);
728         rxring->dma             = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
729         rxring->next_to_use     = 0;
730         atomic_set(&rxring->next_to_clean, 0);
731
732         /*
733          * Initialize Receive Descriptors
734          */
735         for(i = 0 ; i < jme->rx_ring_size ; ++i) {
736                 if(unlikely(jme_make_new_rx_buf(jme, i))) {
737                         jme_free_rx_resources(jme);
738                         return -ENOMEM;
739                 }
740
741                 jme_set_clean_rxdesc(jme, i);
742         }
743
744         return 0;
745 }
746
747 __always_inline static void
748 jme_enable_rx_engine(struct jme_adapter *jme)
749 {
750         /*
751          * Setup RX DMA Base Address
752          */
753         jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
754         jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
755         jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
756
757         /*
758          * Setup RX Descriptor Count
759          */
760         jwrite32(jme, JME_RXQDC, jme->rx_ring_size);
761
762         /*
763          * Setup Unicast Filter
764          */
765         jme_set_multi(jme->dev);
766
767         /*
768          * Enable RX Engine
769          */
770         wmb();
771         jwrite32(jme, JME_RXCS, jme->reg_rxcs |
772                                 RXCS_QUEUESEL_Q0 |
773                                 RXCS_ENABLE |
774                                 RXCS_QST);
775 }
776
777 __always_inline static void
778 jme_restart_rx_engine(struct jme_adapter *jme)
779 {
780         /*
781          * Start RX Engine
782          */
783         jwrite32(jme, JME_RXCS, jme->reg_rxcs |
784                                 RXCS_QUEUESEL_Q0 |
785                                 RXCS_ENABLE |
786                                 RXCS_QST);
787 }
788
789
790 __always_inline static void
791 jme_disable_rx_engine(struct jme_adapter *jme)
792 {
793         int i;
794         __u32 val;
795
796         /*
797          * Disable RX Engine
798          */
799         jwrite32(jme, JME_RXCS, jme->reg_rxcs);
800
801         val = jread32(jme, JME_RXCS);
802         for(i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i)
803         {
804                 mdelay(1);
805                 val = jread32(jme, JME_RXCS);
806         }
807
808         if(!i)
809                 jeprintk(jme->dev->name, "Disable RX engine timeout.\n");
810
811 }
812
813 static int
814 jme_rxsum_ok(struct jme_adapter *jme, __u16 flags)
815 {
816         if(!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
817                 return false;
818
819         if(unlikely((flags & RXWBFLAG_TCPON) &&
820         !(flags & RXWBFLAG_TCPCS))) {
821                 csum_dbg(jme->dev->name, "TCP Checksum error.\n");
822                 goto out_sumerr;
823         }
824
825         if(unlikely((flags & RXWBFLAG_UDPON) &&
826         !(flags & RXWBFLAG_UDPCS))) {
827                 csum_dbg(jme->dev->name, "UDP Checksum error.\n");
828                 goto out_sumerr;
829         }
830
831         if(unlikely((flags & RXWBFLAG_IPV4) &&
832         !(flags & RXWBFLAG_IPCS))) {
833                 csum_dbg(jme->dev->name, "IPv4 Checksum error.\n");
834                 goto out_sumerr;
835         }
836
837         return true;
838
839 out_sumerr:
840         csum_dbg(jme->dev->name, "%s%s%s%s\n",
841                 (flags & RXWBFLAG_IPV4)?"IPv4 ":"",
842                 (flags & RXWBFLAG_IPV6)?"IPv6 ":"",
843                 (flags & RXWBFLAG_UDPON)?"UDP ":"",
844                 (flags & RXWBFLAG_TCPON)?"TCP":"");
845         return false;
846 }
847
848 static void
849 jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
850 {
851         struct jme_ring *rxring = &(jme->rxring[0]);
852         volatile struct rxdesc *rxdesc = rxring->desc;
853         struct jme_buffer_info *rxbi = rxring->bufinf;
854         struct sk_buff *skb;
855         int framesize;
856
857         rxdesc += idx;
858         rxbi += idx;
859
860         skb = rxbi->skb;
861         pci_dma_sync_single_for_cpu(jme->pdev,
862                                         rxbi->mapping,
863                                         rxbi->len,
864                                         PCI_DMA_FROMDEVICE);
865
866         if(unlikely(jme_make_new_rx_buf(jme, idx))) {
867                 pci_dma_sync_single_for_device(jme->pdev,
868                                                 rxbi->mapping,
869                                                 rxbi->len,
870                                                 PCI_DMA_FROMDEVICE);
871
872                 ++(NET_STAT(jme).rx_dropped);
873         }
874         else {
875                 framesize = le16_to_cpu(rxdesc->descwb.framesize)
876                                 - RX_PREPAD_SIZE;
877
878                 skb_reserve(skb, RX_PREPAD_SIZE);
879                 skb_put(skb, framesize);
880                 skb->protocol = eth_type_trans(skb, jme->dev);
881
882                 if(jme_rxsum_ok(jme, rxdesc->descwb.flags))
883                         skb->ip_summed = CHECKSUM_UNNECESSARY;
884                 else
885                         skb->ip_summed = CHECKSUM_NONE;
886
887
888                 if(rxdesc->descwb.flags & RXWBFLAG_TAGON) {
889                         vlan_dbg(jme->dev->name, "VLAN: %04x\n",
890                                         rxdesc->descwb.vlan);
891                         if(jme->vlgrp) {
892                                 vlan_dbg(jme->dev->name,
893                                         "VLAN Passed to kernel.\n");
894                                 jme->jme_vlan_rx(skb, jme->vlgrp,
895                                         le32_to_cpu(rxdesc->descwb.vlan));
896                                 NET_STAT(jme).rx_bytes += 4;
897                         }
898                 }
899                 else {
900                         jme->jme_rx(skb);
901                 }
902
903                 if((le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST) ==
904                                 RXWBFLAG_DEST_MUL)
905                         ++(NET_STAT(jme).multicast);
906
907                 jme->dev->last_rx = jiffies;
908                 NET_STAT(jme).rx_bytes += framesize;
909                 ++(NET_STAT(jme).rx_packets);
910         }
911
912         jme_set_clean_rxdesc(jme, idx);
913
914 }
915
916
917
918 static int
919 jme_process_receive(struct jme_adapter *jme, int limit)
920 {
921         struct jme_ring *rxring = &(jme->rxring[0]);
922         volatile struct rxdesc *rxdesc = rxring->desc;
923         int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;
924
925         if(unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
926                 goto out_inc;
927
928         if(unlikely(atomic_read(&jme->link_changing) != 1))
929                 goto out_inc;
930
931         if(unlikely(!netif_carrier_ok(jme->dev)))
932                 goto out_inc;
933
934         i = atomic_read(&rxring->next_to_clean);
935         while( limit-- > 0 )
936         {
937                 rxdesc = rxring->desc;
938                 rxdesc += i;
939
940                 if((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
941                 !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
942                         goto out;
943
944                 desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;
945
946                 rx_dbg(jme->dev->name, "RX: Cleaning %d\n", i);
947
948                 if(unlikely(desccnt > 1 ||
949                 rxdesc->descwb.errstat & RXWBERR_ALLERR)) {
950
951                         if(rxdesc->descwb.errstat & RXWBERR_CRCERR)
952                                 ++(NET_STAT(jme).rx_crc_errors);
953                         else if(rxdesc->descwb.errstat & RXWBERR_OVERUN)
954                                 ++(NET_STAT(jme).rx_fifo_errors);
955                         else
956                                 ++(NET_STAT(jme).rx_errors);
957
958                         if(desccnt > 1) {
959                                 rx_dbg(jme->dev->name,
960                                         "RX: More than one (%d) descriptor, "
961                                         "framelen=%d\n",
962                                         desccnt, le16_to_cpu(rxdesc->descwb.framesize));
963                                 limit -= desccnt - 1;
964                         }
965
966                         for(j = i, ccnt = desccnt ; ccnt-- ; ) {
967                                 jme_set_clean_rxdesc(jme, j);
968                                 j = (j + 1) & (mask);
969                         }
970
971                 }
972                 else {
973                         jme_alloc_and_feed_skb(jme, i);
974                 }
975
976                 i = (i + desccnt) & (mask);
977         }
978
979
980 out:
981         rx_dbg(jme->dev->name, "RX: Stop at %d\n", i);
982         rx_dbg(jme->dev->name, "RX: RXNDA offset %d\n",
983                 (jread32(jme, JME_RXNDA) - jread32(jme, JME_RXDBA_LO))
984                         >> 4);
985
986         atomic_set(&rxring->next_to_clean, i);
987
988 out_inc:
989         atomic_inc(&jme->rx_cleaning);
990
991         return limit > 0 ? limit : 0;
992
993 }
994
995 static void
996 jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
997 {
998         if(likely(atmp == dpi->cur)) {
999                 dpi->cnt = 0;
1000                 return;
1001         }
1002
1003         if(dpi->attempt == atmp) {
1004                 ++(dpi->cnt);
1005         }
1006         else {
1007                 dpi->attempt = atmp;
1008                 dpi->cnt = 0;
1009         }
1010
1011 }
1012
1013 static void
1014 jme_dynamic_pcc(struct jme_adapter *jme)
1015 {
1016         register struct dynpcc_info *dpi = &(jme->dpi);
1017
1018         if((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
1019                 jme_attempt_pcc(dpi, PCC_P3);
1020         else if((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD
1021         || dpi->intr_cnt > PCC_INTR_THRESHOLD)
1022                 jme_attempt_pcc(dpi, PCC_P2);
1023         else
1024                 jme_attempt_pcc(dpi, PCC_P1);
1025
1026         if(unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
1027                 jme_set_rx_pcc(jme, dpi->attempt);
1028                 dpi->cur = dpi->attempt;
1029                 dpi->cnt = 0;
1030         }
1031 }
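        /*
         * The dynamic PCC heuristic, per timer tick: heavy byte traffic
         * (bulk transfer) selects P3, heavy packet or interrupt rates select
         * P2, anything lighter falls back to the low-latency P1. A new level
         * is only committed after it has won more than five consecutive
         * ticks (dpi->cnt > 5), which damps oscillation between levels.
         */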
1032
1033 static void
1034 jme_start_pcc_timer(struct jme_adapter *jme)
1035 {
1036         struct dynpcc_info *dpi = &(jme->dpi);
1037         dpi->last_bytes         = NET_STAT(jme).rx_bytes;
1038         dpi->last_pkts          = NET_STAT(jme).rx_packets;
1039         dpi->intr_cnt           = 0;
1040         jwrite32(jme, JME_TMCSR,
1041                 TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
1042 }
1043
1044 __always_inline static void
1045 jme_stop_pcc_timer(struct jme_adapter *jme)
1046 {
1047         jwrite32(jme, JME_TMCSR, 0);
1048 }
1049
1050 static void
1051 jme_pcc_tasklet(unsigned long arg)
1052 {
1053         struct jme_adapter *jme = (struct jme_adapter*)arg;
1054         struct net_device *netdev = jme->dev;
1055
1056
1057         if(unlikely(!netif_carrier_ok(netdev) ||
1058                 (atomic_read(&jme->link_changing) != 1)
1059         )) {
1060                 jme_stop_pcc_timer(jme);
1061                 return;
1062         }
1063
1064         if(!(jme->flags & JME_FLAG_POLL))
1065                 jme_dynamic_pcc(jme);
1066
1067         jme_start_pcc_timer(jme);
1068 }
1069
1070 __always_inline static void
1071 jme_polling_mode(struct jme_adapter *jme)
1072 {
1073         jme_set_rx_pcc(jme, PCC_OFF);
1074 }
1075
1076 __always_inline static void
1077 jme_interrupt_mode(struct jme_adapter *jme)
1078 {
1079         jme_set_rx_pcc(jme, PCC_P1);
1080 }
1081
1082 static void
1083 jme_link_change_tasklet(unsigned long arg)
1084 {
1085         struct jme_adapter *jme = (struct jme_adapter*)arg;
1086         struct net_device *netdev = jme->dev;
1087         int timeout = WAIT_TASKLET_TIMEOUT;
1088         int rc;
1089
1090         if(!atomic_dec_and_test(&jme->link_changing))
1091                 goto out;
1092
1093         if(jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
1094                 goto out;
1095
1096         jme->old_mtu = netdev->mtu;
1097         netif_stop_queue(netdev);
1098
1099         while(--timeout > 0 &&
1100                 (
1101                 atomic_read(&jme->rx_cleaning) != 1 ||
1102                 atomic_read(&jme->tx_cleaning) != 1
1103                 )) {
1104
1105                 mdelay(1);
1106         }
1107
1108         if(netif_carrier_ok(netdev)) {
1109                 jme_stop_pcc_timer(jme);
1110                 jme_reset_mac_processor(jme);
1111                 jme_free_rx_resources(jme);
1112                 jme_free_tx_resources(jme);
1113
1114                 if(jme->flags & JME_FLAG_POLL)
1115                         jme_polling_mode(jme);
1116         }
1117
1118         jme_check_link(netdev, 0);
1119         if(netif_carrier_ok(netdev)) {
1120                 rc = jme_setup_rx_resources(jme);
1121                 if(rc) {
1122                         jeprintk(netdev->name,
1123                                 "Failed to allocate RX resources"
1124                                 ", Device STOPPED!\n");
1125                         goto out;
1126                 }
1127
1128
1129                 rc = jme_setup_tx_resources(jme);
1130                 if(rc) {
1131                         jeprintk(netdev->name,
1132                                 "Failed to allocate TX resources"
1133                                 ", Device STOPPED!\n");
1134                         goto err_out_free_rx_resources;
1135                 }
1136
1137                 jme_enable_rx_engine(jme);
1138                 jme_enable_tx_engine(jme);
1139
1140                 netif_start_queue(netdev);
1141
1142                 if(jme->flags & JME_FLAG_POLL)
1143                         jme_interrupt_mode(jme);
1144
1145                 jme_start_pcc_timer(jme);
1146         }
1147
1148         goto out;
1149
1150 err_out_free_rx_resources:
1151         jme_free_rx_resources(jme);
1152 out:
1153         atomic_inc(&jme->link_changing);
1154 }
1155
1156 static void
1157 jme_rx_clean_tasklet(unsigned long arg)
1158 {
1159         struct jme_adapter *jme = (struct jme_adapter*)arg;
1160         struct dynpcc_info *dpi = &(jme->dpi);
1161
1162         jme_process_receive(jme, jme->rx_ring_size);
1163         ++(dpi->intr_cnt);
1164
1165 }
1166
1167 static int
1168 jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
1169 {
1170         struct jme_adapter *jme = jme_napi_priv(holder);
1171         struct net_device *netdev = jme->dev;
1172         int rest;
1173
1174         rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));
1175
1176         while(atomic_read(&jme->rx_empty) > 0) {
1177                 atomic_dec(&jme->rx_empty);
1178                 ++(NET_STAT(jme).rx_dropped);
1179                 jme_restart_rx_engine(jme);
1180         }
1181         atomic_inc(&jme->rx_empty);
1182
1183         if(rest) {
1184                 JME_RX_COMPLETE(netdev, holder);
1185                 jme_interrupt_mode(jme);
1186         }
1187
1188         JME_NAPI_WEIGHT_SET(budget, rest);
1189         return JME_NAPI_WEIGHT_VAL(budget) - rest;
1190 }
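        /*
         * NAPI poll path: receive up to the budget, then re-arm. The
         * rx_empty dance mirrors the interrupt path: any RX0EMP events
         * observed while polling are drained here by restarting the RX
         * engine and counting the overflow as drops. Only when the budget
         * was not exhausted (rest > 0) does the driver complete NAPI and
         * switch the coalescing back to interrupt mode.
         */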
1191
1192 static void
1193 jme_rx_empty_tasklet(unsigned long arg)
1194 {
1195         struct jme_adapter *jme = (struct jme_adapter*)arg;
1196
1197         if(unlikely(atomic_read(&jme->link_changing) != 1))
1198                 return;
1199
1200         if(unlikely(!netif_carrier_ok(jme->dev)))
1201                 return;
1202
1203         queue_dbg(jme->dev->name, "RX Queue Full!\n");
1204
1205         jme_rx_clean_tasklet(arg);
1206
1207         while(atomic_read(&jme->rx_empty) > 0) {
1208                 atomic_dec(&jme->rx_empty);
1209                 ++(NET_STAT(jme).rx_dropped);
1210                 jme_restart_rx_engine(jme);
1211         }
1212         atomic_inc(&jme->rx_empty);
1213 }
1214
1215 static void
1216 jme_wake_queue_if_stopped(struct jme_adapter *jme)
1217 {
1218         struct jme_ring *txring = jme->txring;
1219
1220         smp_wmb();
1221         if(unlikely(netif_queue_stopped(jme->dev) &&
1222         atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
1223
1224                 queue_dbg(jme->dev->name, "TX Queue Woken.\n");
1225                 netif_wake_queue(jme->dev);
1226
1227         }
1228
1229 }
1230
1231 static void
1232 jme_tx_clean_tasklet(unsigned long arg)
1233 {
1234         struct jme_adapter *jme = (struct jme_adapter*)arg;
1235         struct jme_ring *txring = &(jme->txring[0]);
1236         volatile struct txdesc *txdesc = txring->desc;
1237         struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
1238         int i, j, cnt = 0, max, err, mask;
1239
1240         if(unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
1241                 goto out;
1242
1243         if(unlikely(atomic_read(&jme->link_changing) != 1))
1244                 goto out;
1245
1246         if(unlikely(!netif_carrier_ok(jme->dev)))
1247                 goto out;
1248
1249         max = jme->tx_ring_size - atomic_read(&txring->nr_free);
1250         mask = jme->tx_ring_mask;
1251
1252         tx_dbg(jme->dev->name, "Tx Tasklet: In\n");
1253
1254         for(i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {
1255
1256                 ctxbi = txbi + i;
1257
1258                 if(likely(ctxbi->skb &&
1259                 !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {
1260
1261                         err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;
1262
1263                         tx_dbg(jme->dev->name,
1264                                 "Tx Tasklet: Clean %d+%d\n",
1265                                 i, ctxbi->nr_desc);
1266
1267                         for(j = 1 ; j < ctxbi->nr_desc ; ++j) {
1268                                 ttxbi = txbi + ((i + j) & (mask));
1269                                 txdesc[(i + j) & (mask)].dw[0] = 0;
1270
1271                                 pci_unmap_page(jme->pdev,
1272                                                  ttxbi->mapping,
1273                                                  ttxbi->len,
1274                                                  PCI_DMA_TODEVICE);
1275
1276                                 ttxbi->mapping = 0;
1277                                 ttxbi->len = 0;
1278                         }
1279
1280                         dev_kfree_skb(ctxbi->skb);
1281
1282                         cnt += ctxbi->nr_desc;
1283
1284                         if(unlikely(err))
1285                                 ++(NET_STAT(jme).tx_carrier_errors);
1286                         else {
1287                                 ++(NET_STAT(jme).tx_packets);
1288                                 NET_STAT(jme).tx_bytes += ctxbi->len;
1289                         }
1290
1291                         ctxbi->skb = NULL;
1292                         ctxbi->len = 0;
1293                         ctxbi->start_xmit = 0;
1294                 }
1295                 else {
1296                         if(!ctxbi->skb)
1297                                 tx_dbg(jme->dev->name,
1298                                         "Tx Tasklet:"
1299                                         " Stopped due to no skb.\n");
1300                         else
1301                                 tx_dbg(jme->dev->name,
1302                                         "Tx Tasklet:"
1303                                         " Stopped due to descriptor not done.\n");
1304                         break;
1305                 }
1306
1307                 i = (i + ctxbi->nr_desc) & mask;
1308
1309                 ctxbi->nr_desc = 0;
1310         }
1311
1312         tx_dbg(jme->dev->name,
1313                 "Tx Tasklet: Stop %d Jiffies %lu\n",
1314                 i, jiffies);
1315
1316         atomic_set(&txring->next_to_clean, i);
1317         atomic_add(cnt, &txring->nr_free);
1318
1319         jme_wake_queue_if_stopped(jme);
1320
1321 out:
1322         atomic_inc(&jme->tx_cleaning);
1323 }
1324
1325 static void
1326 jme_intr_msi(struct jme_adapter *jme, __u32 intrstat)
1327 {
1328         /*
1329          * Disable interrupt
1330          */
1331         jwrite32f(jme, JME_IENC, INTR_ENABLE);
1332
1333         if(intrstat & (INTR_LINKCH | INTR_SWINTR)) {
1334                 /*
1335                  * Link change event is critical
1336                  * all other events are ignored
1337                  */
1338                 jwrite32(jme, JME_IEVE, intrstat);
1339                 tasklet_schedule(&jme->linkch_task);
1340                 goto out_reenable;
1341         }
1342
1343         if(intrstat & INTR_TMINTR) {
1344                 jwrite32(jme, JME_IEVE, INTR_TMINTR);
1345                 tasklet_schedule(&jme->pcc_task);
1346         }
1347
1348         if(intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
1349                 jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
1350                 tasklet_schedule(&jme->txclean_task);
1351         }
1352
1353         if((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
1354                 jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO |
1355                                                      INTR_PCCRX0 |
1356                                                      INTR_RX0EMP)) |
1357                                         INTR_RX0);
1358         }
1359
1360         if(jme->flags & JME_FLAG_POLL) {
1361                 if(intrstat & INTR_RX0EMP)
1362                         atomic_inc(&jme->rx_empty);
1363
1364                 if((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
1365                         if(likely(JME_RX_SCHEDULE_PREP(jme))) {
1366                                 jme_polling_mode(jme);
1367                                 JME_RX_SCHEDULE(jme);
1368                         }
1369                 }
1370         }
1371         else {
1372                 if(intrstat & INTR_RX0EMP) {
1373                         atomic_inc(&jme->rx_empty);
1374                         tasklet_schedule(&jme->rxempty_task);
1375                 }
1376                 else if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
1377                         tasklet_schedule(&jme->rxclean_task);
1378         }
1379
1380 out_reenable:
1381         /*
1382          * Re-enable interrupt
1383          */
1384         jwrite32f(jme, JME_IENS, INTR_ENABLE);
1385
1386
1387 }
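        /*
         * Common interrupt core, shared by the INTx and MSI handlers. Events
         * are acked by writing them back to JME_IEVE. Link changes preempt
         * everything else; TX completion and the PCC timer go to tasklets;
         * RX events either wake NAPI (polling mode) or the rxclean/rxempty
         * tasklets, with RX0EMP additionally bumping the rx_empty counter so
         * the ring can be restarted once buffers are refilled.
         */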
1388
1389 static irqreturn_t
1390 jme_intr(int irq, void *dev_id)
1391 {
1392         struct net_device *netdev = dev_id;
1393         struct jme_adapter *jme = netdev_priv(netdev);
1394         __u32 intrstat;
1395
1396         intrstat = jread32(jme, JME_IEVE);
1397
1398         /*
1399          * Check if it's really an interrupt for us
1400          */
1401         if(unlikely(intrstat == 0))
1402                 return IRQ_NONE;
1403
1404         /*
1405          * Check if the device still exists
1406          */
1407         if(unlikely(intrstat == ~((typeof(intrstat))0)))
1408                 return IRQ_NONE;
1409
1410         jme_intr_msi(jme, intrstat);
1411
1412         return IRQ_HANDLED;
1413 }
1414
1415 static irqreturn_t
1416 jme_msi(int irq, void *dev_id)
1417 {
1418         struct net_device *netdev = dev_id;
1419         struct jme_adapter *jme = netdev_priv(netdev);
1420         __u32 intrstat;
1421
1422         pci_dma_sync_single_for_cpu(jme->pdev,
1423                                     jme->shadow_dma,
1424                                     sizeof(__u32) * SHADOW_REG_NR,
1425                                     PCI_DMA_FROMDEVICE);
1426         intrstat = jme->shadow_regs[SHADOW_IEVE];
1427         jme->shadow_regs[SHADOW_IEVE] = 0;
1428
1429         jme_intr_msi(jme, intrstat);
1430
1431         return IRQ_HANDLED;
1432 }
1433
1434
1435 static void
1436 jme_reset_link(struct jme_adapter *jme)
1437 {
1438         jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
1439 }
1440
1441 static void
1442 jme_restart_an(struct jme_adapter *jme)
1443 {
1444         __u32 bmcr;
1445         unsigned long flags;
1446
1447         spin_lock_irqsave(&jme->phy_lock, flags);
1448         bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1449         bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
1450         jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1451         spin_unlock_irqrestore(&jme->phy_lock, flags);
1452 }
1453
1454 static int
1455 jme_request_irq(struct jme_adapter *jme)
1456 {
1457         int rc;
1458         struct net_device *netdev = jme->dev;
1459         irq_handler_t handler = jme_intr;
1460         int irq_flags = IRQF_SHARED;
1461
1462         if (!pci_enable_msi(jme->pdev)) {
1463                 jme->flags |= JME_FLAG_MSI;
1464                 handler = jme_msi;
1465                 irq_flags = 0;
1466         }
1467
1468         rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
1469                           netdev);
1470         if(rc) {
1471                 jeprintk(netdev->name,
1472                         "Unable to request %s interrupt (return: %d)\n",
1473                         jme->flags & JME_FLAG_MSI ? "MSI":"INTx", rc);
1474
1475                 if(jme->flags & JME_FLAG_MSI) {
1476                         pci_disable_msi(jme->pdev);
1477                         jme->flags &= ~JME_FLAG_MSI;
1478                 }
1479         }
1480         else {
1481                 netdev->irq = jme->pdev->irq;
1482         }
1483
1484         return rc;
1485 }
1486
1487 static void
1488 jme_free_irq(struct jme_adapter *jme)
1489 {
1490         free_irq(jme->pdev->irq, jme->dev);
1491         if (jme->flags & JME_FLAG_MSI) {
1492                 pci_disable_msi(jme->pdev);
1493                 jme->flags &= ~JME_FLAG_MSI;
1494                 jme->dev->irq = jme->pdev->irq;
1495         }
1496 }
1497
1498 static int
1499 jme_open(struct net_device *netdev)
1500 {
1501         struct jme_adapter *jme = netdev_priv(netdev);
1502         int rc, timeout = 10;
1503
1504         while(
1505                 --timeout > 0 &&
1506                 (
1507                 atomic_read(&jme->link_changing) != 1 ||
1508                 atomic_read(&jme->rx_cleaning) != 1 ||
1509                 atomic_read(&jme->tx_cleaning) != 1
1510                 )
1511         )
1512                 msleep(1);
1513
1514         if(!timeout) {
1515                 rc = -EBUSY;
1516                 goto err_out;
1517         }
1518
1519         jme_clear_pm(jme);
1520         jme_reset_mac_processor(jme);
1521         JME_NAPI_ENABLE(jme);
1522
1523         rc = jme_request_irq(jme);
1524         if(rc)
1525                 goto err_out;
1526
1527         jme_enable_shadow(jme);
1528         jme_start_irq(jme);
1529
1530         if(jme->flags & JME_FLAG_SSET)
1531                 jme_set_settings(netdev, &jme->old_ecmd);
1532         else
1533                 jme_reset_phy_processor(jme);
1534
1535         jme_reset_link(jme);
1536
1537         return 0;
1538
1539 err_out:
1540         netif_stop_queue(netdev);
1541         netif_carrier_off(netdev);
1542         return rc;
1543 }
1544
1545 static void
1546 jme_set_100m_half(struct jme_adapter *jme)
1547 {
1548         __u32 bmcr, tmp;
1549
1550         bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1551         tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
1552                        BMCR_SPEED1000 | BMCR_FULLDPLX);
1553         tmp |= BMCR_SPEED100;
1554
1555         if (bmcr != tmp)
1556                 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);
1557
1558         if(jme->fpgaver)
1559                 jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL);
1560         else
1561                 jwrite32(jme, JME_GHC, GHC_SPEED_100M);
1562 }
1563
1564 #define JME_WAIT_LINK_TIME 2000 /* 2000ms */
1565 static void
1566 jme_wait_link(struct jme_adapter *jme)
1567 {
1568         __u32 phylink, to = JME_WAIT_LINK_TIME;
1569
1570         mdelay(1000);
1571         phylink = jme_linkstat_from_phy(jme);
1572         while(!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
1573                 mdelay(10);
1574                 phylink = jme_linkstat_from_phy(jme);
1575         }
1576 }
1577
1578 static void
1579 jme_phy_off(struct jme_adapter *jme)
1580 {
1581         jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
1582 }
1583
1584
1585 static int
1586 jme_close(struct net_device *netdev)
1587 {
1588         struct jme_adapter *jme = netdev_priv(netdev);
1589
1590         netif_stop_queue(netdev);
1591         netif_carrier_off(netdev);
1592
1593         jme_stop_irq(jme);
1594         jme_disable_shadow(jme);
1595         jme_free_irq(jme);
1596
1597         JME_NAPI_DISABLE(jme);
1598
1599         tasklet_kill(&jme->linkch_task);
1600         tasklet_kill(&jme->txclean_task);
1601         tasklet_kill(&jme->rxclean_task);
1602         tasklet_kill(&jme->rxempty_task);
1603
1604         jme_reset_mac_processor(jme);
1605         jme_free_rx_resources(jme);
1606         jme_free_tx_resources(jme);
1607         jme->phylink = 0;
1608         jme_phy_off(jme);
1609
1610         return 0;
1611 }
1612
1613 static int
1614 jme_alloc_txdesc(struct jme_adapter *jme,
1615                         struct sk_buff *skb)
1616 {
1617         struct jme_ring *txring = jme->txring;
1618         int idx, nr_alloc, mask = jme->tx_ring_mask;
1619
1620         idx = txring->next_to_use;
1621         nr_alloc = skb_shinfo(skb)->nr_frags + 2;
1622
1623         if(unlikely(atomic_read(&txring->nr_free) < nr_alloc))
1624                 return -1;
1625
1626         atomic_sub(nr_alloc, &txring->nr_free);
1627
1628         txring->next_to_use = (txring->next_to_use + nr_alloc) & mask;
1629
1630         return idx;
1631 }
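        /*
         * Each packet consumes nr_frags + 2 descriptors: the slot at idx is
         * left for the leading command descriptor that the transmit path
         * fills in, idx + 1 takes the linear part of the skb, and one more
         * slot is used per page fragment (see jme_map_tx_skb() below). The
         * index only wraps through tx_ring_mask, so tx_ring_size must be a
         * power of two.
         */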
1632
1633 static void
1634 jme_fill_tx_map(struct pci_dev *pdev,
1635                 volatile struct txdesc *txdesc,
1636                 struct jme_buffer_info *txbi,
1637                 struct page *page,
1638                 __u32 page_offset,
1639                 __u32 len,
1640                 __u8 hidma)
1641 {
1642         dma_addr_t dmaaddr;
1643
1644         dmaaddr = pci_map_page(pdev,
1645                                 page,
1646                                 page_offset,
1647                                 len,
1648                                 PCI_DMA_TODEVICE);
1649
1650         pci_dma_sync_single_for_device(pdev,
1651                                        dmaaddr,
1652                                        len,
1653                                        PCI_DMA_TODEVICE);
1654
1655         txdesc->dw[0] = 0;
1656         txdesc->dw[1] = 0;
1657         txdesc->desc2.flags     = TXFLAG_OWN;
1658         txdesc->desc2.flags     |= (hidma)?TXFLAG_64BIT:0;
1659         txdesc->desc2.datalen   = cpu_to_le16(len);
1660         txdesc->desc2.bufaddrh  = cpu_to_le32((__u64)dmaaddr >> 32);
1661         txdesc->desc2.bufaddrl  = cpu_to_le32(
1662                                         (__u64)dmaaddr & 0xFFFFFFFFUL);
1663
1664         txbi->mapping = dmaaddr;
1665         txbi->len = len;
1666 }
1667
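     /*
      * Fill the data descriptors for an skb: page fragments go to
      * slots idx+2 onward, the linear part to slot idx+1. Slot idx is
      * the leading descriptor, filled last by jme_fill_first_tx_desc()
      * so that its OWN bit is set only once everything else is ready.
      */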
1668 static void
1669 jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
1670 {
1671         struct jme_ring *txring = jme->txring;
1672         volatile struct txdesc *txdesc = txring->desc, *ctxdesc;
1673         struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
1674         __u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
1675         int i, nr_frags = skb_shinfo(skb)->nr_frags;
1676         int mask = jme->tx_ring_mask;
1677         struct skb_frag_struct *frag;
1678         __u32 len;
1679
1680         for(i = 0 ; i < nr_frags ; ++i) {
1681                 frag = &skb_shinfo(skb)->frags[i];
1682                 ctxdesc = txdesc + ((idx + i + 2) & (mask));
1683                 ctxbi = txbi + ((idx + i + 2) & (mask));
1684
1685                 jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page,
1686                                  frag->page_offset, frag->size, hidma);
1687         }
1688
1689         len = skb_is_nonlinear(skb)?skb_headlen(skb):skb->len;
1690         ctxdesc = txdesc + ((idx + 1) & (mask));
1691         ctxbi = txbi + ((idx + 1) & (mask));
1692         jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
1693                         offset_in_page(skb->data), len, hidma);
1694
1695 }
1696
1697 static int
1698 jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
1699 {
1700         if(unlikely(skb_shinfo(skb)->gso_size &&
1701                         skb_header_cloned(skb) &&
1702                         pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
1703                 dev_kfree_skb(skb);
1704                 return -1;
1705         }
1706
1707         return 0;
1708 }
1709
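     /*
      * For a TSO frame, store the MSS in the descriptor and seed the
      * TCP checksum with the pseudo-header sum (computed with zero
      * length), since the hardware fills in the per-segment checksums.
      * Returns 0 when TSO is used, 1 when the caller should fall back
      * to plain checksum offload flags.
      */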
1710 static int
1711 jme_tx_tso(struct sk_buff *skb,
1712                 volatile __u16 *mss, __u8 *flags)
1713 {
             /* Descriptor fields are little-endian (cf. desc2.datalen). */
1714         if((*mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT))) {
1715                 *flags |= TXFLAG_LSEN;
1716
1717                 if(skb->protocol == __constant_htons(ETH_P_IP)) {
1718                         struct iphdr *iph = ip_hdr(skb);
1719
1720                         iph->check = 0;
1721                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1722                                                                 iph->daddr, 0,
1723                                                                 IPPROTO_TCP,
1724                                                                 0);
1725                 }
1726                 else {
1727                         struct ipv6hdr *ip6h = ipv6_hdr(skb);
1728
1729                         tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
1730                                                                 &ip6h->daddr, 0,
1731                                                                 IPPROTO_TCP,
1732                                                                 0);
1733                 }
1734
1735                 return 0;
1736         }
1737
1738         return 1;
1739 }
1740
1741 static void
1742 jme_tx_csum(struct sk_buff *skb, __u8 *flags)
1743 {
1744         if(skb->ip_summed == CHECKSUM_PARTIAL) {
1745                 __u8 ip_proto;
1746
1747                 switch (skb->protocol) {
1748                 case __constant_htons(ETH_P_IP):
1749                         ip_proto = ip_hdr(skb)->protocol;
1750                         break;
1751                 case __constant_htons(ETH_P_IPV6):
1752                         ip_proto = ipv6_hdr(skb)->nexthdr;
1753                         break;
1754                 default:
1755                         ip_proto = 0;
1756                         break;
1757                 }
1758
1759                 switch(ip_proto) {
1760                 case IPPROTO_TCP:
1761                         *flags |= TXFLAG_TCPCS;
1762                         break;
1763                 case IPPROTO_UDP:
1764                         *flags |= TXFLAG_UDPCS;
1765                         break;
1766                 default:
1767                         jeprintk("jme", "Unsupported upper layer protocol for checksum offload.\n");
1768                         break;
1769                 }
1770         }
1771 }
1772
1773 __always_inline static void
1774 jme_tx_vlan(struct sk_buff *skb, volatile __u16 *vlan, __u8 *flags)
1775 {
1776         if(vlan_tx_tag_present(skb)) {
1777                 vlan_dbg("jme", "Tag found!(%04x)\n", vlan_tx_tag_get(skb));
1778                 *flags |= TXFLAG_TAGON;
1779                 *vlan = cpu_to_le16(vlan_tx_tag_get(skb));
1780         }
1781 }
1782
1783 static int
1784 jme_fill_first_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
1785 {
1786         struct jme_ring *txring = jme->txring;
1787         volatile struct txdesc *txdesc;
1788         struct jme_buffer_info *txbi;
1789         __u8 flags;
1790
1791         txdesc = (volatile struct txdesc*)txring->desc + idx;
1792         txbi = txring->bufinf + idx;
1793
1794         txdesc->dw[0] = 0;
1795         txdesc->dw[1] = 0;
1796         txdesc->dw[2] = 0;
1797         txdesc->dw[3] = 0;
1798         txdesc->desc1.pktsize = cpu_to_le16(skb->len);
1799         /*
1800          * Set the OWN bit last. If the kernel queues packets faster
1801          * than the NIC sends them, the NIC could otherwise pick up
1802          * this descriptor before it is completely filled in, so all
1803          * other descriptor fields must be made visible before the
1804          * flags word is written; hence the barrier below.
1805          */
1806         wmb();
1807         flags = TXFLAG_OWN | TXFLAG_INT;
1808         /* Set checksum flags only when not doing TSO */
1809         if(jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
1810                 jme_tx_csum(skb, &flags);
1811         jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
1812         txdesc->desc1.flags = flags;
1813         /*
1814          * Update the tx buffer info only after the descriptor has
1815          * been handed over, for better tx_clean timing.
1816          */
1817         wmb();
1818         txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
1819         txbi->skb = skb;
1820         txbi->len = skb->len;
             /*
              * Zero jiffies means "not started" to the stall check in
              * jme_stop_queue_if_full(), so substitute the maximum value.
              */
1821         if(!(txbi->start_xmit = jiffies))
1822                 txbi->start_xmit = (0UL-1);
1823
1824         return 0;
1825 }
1826
1827 static void
1828 jme_stop_queue_if_full(struct jme_adapter *jme)
1829 {
1830         struct jme_ring *txring = jme->txring;
1831         struct jme_buffer_info *txbi = txring->bufinf;
1832
1833         txbi += atomic_read(&txring->next_to_clean);
1834
1835         smp_wmb();
1836         if(unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
1837                 netif_stop_queue(jme->dev);
1838                 queue_dbg(jme->dev->name, "TX Queue Paused.\n");
1839                 smp_wmb();
1840                 if (atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold)) {
1841                         netif_wake_queue(jme->dev);
1842                         queue_dbg(jme->dev->name, "TX Queue Fast Waked.\n");
1843                 }
1844         }
1845
1846         if(unlikely(    txbi->start_xmit &&
1847                         (jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
1848                         txbi->skb)) {
1849                 netif_stop_queue(jme->dev);
1850                 queue_dbg(jme->dev->name, "TX Queue Stopped @(%lu).\n", jiffies);
1851         }
1852 }
1853
1854 /*
1855  * This function is already protected by netif_tx_lock()
1856  */
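     /*
      * TX path summary: reserve descriptors, map the buffers, fill the
      * leading descriptor (setting its OWN bit last), then kick queue 0
      * via TXCS. If the ring is unexpectedly full, the queue is stopped
      * and NETDEV_TX_BUSY makes the stack requeue the skb.
      */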
1857 static int
1858 jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1859 {
1860         struct jme_adapter *jme = netdev_priv(netdev);
1861         int idx;
1862
1863         if(skb_shinfo(skb)->nr_frags) {
1864                 tx_dbg(netdev->name, "Frags: %d Headlen: %d Len: %d MSS: %d Sum:%d\n",
1865                         skb_shinfo(skb)->nr_frags,
1866                         skb_headlen(skb),
1867                         skb->len,
1868                         skb_shinfo(skb)->gso_size,
1869                         skb->ip_summed);
1870         }
1871
1872         if(unlikely(jme_expand_header(jme, skb))) {
1873                 ++(NET_STAT(jme).tx_dropped);
1874                 return NETDEV_TX_OK;
1875         }
1876
1877         idx = jme_alloc_txdesc(jme, skb);
1878
1879         if(unlikely(idx<0)) {
1880                 netif_stop_queue(netdev);
1881                 jeprintk(netdev->name,
1882                                 "BUG! Tx ring full when queue awake!\n");
1883
1884                 return NETDEV_TX_BUSY;
1885         }
1886
1887         jme_map_tx_skb(jme, skb, idx);
1888         jme_fill_first_tx_desc(jme, skb, idx);
1889
1890         tx_dbg(jme->dev->name, "Xmit: %d+%d @(%lu)\n",
1891                         idx, skb_shinfo(skb)->nr_frags + 2, jiffies);
1892
1893         jwrite32(jme, JME_TXCS, jme->reg_txcs |
1894                                 TXCS_SELECT_QUEUE0 |
1895                                 TXCS_QUEUE0S |
1896                                 TXCS_ENABLE);
1897         netdev->trans_start = jiffies;
1898
1899         jme_stop_queue_if_full(jme);
1900
1901         return NETDEV_TX_OK;
1902 }
1903
1904 static int
1905 jme_set_macaddr(struct net_device *netdev, void *p)
1906 {
1907         struct jme_adapter *jme = netdev_priv(netdev);
1908         struct sockaddr *addr = p;
1909         __u32 val;
1910
1911         if(netif_running(netdev))
1912                 return -EBUSY;
1913
1914         spin_lock(&jme->macaddr_lock);
1915         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1916
1917         val = (addr->sa_data[3] & 0xff) << 24 |
1918               (addr->sa_data[2] & 0xff) << 16 |
1919               (addr->sa_data[1] & 0xff) <<  8 |
1920               (addr->sa_data[0] & 0xff);
1921         jwrite32(jme, JME_RXUMA_LO, val);
1922         val = (addr->sa_data[5] & 0xff) << 8 |
1923               (addr->sa_data[4] & 0xff);
1924         jwrite32(jme, JME_RXUMA_HI, val);
1925         spin_unlock(&jme->macaddr_lock);
1926
1927         return 0;
1928 }
1929
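     /*
      * Program the RX filter mode. Multicast filtering uses a 64-bit
      * hash: the low 6 bits of the CRC of each multicast address
      * select one bit across the two 32-bit hash registers.
      */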
1930 static void
1931 jme_set_multi(struct net_device *netdev)
1932 {
1933         struct jme_adapter *jme = netdev_priv(netdev);
1934         u32 mc_hash[2] = {};
1935         int i;
1936         unsigned long flags;
1937
1938         spin_lock_irqsave(&jme->rxmcs_lock, flags);
1939
1940         jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;
1941
1942         if (netdev->flags & IFF_PROMISC) {
1943                 jme->reg_rxmcs |= RXMCS_ALLFRAME;
1944         }
1945         else if (netdev->flags & IFF_ALLMULTI) {
1946                 jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
1947         }
1948         else if(netdev->flags & IFF_MULTICAST) {
1949                 struct dev_mc_list *mclist;
1950                 int bit_nr;
1951
1952                 jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
1953                 for (i = 0, mclist = netdev->mc_list;
1954                         mclist && i < netdev->mc_count;
1955                         ++i, mclist = mclist->next) {
1956
1957                         bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
1958                         mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
1959                 }
1960
1961                 jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
1962                 jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
1963         }
1964
1965         wmb();
1966         jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
1967
1968         spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
1969 }
1970
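     /*
      * MTU changes adjust two hardware-related thresholds: above 4000
      * bytes the RX FIFO threshold drops to 64QW, and above 1900 bytes
      * the checksum/TSO offloads are disabled, presumably because the
      * hardware cannot offload frames that large.
      */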
1971 static int
1972 jme_change_mtu(struct net_device *netdev, int new_mtu)
1973 {
1974         struct jme_adapter *jme = netdev_priv(netdev);
1975
1976         if(new_mtu == jme->old_mtu)
1977                 return 0;
1978
1979         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
1980                 ((new_mtu) < IPV6_MIN_MTU))
1981                 return -EINVAL;
1982
1983         if(new_mtu > 4000) {
1984                 jme->reg_rxcs &= ~RXCS_FIFOTHNP;
1985                 jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
1986                 jme_restart_rx_engine(jme);
1987         }
1988         else {
1989                 jme->reg_rxcs &= ~RXCS_FIFOTHNP;
1990                 jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
1991                 jme_restart_rx_engine(jme);
1992         }
1993
1994         if(new_mtu > 1900) {
1995                 netdev->features &= ~(NETIF_F_HW_CSUM |
1996                                 NETIF_F_TSO |
1997                                 NETIF_F_TSO6);
1998         }
1999         else {
2000                 if(jme->flags & JME_FLAG_TXCSUM)
2001                         netdev->features |= NETIF_F_HW_CSUM;
2002                 if(jme->flags & JME_FLAG_TSO)
2003                         netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
2004         }
2005
2006         netdev->mtu = new_mtu;
2007         jme_reset_link(jme);
2008
2009         return 0;
2010 }
2011
2012 static void
2013 jme_tx_timeout(struct net_device *netdev)
2014 {
2015         struct jme_adapter *jme = netdev_priv(netdev);
2016
2017         jme->phylink = 0;
2018         jme_reset_phy_processor(jme);
2019         if(jme->flags & JME_FLAG_SSET)
2020                 jme_set_settings(netdev, &jme->old_ecmd);
2021
2022         /*
2023          * Force the link to be re-established
2024          */
2025         jme_reset_link(jme);
2026 }
2027
2028 static void
2029 jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2030 {
2031         struct jme_adapter *jme = netdev_priv(netdev);
2032
2033         jme->vlgrp = grp;
2034 }
2035
2036 static void
2037 jme_get_drvinfo(struct net_device *netdev,
2038                      struct ethtool_drvinfo *info)
2039 {
2040         struct jme_adapter *jme = netdev_priv(netdev);
2041
2042         strcpy(info->driver, DRV_NAME);
2043         strcpy(info->version, DRV_VERSION);
2044         strcpy(info->bus_info, pci_name(jme->pdev));
2045 }
2046
2047 static int
2048 jme_get_regs_len(struct net_device *netdev)
2049 {
2050         return JME_REG_LEN;
2051 }
2052
2053 static void
2054 mmapio_memcpy(struct jme_adapter *jme, __u32 *p, __u32 reg, int len)
2055 {
2056         int i;
2057
2058         for(i = 0 ; i < len ; i += 4)
2059                 p[i >> 2] = jread32(jme, reg + i);
2060 }
2061
2062 static void
2063 mdio_memcpy(struct jme_adapter *jme, __u32 *p, int reg_nr)
2064 {
2065         int i;
2066         __u16 *p16 = (__u16*)p;
2067
2068         for(i = 0 ; i < reg_nr ; ++i)
2069                 p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i);
2070 }
2071
2072 static void
2073 jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
2074 {
2075         struct jme_adapter *jme = netdev_priv(netdev);
2076         __u32 *p32 = (__u32*)p;
2077
2078         memset(p, 0xFF, JME_REG_LEN);
2079
2080         regs->version = 1;
2081         mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);
2082
2083         p32 += 0x100 >> 2;
2084         mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);
2085
2086         p32 += 0x100 >> 2;
2087         mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);
2088
2089         p32 += 0x100 >> 2;
2090         mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);
2091
2092         p32 += 0x100 >> 2;
2093         mdio_memcpy(jme, p32, JME_PHY_REG_NR);
2094 }
2095
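     /*
      * Coalescing is reported in terms of the hardware PCC (packet
      * completion coalescing) levels P1..P3, each a timeout/frame-count
      * pair. With JME_FLAG_POLL set the driver runs in NAPI polling
      * mode, so adaptive coalescing is reported as disabled.
      */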
2096 static int
2097 jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
2098 {
2099         struct jme_adapter *jme = netdev_priv(netdev);
2100
2101         ecmd->tx_coalesce_usecs = PCC_TX_TO;
2102         ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
2103
2104         if(jme->flags & JME_FLAG_POLL) {
2105                 ecmd->use_adaptive_rx_coalesce = false;
2106                 ecmd->rx_coalesce_usecs = 0;
2107                 ecmd->rx_max_coalesced_frames = 0;
2108                 return 0;
2109         }
2110
2111         ecmd->use_adaptive_rx_coalesce = true;
2112
2113         switch(jme->dpi.cur) {
2114         case PCC_P1:
2115                 ecmd->rx_coalesce_usecs = PCC_P1_TO;
2116                 ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
2117                 break;
2118         case PCC_P2:
2119                 ecmd->rx_coalesce_usecs = PCC_P2_TO;
2120                 ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
2121                 break;
2122         case PCC_P3:
2123                 ecmd->rx_coalesce_usecs = PCC_P3_TO;
2124                 ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
2125                 break;
2126         default:
2127                 break;
2128         }
2129
2130         return 0;
2131 }
2132
2133 static int
2134 jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
2135 {
2136         struct jme_adapter *jme = netdev_priv(netdev);
2137         struct dynpcc_info *dpi = &(jme->dpi);
2138
2139         if(netif_running(netdev))
2140                 return -EBUSY;
2141
2142         if(ecmd->use_adaptive_rx_coalesce
2143         && (jme->flags & JME_FLAG_POLL)) {
2144                 jme->flags &= ~JME_FLAG_POLL;
2145                 jme->jme_rx = netif_rx;
2146                 jme->jme_vlan_rx = vlan_hwaccel_rx;
2147                 dpi->cur                = PCC_P1;
2148                 dpi->attempt            = PCC_P1;
2149                 dpi->cnt                = 0;
2150                 jme_set_rx_pcc(jme, PCC_P1);
2151                 jme_interrupt_mode(jme);
2152         }
2153         else if(!(ecmd->use_adaptive_rx_coalesce)
2154         && !(jme->flags & JME_FLAG_POLL)) {
2155                 jme->flags |= JME_FLAG_POLL;
2156                 jme->jme_rx = netif_receive_skb;
2157                 jme->jme_vlan_rx = vlan_hwaccel_receive_skb;
2158                 jme_interrupt_mode(jme);
2159         }
2160
2161         return 0;
2162 }
2163
2164 static void
2165 jme_get_pauseparam(struct net_device *netdev,
2166                         struct ethtool_pauseparam *ecmd)
2167 {
2168         struct jme_adapter *jme = netdev_priv(netdev);
2169         unsigned long flags;
2170         __u32 val;
2171
2172         ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
2173         ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;
2174
2175         spin_lock_irqsave(&jme->phy_lock, flags);
2176         val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2177         spin_unlock_irqrestore(&jme->phy_lock, flags);
2178
2179         ecmd->autoneg =
2180                 (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
2181 }
2182
2183 static int
2184 jme_set_pauseparam(struct net_device *netdev,
2185                         struct ethtool_pauseparam *ecmd)
2186 {
2187         struct jme_adapter *jme = netdev_priv(netdev);
2188         unsigned long flags;
2189         __u32 val;
2190
2191         if( ((jme->reg_txpfc & TXPFC_PF_EN) != 0) !=
2192                 (ecmd->tx_pause != 0)) {
2193
2194                 if(ecmd->tx_pause)
2195                         jme->reg_txpfc |= TXPFC_PF_EN;
2196                 else
2197                         jme->reg_txpfc &= ~TXPFC_PF_EN;
2198
2199                 jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
2200         }
2201
2202         spin_lock_irqsave(&jme->rxmcs_lock, flags);
2203         if( ((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) !=
2204                 (ecmd->rx_pause != 0)) {
2205
2206                 if(ecmd->rx_pause)
2207                         jme->reg_rxmcs |= RXMCS_FLOWCTRL;
2208                 else
2209                         jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;
2210
2211                 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2212         }
2213         spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
2214
2215         spin_lock_irqsave(&jme->phy_lock, flags);
2216         val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2217         if( ((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) !=
2218                 (ecmd->autoneg != 0)) {
2219
2220                 if(ecmd->autoneg)
2221                         val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2222                 else
2223                         val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2224
2225                 jme_mdio_write(jme->dev, jme->mii_if.phy_id,
2226                                 MII_ADVERTISE, val);
2227         }
2228         spin_unlock_irqrestore(&jme->phy_lock, flags);
2229
2230         return 0;
2231 }
2232
2233 static void
2234 jme_get_wol(struct net_device *netdev,
2235                 struct ethtool_wolinfo *wol)
2236 {
2237         struct jme_adapter *jme = netdev_priv(netdev);
2238
2239         wol->supported = WAKE_MAGIC | WAKE_PHY;
2240
2241         wol->wolopts = 0;
2242
2243         if(jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
2244                 wol->wolopts |= WAKE_PHY;
2245
2246         if(jme->reg_pmcs & PMCS_MFEN)
2247                 wol->wolopts |= WAKE_MAGIC;
2248
2249 }
2250
2251 static int
2252 jme_set_wol(struct net_device *netdev,
2253                 struct ethtool_wolinfo *wol)
2254 {
2255         struct jme_adapter *jme = netdev_priv(netdev);
2256
2257         if(wol->wolopts & (WAKE_MAGICSECURE |
2258                                 WAKE_UCAST |
2259                                 WAKE_MCAST |
2260                                 WAKE_BCAST |
2261                                 WAKE_ARP))
2262                 return -EOPNOTSUPP;
2263
2264         jme->reg_pmcs = 0;
2265
2266         if(wol->wolopts & WAKE_PHY)
2267                 jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;
2268
2269         if(wol->wolopts & WAKE_MAGIC)
2270                 jme->reg_pmcs |= PMCS_MFEN;
2271
2272
2273         return 0;
2274 }
2275
2276 static int
2277 jme_get_settings(struct net_device *netdev,
2278                      struct ethtool_cmd *ecmd)
2279 {
2280         struct jme_adapter *jme = netdev_priv(netdev);
2281         int rc;
2282         unsigned long flags;
2283
2284         spin_lock_irqsave(&jme->phy_lock, flags);
2285         rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
2286         spin_unlock_irqrestore(&jme->phy_lock, flags);
2287         return rc;
2288 }
2289
2290 static int
2291 jme_set_settings(struct net_device *netdev,
2292                      struct ethtool_cmd *ecmd)
2293 {
2294         struct jme_adapter *jme = netdev_priv(netdev);
2295         int rc, fdc=0;
2296         unsigned long flags;
2297
2298         if(ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
2299                 return -EINVAL;
2300
2301         if(jme->mii_if.force_media &&
2302         ecmd->autoneg != AUTONEG_ENABLE &&
2303         (jme->mii_if.full_duplex != ecmd->duplex))
2304                 fdc = 1;
2305
2306         spin_lock_irqsave(&jme->phy_lock, flags);
2307         rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
2308         spin_unlock_irqrestore(&jme->phy_lock, flags);
2309
2310         if(!rc && fdc)
2311                 jme_reset_link(jme);
2312
2313         if(!rc) {
2314                 jme->flags |= JME_FLAG_SSET;
2315                 jme->old_ecmd = *ecmd;
2316         }
2317
2318         return rc;
2319 }
2320
2321 static __u32
2322 jme_get_link(struct net_device *netdev)
2323 {
2324         struct jme_adapter *jme = netdev_priv(netdev);
2325         return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
2326 }
2327
2328 static u32
2329 jme_get_rx_csum(struct net_device *netdev)
2330 {
2331         struct jme_adapter *jme = netdev_priv(netdev);
2332
2333         return jme->reg_rxmcs & RXMCS_CHECKSUM;
2334 }
2335
2336 static int
2337 jme_set_rx_csum(struct net_device *netdev, u32 on)
2338 {
2339         struct jme_adapter *jme = netdev_priv(netdev);
2340         unsigned long flags;
2341
2342         spin_lock_irqsave(&jme->rxmcs_lock, flags);
2343         if(on)
2344                 jme->reg_rxmcs |= RXMCS_CHECKSUM;
2345         else
2346                 jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
2347         jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2348         spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
2349
2350         return 0;
2351 }
2352
2353 static int
2354 jme_set_tx_csum(struct net_device *netdev, u32 on)
2355 {
2356         struct jme_adapter *jme = netdev_priv(netdev);
2357
2358         if(on) {
2359                 jme->flags |= JME_FLAG_TXCSUM;
2360                 if(netdev->mtu <= 1900)
2361                         netdev->features |= NETIF_F_HW_CSUM;
2362         }
2363         else {
2364                 jme->flags &= ~JME_FLAG_TXCSUM;
2365                 netdev->features &= ~NETIF_F_HW_CSUM;
2366         }
2367
2368         return 0;
2369 }
2370
2371 static int
2372 jme_set_tso(struct net_device *netdev, u32 on)
2373 {
2374         struct jme_adapter *jme = netdev_priv(netdev);
2375
2376         if (on) {
2377                 jme->flags |= JME_FLAG_TSO;
2378                 if(netdev->mtu <= 1900)
2379                         netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
2380         }
2381         else {
2382                 jme->flags &= ~JME_FLAG_TSO;
2383                 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
2384         }
2385
2386         return 0;
2387 }
2388
2389 static int
2390 jme_nway_reset(struct net_device *netdev)
2391 {
2392         struct jme_adapter *jme = netdev_priv(netdev);
2393         jme_restart_an(jme);
2394         return 0;
2395 }
2396
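     /*
      * EEPROM access goes through the SMB interface: wait for the bus
      * to go idle, issue a read or write command with the target
      * address, then poll until the hardware clears the command bit.
      * Timeouts are counted in milliseconds via msleep().
      */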
2397 static __u8
2398 jme_smb_read(struct jme_adapter *jme, unsigned int addr)
2399 {
2400         __u32 val;
2401         int to;
2402
2403         val = jread32(jme, JME_SMBCSR);
2404         to = JME_SMB_BUSY_TIMEOUT;
2405         while((val & SMBCSR_BUSY) && --to) {
2406                 msleep(1);
2407                 val = jread32(jme, JME_SMBCSR);
2408         }
2409         if(!to) {
2410                 jeprintk(jme->dev->name, "SMB Bus Busy.\n");
2411                 return 0xFF;
2412         }
2413
2414         jwrite32(jme, JME_SMBINTF,
2415                 ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
2416                 SMBINTF_HWRWN_READ |
2417                 SMBINTF_HWCMD);
2418
2419         val = jread32(jme, JME_SMBINTF);
2420         to = JME_SMB_BUSY_TIMEOUT;
2421         while((val & SMBINTF_HWCMD) && --to) {
2422                 msleep(1);
2423                 val = jread32(jme, JME_SMBINTF);
2424         }
2425         if(!to) {
2426                 jeprintk(jme->dev->name, "SMB read command timeout.\n");
2427                 return 0xFF;
2428         }
2429
2430         return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
2431 }
2432
2433 static void
2434 jme_smb_write(struct jme_adapter *jme, unsigned int addr, __u8 data)
2435 {
2436         __u32 val;
2437         int to;
2438
2439         val = jread32(jme, JME_SMBCSR);
2440         to = JME_SMB_BUSY_TIMEOUT;
2441         while((val & SMBCSR_BUSY) && --to) {
2442                 msleep(1);
2443                 val = jread32(jme, JME_SMBCSR);
2444         }
2445         if(!to) {
2446                 jeprintk(jme->dev->name, "SMB Bus Busy.\n");
2447                 return;
2448         }
2449
2450         jwrite32(jme, JME_SMBINTF,
2451                 ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
2452                 ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
2453                 SMBINTF_HWRWN_WRITE |
2454                 SMBINTF_HWCMD);
2455
2456         val = jread32(jme, JME_SMBINTF);
2457         to = JME_SMB_BUSY_TIMEOUT;
2458         while((val & SMBINTF_HWCMD) && --to) {
2459                 msleep(1);
2460                 val = jread32(jme, JME_SMBINTF);
2461         }
2462         if(!to) {
2463                 jeprintk(jme->dev->name, "SMB write command timeout.\n");
2464                 return;
2465         }
2466
             /* Give the EEPROM time to commit the write internally. */
2467         mdelay(2);
2468 }
2469
2470 static int
2471 jme_get_eeprom_len(struct net_device *netdev)
2472 {
2473         struct jme_adapter *jme = netdev_priv(netdev);
2474         __u32 val;
2475         val = jread32(jme, JME_SMBCSR);
2476         return (val & SMBCSR_EEPROMD)?JME_SMB_LEN:0;
2477 }
2478
2479 static int
2480 jme_get_eeprom(struct net_device *netdev,
2481                 struct ethtool_eeprom *eeprom, u8 *data)
2482 {
2483         struct jme_adapter *jme = netdev_priv(netdev);
2484         int i, offset = eeprom->offset, len = eeprom->len;
2485
2486         /*
2487          * ethtool will check the boundary for us
2488          */
2489         eeprom->magic = JME_EEPROM_MAGIC;
2490         for(i = 0 ; i < len ; ++i)
2491                 data[i] = jme_smb_read(jme, i + offset);
2492
2493         return 0;
2494 }
2495
2496 static int
2497 jme_set_eeprom(struct net_device *netdev,
2498                 struct ethtool_eeprom *eeprom, u8 *data)
2499 {
2500         struct jme_adapter *jme = netdev_priv(netdev);
2501         int i, offset = eeprom->offset, len = eeprom->len;
2502
2503         if (eeprom->magic != JME_EEPROM_MAGIC)
2504                 return -EINVAL;
2505
2506         /*
2507          * ethtool will check the boundary for us
2508          */
2509         for(i = 0 ; i < len ; ++i)
2510                 jme_smb_write(jme, i + offset, data[i]);
2511
2512         return 0;
2513 }
2514
2515 static const struct ethtool_ops jme_ethtool_ops = {
2516         .get_drvinfo            = jme_get_drvinfo,
2517         .get_regs_len           = jme_get_regs_len,
2518         .get_regs               = jme_get_regs,
2519         .get_coalesce           = jme_get_coalesce,
2520         .set_coalesce           = jme_set_coalesce,
2521         .get_pauseparam         = jme_get_pauseparam,
2522         .set_pauseparam         = jme_set_pauseparam,
2523         .get_wol                = jme_get_wol,
2524         .set_wol                = jme_set_wol,
2525         .get_settings           = jme_get_settings,
2526         .set_settings           = jme_set_settings,
2527         .get_link               = jme_get_link,
2528         .get_rx_csum            = jme_get_rx_csum,
2529         .set_rx_csum            = jme_set_rx_csum,
2530         .set_tx_csum            = jme_set_tx_csum,
2531         .set_tso                = jme_set_tso,
2532         .set_sg                 = ethtool_op_set_sg,
2533         .nway_reset             = jme_nway_reset,
2534         .get_eeprom_len         = jme_get_eeprom_len,
2535         .get_eeprom             = jme_get_eeprom,
2536         .set_eeprom             = jme_set_eeprom,
2537 };
2538
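     /*
      * Pick the widest DMA mask the platform accepts, falling back
      * from 64-bit through 40-bit to 32-bit. Returns 1 when
      * high-address DMA is usable (the caller then sets
      * NETIF_F_HIGHDMA), 0 for 32-bit only, and -1 on failure.
      */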
2539 static int
2540 jme_pci_dma64(struct pci_dev *pdev)
2541 {
2542         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
2543                 if(!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
2544                         dprintk("jme", "64Bit DMA Selected.\n");
2545                         return 1;
2546                 }
2547
2548         if (!pci_set_dma_mask(pdev, DMA_40BIT_MASK))
2549                 if(!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK)) {
2550                         dprintk("jme", "40Bit DMA Selected.\n");
2551                         return 1;
2552                 }
2553
2554         if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
2555                 if(!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
2556                         dprintk("jme", "32Bit DMA Selected.\n");
2557                         return 0;
2558                 }
2559
2560         return -1;
2561 }
2562
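     /*
      * The PHY accesses below poke vendor-specific registers 26 and 27
      * that are not documented here; bit 0x1000 in register 26 and the
      * value 0x0004 in register 27 (GMII mode, used on FPGA boards)
      * are apparently magic values required by the hardware.
      */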
2563 __always_inline static void
2564 jme_phy_init(struct jme_adapter *jme)
2565 {
2566         __u16 reg26;
2567
2568         reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
2569         jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
2570 }
2571
2572 __always_inline static void
2573 jme_set_gmii(struct jme_adapter *jme)
2574 {
2575         jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
2576 }
2577
2578 static void
2579 jme_check_hw_ver(struct jme_adapter *jme)
2580 {
2581         __u32 chipmode;
2582
2583         chipmode = jread32(jme, JME_CHIPMODE);
2584
2585         jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
2586         jme->chipver = (chipmode & CM_CHIPVER_MASK) >> CM_CHIPVER_SHIFT;
2587 }
2588
2589 static int __devinit
2590 jme_init_one(struct pci_dev *pdev,
2591              const struct pci_device_id *ent)
2592 {
2593         int rc = 0, using_dac, i;
2594         struct net_device *netdev;
2595         struct jme_adapter *jme;
2596         __u16 bmcr, bmsr;
2597
2598         /*
2599          * set up PCI device basics
2600          */
2601         rc = pci_enable_device(pdev);
2602         if(rc) {
2603                 printk(KERN_ERR PFX "Cannot enable PCI device.\n");
2604                 goto err_out;
2605         }
2606
2607         using_dac = jme_pci_dma64(pdev);
2608         if(using_dac < 0) {
2609                 printk(KERN_ERR PFX "Cannot set PCI DMA Mask.\n");
2610                 rc = -EIO;
2611                 goto err_out_disable_pdev;
2612         }
2613
2614         if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2615                 printk(KERN_ERR PFX "No PCI resource region found.\n");
2616                 rc = -ENOMEM;
2617                 goto err_out_disable_pdev;
2618         }
2619
2620         rc = pci_request_regions(pdev, DRV_NAME);
2621         if(rc) {
2622                 printk(KERN_ERR PFX "Cannot obtain PCI resource region.\n");
2623                 goto err_out_disable_pdev;
2624         }
2625
2626         pci_set_master(pdev);
2627
2628         /*
2629          * alloc and init net device
2630          */
2631         netdev = alloc_etherdev(sizeof(*jme));
2632         if(!netdev) {
2633                 printk(KERN_ERR PFX "Cannot allocate netdev structure.\n");
2634                 rc = -ENOMEM;
2635                 goto err_out_release_regions;
2636         }
2637         netdev->open                    = jme_open;
2638         netdev->stop                    = jme_close;
2639         netdev->hard_start_xmit         = jme_start_xmit;
2640         netdev->set_mac_address         = jme_set_macaddr;
2641         netdev->set_multicast_list      = jme_set_multi;
2642         netdev->change_mtu              = jme_change_mtu;
2643         netdev->ethtool_ops             = &jme_ethtool_ops;
2644         netdev->tx_timeout              = jme_tx_timeout;
2645         netdev->watchdog_timeo          = TX_TIMEOUT;
2646         netdev->vlan_rx_register        = jme_vlan_rx_register;
2647         NETDEV_GET_STATS(netdev, &jme_get_stats);
2648         netdev->features                =       NETIF_F_HW_CSUM |
2649                                                 NETIF_F_SG |
2650                                                 NETIF_F_TSO |
2651                                                 NETIF_F_TSO6 |
2652                                                 NETIF_F_HW_VLAN_TX |
2653                                                 NETIF_F_HW_VLAN_RX;
2654         if(using_dac)
2655                 netdev->features        |=      NETIF_F_HIGHDMA;
2656
2657         SET_NETDEV_DEV(netdev, &pdev->dev);
2658         pci_set_drvdata(pdev, netdev);
2659
2660         /*
2661          * init adapter info
2662          */
2663         jme = netdev_priv(netdev);
2664         jme->pdev = pdev;
2665         jme->dev = netdev;
2666         jme->jme_rx = netif_rx;
2667         jme->jme_vlan_rx = vlan_hwaccel_rx;
2668         jme->old_mtu = netdev->mtu = 1500;
2669         jme->phylink = 0;
2670         jme->tx_ring_size = 1 << 10;
2671         jme->tx_ring_mask = jme->tx_ring_size - 1;
2672         jme->tx_wake_threshold = 1 << 9;
2673         jme->rx_ring_size = 1 << 9;
2674         jme->rx_ring_mask = jme->rx_ring_size - 1;
2675         jme->regs = ioremap(pci_resource_start(pdev, 0),
2676                              pci_resource_len(pdev, 0));
2677         if (!(jme->regs)) {
2678                 printk(KERN_ERR PFX "Mapping PCI resource region error.\n");
2679                 rc = -ENOMEM;
2680                 goto err_out_free_netdev;
2681         }
2682         jme->shadow_regs = pci_alloc_consistent(pdev,
2683                                                 sizeof(__u32) * SHADOW_REG_NR,
2684                                                 &(jme->shadow_dma));
2685         if (!(jme->shadow_regs)) {
2686                 printk(KERN_ERR PFX "Allocating shadow register mapping error.\n");
2687                 rc = -ENOMEM;
2688                 goto err_out_unmap;
2689         }
2690
2691         NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)
2692
2693         spin_lock_init(&jme->phy_lock);
2694         spin_lock_init(&jme->macaddr_lock);
2695         spin_lock_init(&jme->rxmcs_lock);
2696
2697         atomic_set(&jme->link_changing, 1);
2698         atomic_set(&jme->rx_cleaning, 1);
2699         atomic_set(&jme->tx_cleaning, 1);
2700         atomic_set(&jme->rx_empty, 1);
2701
2702         tasklet_init(&jme->pcc_task,
2703                      &jme_pcc_tasklet,
2704                      (unsigned long) jme);
2705         tasklet_init(&jme->linkch_task,
2706                      &jme_link_change_tasklet,
2707                      (unsigned long) jme);
2708         tasklet_init(&jme->txclean_task,
2709                      &jme_tx_clean_tasklet,
2710                      (unsigned long) jme);
2711         tasklet_init(&jme->rxclean_task,
2712                      &jme_rx_clean_tasklet,
2713                      (unsigned long) jme);
2714         tasklet_init(&jme->rxempty_task,
2715                      &jme_rx_empty_tasklet,
2716                      (unsigned long) jme);
2717         jme->dpi.cur = PCC_P1;
2718
2719         if(pdev->device == JME_GE_DEVICE)
2720                 jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
2721         else
2722                 jme->reg_ghc = GHC_DPX | GHC_SPEED_100M;
2723         jme->reg_rxcs = RXCS_DEFAULT;
2724         jme->reg_rxmcs = RXMCS_DEFAULT;
2725         jme->reg_txpfc = 0;
2726         jme->reg_pmcs = PMCS_MFEN;
2727         jme->flags = JME_FLAG_TXCSUM | JME_FLAG_TSO;
2728
2729         /*
2730          * Get Max Read Req Size from PCI Config Space
2731          */
2732         pci_read_config_byte(pdev, PCI_CONF_DCSR_MRRS, &jme->mrrs);
2733         switch(jme->mrrs) {
2734                 case MRRS_128B:
2735                         jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
2736                         break;
2737                 case MRRS_256B:
2738                         jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
2739                         break;
2740                 default:
2741                         jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
2742                         break;
2743         }
2744
2745
2746         /*
2747          * Must check before reset_mac_processor
2748          */
2749         jme_check_hw_ver(jme);
2750         jme->mii_if.dev = netdev;
2751         if(jme->fpgaver) {
2752                 jme->mii_if.phy_id = 0;
2753                 for(i = 1 ; i < 32 ; ++i) {
2754                         bmcr = jme_mdio_read(netdev, i, MII_BMCR);
2755                         bmsr = jme_mdio_read(netdev, i, MII_BMSR);
2756                         if(bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
2757                                 jme->mii_if.phy_id = i;
2758                                 break;
2759                         }
2760                 }
2761
2762                 if(!jme->mii_if.phy_id) {
2763                         rc = -EIO;
2764                         printk(KERN_ERR PFX "Can not find phy_id.\n");
2765                         goto err_out_free_shadow;
2766                 }
2767
2768                 jme->reg_ghc |= GHC_LINK_POLL;
2769         }
2770         else {
2771                 jme->mii_if.phy_id = 1;
2772         }
2773         if(pdev->device == JME_GE_DEVICE)
2774                 jme->mii_if.supports_gmii = true;
2775         else
2776                 jme->mii_if.supports_gmii = false;
2777         jme->mii_if.mdio_read = jme_mdio_read;
2778         jme->mii_if.mdio_write = jme_mdio_write;
2779
2780         jme_clear_pm(jme);
2781         if(jme->fpgaver)
2782                 jme_set_gmii(jme);
2783         else
2784                 jme_phy_init(jme);
2785         jme_phy_off(jme);
2786
2787         /*
2788          * Reset MAC processor and reload EEPROM for MAC Address
2789          */
2790         jme_reset_mac_processor(jme);
2791         rc = jme_reload_eeprom(jme);
2792         if(rc) {
2793                 printk(KERN_ERR PFX
2794                         "Reload eeprom for reading MAC Address error.\n");
2795                 goto err_out_free_shadow;
2796         }
2797         jme_load_macaddr(netdev);
2798
2799
2800         /*
2801          * Tell stack that we are not ready to work until open()
2802          */
2803         netif_carrier_off(netdev);
2804         netif_stop_queue(netdev);
2805
2806         /*
2807          * Register netdev
2808          */
2809         rc = register_netdev(netdev);
2810         if(rc) {
2811                 printk(KERN_ERR PFX "Cannot register net device.\n");
2812                 goto err_out_free_shadow;
2813         }
2814
2815         jprintk(netdev->name,
2816                 "JMC250 gigabit%s ver:%u eth %02x:%02x:%02x:%02x:%02x:%02x\n",
2817                 (jme->fpgaver != 0)?" (FPGA)":"",
2818                 (jme->fpgaver != 0)?jme->fpgaver:jme->chipver,
2819                 netdev->dev_addr[0],
2820                 netdev->dev_addr[1],
2821                 netdev->dev_addr[2],
2822                 netdev->dev_addr[3],
2823                 netdev->dev_addr[4],
2824                 netdev->dev_addr[5]);
2825
2826         return 0;
2827
2828 err_out_free_shadow:
2829         pci_free_consistent(pdev,
2830                             sizeof(__u32) * SHADOW_REG_NR,
2831                             jme->shadow_regs,
2832                             jme->shadow_dma);
2833 err_out_unmap:
2834         iounmap(jme->regs);
2835 err_out_free_netdev:
2836         pci_set_drvdata(pdev, NULL);
2837         free_netdev(netdev);
2838 err_out_release_regions:
2839         pci_release_regions(pdev);
2840 err_out_disable_pdev:
2841         pci_disable_device(pdev);
2842 err_out:
2843         return rc;
2844 }
2845
2846 static void __devexit
2847 jme_remove_one(struct pci_dev *pdev)
2848 {
2849         struct net_device *netdev = pci_get_drvdata(pdev);
2850         struct jme_adapter *jme = netdev_priv(netdev);
2851
2852         unregister_netdev(netdev);
2853         pci_free_consistent(pdev,
2854                             sizeof(__u32) * SHADOW_REG_NR,
2855                             jme->shadow_regs,
2856                             jme->shadow_dma);
2857         iounmap(jme->regs);
2858         pci_set_drvdata(pdev, NULL);
2859         free_netdev(netdev);
2860         pci_release_regions(pdev);
2861         pci_disable_device(pdev);
2862
2863 }
2864
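     /*
      * Suspend: detach the interface, wait for the cleaning tasklets
      * to go idle, and tear down the rings. The IRQ stays requested
      * across suspend (note the commented-out jme_free_irq() below).
      * If any wake source is armed, force the link to 100M half duplex
      * (waiting for it to come up when waking on link change) before
      * enabling the wake states.
      */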
2865 static int
2866 jme_suspend(struct pci_dev *pdev, pm_message_t state)
2867 {
2868         struct net_device *netdev = pci_get_drvdata(pdev);
2869         struct jme_adapter *jme = netdev_priv(netdev);
2870         int timeout = 100;
2871
2872         atomic_dec(&jme->link_changing);
2873
2874         netif_device_detach(netdev);
2875         netif_stop_queue(netdev);
2876         jme_stop_irq(jme);
2877         //jme_free_irq(jme);
2878
2879         while(--timeout > 0 &&
2880         (
2881                 atomic_read(&jme->rx_cleaning) != 1 ||
2882                 atomic_read(&jme->tx_cleaning) != 1
2883         )) {
2884                 mdelay(1);
2885         }
2886         if(!timeout) {
2887                 jeprintk(netdev->name, "Waiting tasklets timeout.\n");
2888                 return -EBUSY;
2889         }
2890         jme_disable_shadow(jme);
2891
2892         if(netif_carrier_ok(netdev)) {
2893                 if(jme->flags & JME_FLAG_POLL)
2894                         jme_polling_mode(jme);
2895
2896                 jme_stop_pcc_timer(jme);
2897                 jme_reset_mac_processor(jme);
2898                 jme_free_rx_resources(jme);
2899                 jme_free_tx_resources(jme);
2900                 netif_carrier_off(netdev);
2901                 jme->phylink = 0;
2902         }
2903
2904
2905         pci_save_state(pdev);
2906         if(jme->reg_pmcs) {
2907                 jme_set_100m_half(jme);
2908
2909                 if(jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
2910                         jme_wait_link(jme);
2911
2912                 jwrite32(jme, JME_PMCS, jme->reg_pmcs);
2913                 pci_enable_wake(pdev, PCI_D1, true);
2914                 pci_enable_wake(pdev, PCI_D3hot, true);
2915                 pci_enable_wake(pdev, PCI_D3cold, true);
2916         }
2917         else {
2918                 jme_phy_off(jme);
2919                 pci_enable_wake(pdev, PCI_D1, false);
2920                 pci_enable_wake(pdev, PCI_D3hot, false);
2921                 pci_enable_wake(pdev, PCI_D3cold, false);
2922         }
2923         pci_set_power_state(pdev, pci_choose_state(pdev, state));
2924
2925         return 0;
2926 }
2927
2928 static int
2929 jme_resume(struct pci_dev *pdev)
2930 {
2931         struct net_device *netdev = pci_get_drvdata(pdev);
2932         struct jme_adapter *jme = netdev_priv(netdev);
2933
2934         jme_clear_pm(jme);
2935         pci_restore_state(pdev);
2936
2937         if(jme->flags & JME_FLAG_SSET)
2938                 jme_set_settings(netdev, &jme->old_ecmd);
2939         else
2940                 jme_reset_phy_processor(jme);
2941
2942         jme_reset_mac_processor(jme);
2943         jme_enable_shadow(jme);
2944         //jme_request_irq(jme);
2945         jme_start_irq(jme);
2946         netif_device_attach(netdev);
2947
2948         atomic_inc(&jme->link_changing);
2949
2950         jme_reset_link(jme);
2951
2952         return 0;
2953 }
2954
2955 static struct pci_device_id jme_pci_tbl[] = {
2956         { PCI_VDEVICE(JMICRON, JME_GE_DEVICE) },
2957         { PCI_VDEVICE(JMICRON, JME_FE_DEVICE) },
2958         { }
2959 };
2960
2961 static struct pci_driver jme_driver = {
2962         .name           = DRV_NAME,
2963         .id_table       = jme_pci_tbl,
2964         .probe          = jme_init_one,
2965         .remove         = __devexit_p(jme_remove_one),
2966 #ifdef CONFIG_PM
2967         .suspend        = jme_suspend,
2968         .resume         = jme_resume,
2969 #endif /* CONFIG_PM */
2970 };
2971
2972 static int __init
2973 jme_init_module(void)
2974 {
2975         printk(KERN_INFO PFX "JMicron JMC2x0 series PCIe ethernet "
2976                "driver version %s\n", DRV_VERSION);
2977         return pci_register_driver(&jme_driver);
2978 }
2979
2980 static void __exit
2981 jme_cleanup_module(void)
2982 {
2983         pci_unregister_driver(&jme_driver);
2984 }
2985
2986 module_init(jme_init_module);
2987 module_exit(jme_cleanup_module);
2988
2989 MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
2990 MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
2991 MODULE_LICENSE("GPL");
2992 MODULE_VERSION(DRV_VERSION);
2993 MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
2994