/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * http://www.jmicron.com/
 * Copyright (c) 2009 - 2010 Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <net/ip6_checksum.h>
#include "jme.h"

static int force_pseudohp = -1;
static int no_pseudohp = -1;
static int no_extplug = -1;
module_param(force_pseudohp, int, 0);
MODULE_PARM_DESC(force_pseudohp,
	"Enable pseudo hot-plug feature manually by driver instead of BIOS.");
module_param(no_pseudohp, int, 0);
MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature.");
module_param(no_extplug, int, 0);
MODULE_PARM_DESC(no_extplug,
	"Do not use external plug signal for pseudo hot-plug.");
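
/*
 * Example (illustrative only): load the module with driver-managed
 * pseudo hot-plug enabled and the external plug signal ignored:
 *
 *	modprobe jme force_pseudohp=1 no_extplug=1
 */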

#ifndef JME_NEW_PM_API
static void
jme_pci_wakeup_enable(struct jme_adapter *jme, int enable)
{
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)
	pci_enable_wake(jme->pdev, PCI_D1, enable);
	pci_enable_wake(jme->pdev, PCI_D2, enable);
	pci_enable_wake(jme->pdev, PCI_D3hot, enable);
	pci_enable_wake(jme->pdev, PCI_D3cold, enable);
#else
	pci_pme_active(jme->pdev, enable);
#endif
}
#endif

static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, val, again = (reg == MII_BMSR) ? 1 : 0;
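	/*
	 * MII_BMSR latches link-state changes, so it is read twice:
	 * the second read returns the current status.
	 */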

read_again:
	jwrite32(jme, JME_SMI, SMI_OP_REQ |
				smi_phy_addr(phy) |
				smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
		udelay(20);
		val = jread32(jme, JME_SMI);
		if ((val & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0) {
		pr_err("phy(%d) read timeout : %d\n", phy, reg);
		return 0;
	}

	if (again--)
		goto read_again;

	return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
}

static void
jme_mdio_write(struct net_device *netdev,
				int phy, int reg, int val)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i;

	jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
		((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
		smi_phy_addr(phy) | smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
		udelay(20);
		if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0)
		pr_err("phy(%d) write timeout : %d\n", phy, reg);
}

static inline void
jme_reset_phy_processor(struct jme_adapter *jme)
{
	u32 val;

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_ADVERTISE, ADVERTISE_ALL |
			ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

	if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
		jme_mdio_write(jme->dev,
				jme->mii_if.phy_id,
				MII_CTRL1000,
				ADVERTISE_1000FULL | ADVERTISE_1000HALF);

	val = jme_mdio_read(jme->dev,
				jme->mii_if.phy_id,
				MII_BMCR);

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_BMCR, val | BMCR_RESET);
}

static void
jme_setup_wakeup_frame(struct jme_adapter *jme,
		       const u32 *mask, u32 crc, int fnr)
{
	int i;

	/*
	 * Setup CRC pattern
	 */
	jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
	wmb();
	jwrite32(jme, JME_WFODP, crc);
	wmb();

	/*
	 * Setup Mask
	 */
	for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
		jwrite32(jme, JME_WFOI,
				((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
				(fnr & WFOI_FRAME_SEL));
		wmb();
		jwrite32(jme, JME_WFODP, mask[i]);
		wmb();
	}
}

static inline void
jme_mac_rxclk_off(struct jme_adapter *jme)
{
	jme->reg_gpreg1 |= GPREG1_RXCLKOFF;
	jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
}

static inline void
jme_mac_rxclk_on(struct jme_adapter *jme)
{
	jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF;
	jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
}

static inline void
jme_mac_txclk_off(struct jme_adapter *jme)
{
	jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC);
	jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_mac_txclk_on(struct jme_adapter *jme)
{
	u32 speed = jme->reg_ghc & GHC_SPEED;
	if (speed == GHC_SPEED_1000M)
		jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
	else
		jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
	jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_reset_ghc_speed(struct jme_adapter *jme)
{
	jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX);
	jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_reset_250A2_workaround(struct jme_adapter *jme)
{
	jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
			     GPREG1_RSSPATCH);
	jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
}

static inline void
jme_assert_ghc_reset(struct jme_adapter *jme)
{
	jme->reg_ghc |= GHC_SWRST;
	jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_clear_ghc_reset(struct jme_adapter *jme)
{
	jme->reg_ghc &= ~GHC_SWRST;
	jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_reset_mac_processor(struct jme_adapter *jme)
{
	static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
	u32 crc = 0xCDCDCDCD;
	u32 gpreg0;
	int i;

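	/*
	 * Assert the software reset with the MAC clocks running, then
	 * stop the clocks, clear the reset, and pulse the clocks once
	 * more, so the reset is clocked through both the RX and TX
	 * clock domains.
	 */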
	jme_reset_ghc_speed(jme);
	jme_reset_250A2_workaround(jme);

	jme_mac_rxclk_on(jme);
	jme_mac_txclk_on(jme);
	udelay(1);
	jme_assert_ghc_reset(jme);
	udelay(1);
	jme_mac_rxclk_off(jme);
	jme_mac_txclk_off(jme);
	udelay(1);
	jme_clear_ghc_reset(jme);
	udelay(1);
	jme_mac_rxclk_on(jme);
	jme_mac_txclk_on(jme);
	udelay(1);
	jme_mac_rxclk_off(jme);
	jme_mac_txclk_off(jme);

	jwrite32(jme, JME_RXDBA_LO, 0x00000000);
	jwrite32(jme, JME_RXDBA_HI, 0x00000000);
	jwrite32(jme, JME_RXQDC, 0x00000000);
	jwrite32(jme, JME_RXNDA, 0x00000000);
	jwrite32(jme, JME_TXDBA_LO, 0x00000000);
	jwrite32(jme, JME_TXDBA_HI, 0x00000000);
	jwrite32(jme, JME_TXQDC, 0x00000000);
	jwrite32(jme, JME_TXNDA, 0x00000000);

	jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
	jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
	for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
		jme_setup_wakeup_frame(jme, mask, crc, i);
	if (jme->fpgaver)
		gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL;
	else
		gpreg0 = GPREG0_DEFAULT;
	jwrite32(jme, JME_GPREG0, gpreg0);
}

static inline void
jme_clear_pm(struct jme_adapter *jme)
{
	jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
}

static int
jme_reload_eeprom(struct jme_adapter *jme)
{
	u32 val;
	int i;

	val = jread32(jme, JME_SMBCSR);

	if (val & SMBCSR_EEPROMD) {
		val |= SMBCSR_CNACK;
		jwrite32(jme, JME_SMBCSR, val);
		val |= SMBCSR_RELOAD;
		jwrite32(jme, JME_SMBCSR, val);
		mdelay(12);

		for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) {
			mdelay(1);
			if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
				break;
		}

		if (i == 0) {
			pr_err("eeprom reload timeout\n");
			return -EIO;
		}
	}

	return 0;
}

static void
jme_load_macaddr(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned char macaddr[6];
	u32 val;

	spin_lock_bh(&jme->macaddr_lock);
	val = jread32(jme, JME_RXUMA_LO);
	macaddr[0] = (val >>  0) & 0xFF;
	macaddr[1] = (val >>  8) & 0xFF;
	macaddr[2] = (val >> 16) & 0xFF;
	macaddr[3] = (val >> 24) & 0xFF;
	val = jread32(jme, JME_RXUMA_HI);
	macaddr[4] = (val >>  0) & 0xFF;
	macaddr[5] = (val >>  8) & 0xFF;
	memcpy(netdev->dev_addr, macaddr, 6);
	spin_unlock_bh(&jme->macaddr_lock);
}

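/*
 * Each PCC_Px level programs a timeout/packet-count pair into the RX
 * packet-completion-coalescing register (the usual coalescing scheme:
 * an interrupt after the packet count is reached or the timeout
 * expires, whichever comes first).
 */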
static inline void
jme_set_rx_pcc(struct jme_adapter *jme, int p)
{
	switch (p) {
	case PCC_OFF:
		jwrite32(jme, JME_PCCRX0,
			((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P1:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P2:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P3:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	default:
		break;
	}
	wmb();

	if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
		netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p);
}

static void
jme_start_irq(struct jme_adapter *jme)
{
	register struct dynpcc_info *dpi = &(jme->dpi);

	jme_set_rx_pcc(jme, PCC_P1);
	dpi->cur		= PCC_P1;
	dpi->attempt		= PCC_P1;
	dpi->cnt		= 0;

	jwrite32(jme, JME_PCCTX,
			((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
			((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
			PCCTXQ0_EN
		);

	/*
	 * Enable Interrupts
	 */
	jwrite32(jme, JME_IENS, INTR_ENABLE);
}

static inline void
jme_stop_irq(struct jme_adapter *jme)
{
	/*
	 * Disable Interrupts
	 */
	jwrite32f(jme, JME_IENC, INTR_ENABLE);
}

static u32
jme_linkstat_from_phy(struct jme_adapter *jme)
{
	u32 phylink, bmsr;

	phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17);
	bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
	if (bmsr & BMSR_ANCOMP)
		phylink |= PHY_LINK_AUTONEG_COMPLETE;

	return phylink;
}

static inline void
jme_set_phyfifo_5level(struct jme_adapter *jme)
{
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
}

static inline void
jme_set_phyfifo_8level(struct jme_adapter *jme)
{
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
}

static int
jme_check_link(struct net_device *netdev, int testonly)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr;
	char linkmsg[64];
	int rc = 0;

	linkmsg[0] = '\0';

	if (jme->fpgaver)
		phylink = jme_linkstat_from_phy(jme);
	else
		phylink = jread32(jme, JME_PHY_LINK);

	if (phylink & PHY_LINK_UP) {
		if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
			/*
			 * If autonegotiation was not enabled, the
			 * speed/duplex info must be read from the PHY
			 * over SMI
			 */
			phylink = PHY_LINK_UP;

			bmcr = jme_mdio_read(jme->dev,
						jme->mii_if.phy_id,
						MII_BMCR);

			phylink |= ((bmcr & BMCR_SPEED1000) &&
					(bmcr & BMCR_SPEED100) == 0) ?
					PHY_LINK_SPEED_1000M :
					(bmcr & BMCR_SPEED100) ?
					PHY_LINK_SPEED_100M :
					PHY_LINK_SPEED_10M;

			phylink |= (bmcr & BMCR_FULLDPLX) ?
					 PHY_LINK_DUPLEX : 0;

			strcat(linkmsg, "Forced: ");
		} else {
			/*
			 * Keep polling until speed/duplex resolution
			 * completes
			 */
			while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
				--cnt) {

				udelay(1);

				if (jme->fpgaver)
					phylink = jme_linkstat_from_phy(jme);
				else
					phylink = jread32(jme, JME_PHY_LINK);
			}
			if (!cnt)
				pr_err("Timeout waiting for speed/duplex resolve\n");

			strcat(linkmsg, "ANed: ");
		}

		if (jme->phylink == phylink) {
			rc = 1;
			goto out;
		}
		if (testonly)
			goto out;

		jme->phylink = phylink;

		/*
		 * The speed/duplex bits of jme->reg_ghc were already
		 * cleared by jme_reset_mac_processor()
		 */
		switch (phylink & PHY_LINK_SPEED_MASK) {
		case PHY_LINK_SPEED_10M:
			jme->reg_ghc |= GHC_SPEED_10M;
			strcat(linkmsg, "10 Mbps, ");
			break;
		case PHY_LINK_SPEED_100M:
			jme->reg_ghc |= GHC_SPEED_100M;
			strcat(linkmsg, "100 Mbps, ");
			break;
		case PHY_LINK_SPEED_1000M:
			jme->reg_ghc |= GHC_SPEED_1000M;
			strcat(linkmsg, "1000 Mbps, ");
			break;
		default:
			break;
		}

		if (phylink & PHY_LINK_DUPLEX) {
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
			jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX);
			jme->reg_ghc |= GHC_DPX;
		} else {
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
						TXMCS_BACKOFF |
						TXMCS_CARRIERSENSE |
						TXMCS_COLLISION);
			jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX);
		}

		jwrite32(jme, JME_GHC, jme->reg_ghc);

		if (is_buggy250(jme->pdev->device, jme->chiprev)) {
			jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
					     GPREG1_RSSPATCH);
			if (!(phylink & PHY_LINK_DUPLEX))
				jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH;
			switch (phylink & PHY_LINK_SPEED_MASK) {
			case PHY_LINK_SPEED_10M:
				jme_set_phyfifo_8level(jme);
				jme->reg_gpreg1 |= GPREG1_RSSPATCH;
				break;
			case PHY_LINK_SPEED_100M:
				jme_set_phyfifo_5level(jme);
				jme->reg_gpreg1 |= GPREG1_RSSPATCH;
				break;
			case PHY_LINK_SPEED_1000M:
				jme_set_phyfifo_8level(jme);
				break;
			default:
				break;
			}
		}
		jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);

		strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
					"Full-Duplex, " :
					"Half-Duplex, ");
		strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
					"MDI-X" :
					"MDI");
		netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg);
		netif_carrier_on(netdev);
	} else {
		if (testonly)
			goto out;

		netif_info(jme, link, jme->dev, "Link is down\n");
		jme->phylink = 0;
		netif_carrier_off(netdev);
	}

out:
	return rc;
}

static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);

	txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
				   TX_RING_ALLOC_SIZE(jme->tx_ring_size),
				   &(txring->dmaalloc),
				   GFP_ATOMIC);

	if (!txring->alloc)
		goto err_set_null;

	/*
	 * 16-byte alignment
	 */
	txring->desc		= (void *)ALIGN((unsigned long)(txring->alloc),
						RING_DESC_ALIGN);
	txring->dma		= ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
	txring->next_to_use	= 0;
	atomic_set(&txring->next_to_clean, 0);
	atomic_set(&txring->nr_free, jme->tx_ring_size);

	txring->bufinf		= kmalloc(sizeof(struct jme_buffer_info) *
					jme->tx_ring_size, GFP_ATOMIC);
	if (unlikely(!(txring->bufinf)))
		goto err_free_txring;

	/*
	 * Initialize Transmit Descriptors
	 */
	memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
	memset(txring->bufinf, 0,
		sizeof(struct jme_buffer_info) * jme->tx_ring_size);

	return 0;

err_free_txring:
	dma_free_coherent(&(jme->pdev->dev),
			  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
			  txring->alloc,
			  txring->dmaalloc);

err_set_null:
	txring->desc = NULL;
	txring->dmaalloc = 0;
	txring->dma = 0;
	txring->bufinf = NULL;

	return -ENOMEM;
}

static void
jme_free_tx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi;

	if (txring->alloc) {
		if (txring->bufinf) {
			for (i = 0 ; i < jme->tx_ring_size ; ++i) {
				txbi = txring->bufinf + i;
				if (txbi->skb) {
					dev_kfree_skb(txbi->skb);
					txbi->skb = NULL;
				}
				txbi->mapping		= 0;
				txbi->len		= 0;
				txbi->nr_desc		= 0;
				txbi->start_xmit	= 0;
			}
			kfree(txring->bufinf);
		}

		dma_free_coherent(&(jme->pdev->dev),
				  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
				  txring->alloc,
				  txring->dmaalloc);

		txring->alloc		= NULL;
		txring->desc		= NULL;
		txring->dmaalloc	= 0;
		txring->dma		= 0;
		txring->bufinf		= NULL;
	}
	txring->next_to_use	= 0;
	atomic_set(&txring->next_to_clean, 0);
	atomic_set(&txring->nr_free, 0);
}

static inline void
jme_enable_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
	wmb();

	/*
	 * Setup TX Queue 0 DMA Base Address
	 */
	jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
	jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup TX Descriptor Count
	 */
	jwrite32(jme, JME_TXQDC, jme->tx_ring_size);

	/*
	 * Enable TX Engine
	 */
	wmb();
	jwrite32f(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);

	/*
	 * Start clock for TX MAC Processor
	 */
	jme_mac_txclk_on(jme);
}

static inline void
jme_restart_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Restart TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);
}

static inline void
jme_disable_tx_engine(struct jme_adapter *jme)
{
	int i;
	u32 val;

	/*
	 * Disable TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
	wmb();

	val = jread32(jme, JME_TXCS);
	for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_TXCS);
		rmb();
	}

	if (!i)
		pr_err("Disable TX engine timeout\n");

	/*
	 * Stop clock for TX MAC Processor
	 */
	jme_mac_txclk_off(jme);
}

static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	register struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxdesc += i;
	rxbi += i;

	rxdesc->dw[0] = 0;
	rxdesc->dw[1] = 0;
	rxdesc->desc1.bufaddrh	= cpu_to_le32((__u64)rxbi->mapping >> 32);
	rxdesc->desc1.bufaddrl	= cpu_to_le32(
					(__u64)rxbi->mapping & 0xFFFFFFFFUL);
	rxdesc->desc1.datalen	= cpu_to_le16(rxbi->len);
	if (jme->dev->features & NETIF_F_HIGHDMA)
		rxdesc->desc1.flags = RXFLAG_64BIT;
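	/*
	 * Make sure the descriptor fields above are visible before the
	 * OWN bit hands the descriptor back to the hardware.
	 */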
	wmb();
	rxdesc->desc1.flags	|= RXFLAG_OWN | RXFLAG_INT;
}

static int
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf + i;
	struct sk_buff *skb;

	skb = netdev_alloc_skb(jme->dev,
		jme->dev->mtu + RX_EXTRA_LEN);
	if (unlikely(!skb))
		return -ENOMEM;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
	skb->dev = jme->dev;
#endif

	rxbi->skb = skb;
	rxbi->len = skb_tailroom(skb);
	rxbi->mapping = pci_map_page(jme->pdev,
					virt_to_page(skb->data),
					offset_in_page(skb->data),
					rxbi->len,
					PCI_DMA_FROMDEVICE);

	return 0;
}

static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxbi += i;

	if (rxbi->skb) {
		pci_unmap_page(jme->pdev,
				 rxbi->mapping,
				 rxbi->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb(rxbi->skb);
		rxbi->skb = NULL;
		rxbi->mapping = 0;
		rxbi->len = 0;
	}
}

static void
jme_free_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	if (rxring->alloc) {
		if (rxring->bufinf) {
			for (i = 0 ; i < jme->rx_ring_size ; ++i)
				jme_free_rx_buf(jme, i);
			kfree(rxring->bufinf);
		}

		dma_free_coherent(&(jme->pdev->dev),
				  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
				  rxring->alloc,
				  rxring->dmaalloc);
		rxring->alloc    = NULL;
		rxring->desc     = NULL;
		rxring->dmaalloc = 0;
		rxring->dma      = 0;
		rxring->bufinf   = NULL;
	}
	rxring->next_to_use   = 0;
	atomic_set(&rxring->next_to_clean, 0);
}

static int
jme_setup_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
				   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
				   &(rxring->dmaalloc),
				   GFP_ATOMIC);
	if (!rxring->alloc)
		goto err_set_null;

	/*
	 * 16-byte alignment
	 */
	rxring->desc		= (void *)ALIGN((unsigned long)(rxring->alloc),
						RING_DESC_ALIGN);
	rxring->dma		= ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
	rxring->next_to_use	= 0;
	atomic_set(&rxring->next_to_clean, 0);

	rxring->bufinf		= kmalloc(sizeof(struct jme_buffer_info) *
					jme->rx_ring_size, GFP_ATOMIC);
	if (unlikely(!(rxring->bufinf)))
		goto err_free_rxring;

	/*
	 * Initialize Receive Descriptors
	 */
	memset(rxring->bufinf, 0,
		sizeof(struct jme_buffer_info) * jme->rx_ring_size);
	for (i = 0 ; i < jme->rx_ring_size ; ++i) {
		if (unlikely(jme_make_new_rx_buf(jme, i))) {
			jme_free_rx_resources(jme);
			return -ENOMEM;
		}

		jme_set_clean_rxdesc(jme, i);
	}

	return 0;

err_free_rxring:
	dma_free_coherent(&(jme->pdev->dev),
			  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
			  rxring->alloc,
			  rxring->dmaalloc);
err_set_null:
	rxring->desc = NULL;
	rxring->dmaalloc = 0;
	rxring->dma = 0;
	rxring->bufinf = NULL;

	return -ENOMEM;
}

static inline void
jme_enable_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0);
	wmb();

	/*
	 * Setup RX DMA Base Address
	 */
	jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
	jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
	jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);

	/*
	 * Setup RX Descriptor Count
	 */
	jwrite32(jme, JME_RXQDC, jme->rx_ring_size);

	/*
	 * Setup Unicast Filter
	 */
	jme_set_unicastaddr(jme->dev);
	jme_set_multi(jme->dev);

	/*
	 * Enable RX Engine
	 */
	wmb();
	jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);

	/*
	 * Start clock for RX MAC Processor
	 */
	jme_mac_rxclk_on(jme);
}

static inline void
jme_restart_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Start RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}

static inline void
jme_disable_rx_engine(struct jme_adapter *jme)
{
	int i;
	u32 val;

	/*
	 * Disable RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs);
	wmb();

	val = jread32(jme, JME_RXCS);
	for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_RXCS);
		rmb();
	}

	if (!i)
		pr_err("Disable RX engine timeout\n");

	/*
	 * Stop clock for RX MAC Processor
	 */
	jme_mac_rxclk_off(jme);
}

static u16
jme_udpsum(struct sk_buff *skb)
{
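	/*
	 * Return the packet's raw UDP checksum field, or 0xFFFF if this
	 * is not a UDP/IPv4 packet. A field of zero means the sender
	 * did not compute a checksum, in which case a missing hardware
	 * checksum-OK flag is not an error (see jme_rxsum_ok()).
	 */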
	u16 csum = 0xFFFFu;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
	struct iphdr *iph;
	int iphlen;
	struct udphdr *udph;
#endif

	if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
		return csum;
	if (skb->protocol != htons(ETH_P_IP))
		return csum;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
	iph = (struct iphdr *)skb_pull(skb, ETH_HLEN);
	iphlen = (iph->ihl << 2);
	if ((iph->protocol != IPPROTO_UDP) ||
	    (skb->len < (iphlen + sizeof(struct udphdr)))) {
		skb_push(skb, ETH_HLEN);
		return csum;
	}
	udph = (struct udphdr *)skb_pull(skb, iphlen);
	csum = udph->check;
	skb_push(skb, iphlen);
	skb_push(skb, ETH_HLEN);
#else
	skb_set_network_header(skb, ETH_HLEN);
	if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
	    (skb->len < (ETH_HLEN +
			(ip_hdr(skb)->ihl << 2) +
			sizeof(struct udphdr)))) {
		skb_reset_network_header(skb);
		return csum;
	}
	skb_set_transport_header(skb,
			ETH_HLEN + (ip_hdr(skb)->ihl << 2));
	csum = udp_hdr(skb)->check;
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
#endif

	return csum;
}

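/*
 * A frame's checksum status is trusted only when every protocol flag
 * reported by the hardware (TCP, UDP, IPv4) is paired with its
 * checksum-OK flag; fragmented datagrams (RXWBFLAG_MF) are never
 * reported as checksum errors.
 */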
static int
jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
{
	if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
		return false;

	if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
			== RXWBFLAG_TCPON)) {
		if (flags & RXWBFLAG_IPV4)
			netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n");
		return false;
	}

	if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
			== RXWBFLAG_UDPON) && jme_udpsum(skb)) {
		if (flags & RXWBFLAG_IPV4)
			netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
		return false;
	}

	if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
			== RXWBFLAG_IPV4)) {
		netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error\n");
		return false;
	}

	return true;
}

static void
jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	struct sk_buff *skb;
	int framesize;

	rxdesc += idx;
	rxbi += idx;

	skb = rxbi->skb;
	pci_dma_sync_single_for_cpu(jme->pdev,
					rxbi->mapping,
					rxbi->len,
					PCI_DMA_FROMDEVICE);

	if (unlikely(jme_make_new_rx_buf(jme, idx))) {
		pci_dma_sync_single_for_device(jme->pdev,
						rxbi->mapping,
						rxbi->len,
						PCI_DMA_FROMDEVICE);

		++(NET_STAT(jme).rx_dropped);
	} else {
		framesize = le16_to_cpu(rxdesc->descwb.framesize)
				- RX_PREPAD_SIZE;

		skb_reserve(skb, RX_PREPAD_SIZE);
		skb_put(skb, framesize);
		skb->protocol = eth_type_trans(skb, jme->dev);

		if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,36)
			skb->ip_summed = CHECKSUM_NONE;
#else
			skb_checksum_none_assert(skb);
#endif

		if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
			if (jme->vlgrp) {
				jme->jme_vlan_rx(skb, jme->vlgrp,
					le16_to_cpu(rxdesc->descwb.vlan));
				NET_STAT(jme).rx_bytes += 4;
			} else {
				dev_kfree_skb(skb);
			}
		} else {
			jme->jme_rx(skb);
		}

		if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) ==
		    cpu_to_le16(RXWBFLAG_DEST_MUL))
			++(NET_STAT(jme).multicast);

		NET_STAT(jme).rx_bytes += framesize;
		++(NET_STAT(jme).rx_packets);
	}

	jme_set_clean_rxdesc(jme, idx);

}

static int
jme_process_receive(struct jme_adapter *jme, int limit)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct rxdesc *rxdesc = rxring->desc;
	int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;

	if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
		goto out_inc;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		goto out_inc;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		goto out_inc;

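	/*
	 * Walk the ring from next_to_clean, stopping at the first
	 * descriptor still owned by the hardware or whose write-back
	 * has not completed.
	 */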
	i = atomic_read(&rxring->next_to_clean);
	while (limit > 0) {
		rxdesc = rxring->desc;
		rxdesc += i;

		if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) ||
		!(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
			goto out;
		--limit;

		rmb();
		desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

		if (unlikely(desccnt > 1 ||
		rxdesc->descwb.errstat & RXWBERR_ALLERR)) {

			if (rxdesc->descwb.errstat & RXWBERR_CRCERR)
				++(NET_STAT(jme).rx_crc_errors);
			else if (rxdesc->descwb.errstat & RXWBERR_OVERUN)
				++(NET_STAT(jme).rx_fifo_errors);
			else
				++(NET_STAT(jme).rx_errors);

			if (desccnt > 1)
				limit -= desccnt - 1;

			for (j = i, ccnt = desccnt ; ccnt-- ; ) {
				jme_set_clean_rxdesc(jme, j);
				j = (j + 1) & (mask);
			}

		} else {
			jme_alloc_and_feed_skb(jme, i);
		}

		i = (i + desccnt) & (mask);
	}

out:
	atomic_set(&rxring->next_to_clean, i);

out_inc:
	atomic_inc(&jme->rx_cleaning);

	return limit > 0 ? limit : 0;

}

static void
jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
{
	if (likely(atmp == dpi->cur)) {
		dpi->cnt = 0;
		return;
	}

	if (dpi->attempt == atmp) {
		++(dpi->cnt);
	} else {
		dpi->attempt = atmp;
		dpi->cnt = 0;
	}

}

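/*
 * Choose a coalescing level from the RX load observed since the last
 * timer tick: P3 for bulk byte throughput, P2 for high packet or
 * interrupt rates, P1 otherwise. A new level is applied only after it
 * has been attempted several times in a row (dpi->cnt > 5), which
 * keeps the setting from flapping.
 */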
static void
jme_dynamic_pcc(struct jme_adapter *jme)
{
	register struct dynpcc_info *dpi = &(jme->dpi);

	if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
		jme_attempt_pcc(dpi, PCC_P3);
	else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD ||
		 dpi->intr_cnt > PCC_INTR_THRESHOLD)
		jme_attempt_pcc(dpi, PCC_P2);
	else
		jme_attempt_pcc(dpi, PCC_P1);

	if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
		if (dpi->attempt < dpi->cur)
			tasklet_schedule(&jme->rxclean_task);
		jme_set_rx_pcc(jme, dpi->attempt);
		dpi->cur = dpi->attempt;
		dpi->cnt = 0;
	}
}

static void
jme_start_pcc_timer(struct jme_adapter *jme)
{
	struct dynpcc_info *dpi = &(jme->dpi);
	dpi->last_bytes		= NET_STAT(jme).rx_bytes;
	dpi->last_pkts		= NET_STAT(jme).rx_packets;
	dpi->intr_cnt		= 0;
	jwrite32(jme, JME_TMCSR,
		TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
}

static inline void
jme_stop_pcc_timer(struct jme_adapter *jme)
{
	jwrite32(jme, JME_TMCSR, 0);
}

static void
jme_shutdown_nic(struct jme_adapter *jme)
{
	u32 phylink;

	phylink = jme_linkstat_from_phy(jme);

	if (!(phylink & PHY_LINK_UP)) {
		/*
		 * Disable all interrupts before issuing the timer
		 */
		jme_stop_irq(jme);
		jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE);
	}
}

static void
jme_pcc_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct net_device *netdev = jme->dev;

	if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
		jme_shutdown_nic(jme);
		return;
	}

	if (unlikely(!netif_carrier_ok(netdev) ||
		(atomic_read(&jme->link_changing) != 1)
	)) {
		jme_stop_pcc_timer(jme);
		return;
	}

	if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
		jme_dynamic_pcc(jme);

	jme_start_pcc_timer(jme);
}

static inline void
jme_polling_mode(struct jme_adapter *jme)
{
	jme_set_rx_pcc(jme, PCC_OFF);
}

static inline void
jme_interrupt_mode(struct jme_adapter *jme)
{
	jme_set_rx_pcc(jme, PCC_P1);
}

static inline int
jme_pseudo_hotplug_enabled(struct jme_adapter *jme)
{
	u32 apmc;
	apmc = jread32(jme, JME_APMC);
	return apmc & JME_APMC_PSEUDO_HP_EN;
}

static void
jme_start_shutdown_timer(struct jme_adapter *jme)
{
	u32 apmc;

	apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN;
	apmc &= ~JME_APMC_EPIEN_CTRL;
	if (!no_extplug) {
		jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN);
		wmb();
	}
	jwrite32f(jme, JME_APMC, apmc);

	jwrite32f(jme, JME_TIMER2, 0);
	set_bit(JME_FLAG_SHUTDOWN, &jme->flags);
	jwrite32(jme, JME_TMCSR,
		TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT));
}

static void
jme_stop_shutdown_timer(struct jme_adapter *jme)
{
	u32 apmc;

	jwrite32f(jme, JME_TMCSR, 0);
	jwrite32f(jme, JME_TIMER2, 0);
	clear_bit(JME_FLAG_SHUTDOWN, &jme->flags);

	apmc = jread32(jme, JME_APMC);
	apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL);
	jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS);
	wmb();
	jwrite32f(jme, JME_APMC, apmc);
}

static void
jme_link_change_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct net_device *netdev = jme->dev;
	int rc;

	while (!atomic_dec_and_test(&jme->link_changing)) {
		atomic_inc(&jme->link_changing);
		netif_info(jme, intr, jme->dev, "Failed to get link-change lock\n");
		while (atomic_read(&jme->link_changing) != 1)
			netif_info(jme, intr, jme->dev, "Waiting for link-change lock\n");
	}

	if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
		goto out;

	jme->old_mtu = netdev->mtu;
	netif_stop_queue(netdev);
	if (jme_pseudo_hotplug_enabled(jme))
		jme_stop_shutdown_timer(jme);

	jme_stop_pcc_timer(jme);
	tasklet_disable(&jme->txclean_task);
	tasklet_disable(&jme->rxclean_task);
	tasklet_disable(&jme->rxempty_task);

	if (netif_carrier_ok(netdev)) {
		jme_disable_rx_engine(jme);
		jme_disable_tx_engine(jme);
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);

		if (test_bit(JME_FLAG_POLL, &jme->flags))
			jme_polling_mode(jme);

		netif_carrier_off(netdev);
	}

	jme_check_link(netdev, 0);
	if (netif_carrier_ok(netdev)) {
		rc = jme_setup_rx_resources(jme);
		if (rc) {
			pr_err("Failed to allocate RX resources, device stopped!\n");
			goto out_enable_tasklet;
		}

		rc = jme_setup_tx_resources(jme);
		if (rc) {
			pr_err("Failed to allocate TX resources, device stopped!\n");
			goto err_out_free_rx_resources;
		}

		jme_enable_rx_engine(jme);
		jme_enable_tx_engine(jme);

		netif_start_queue(netdev);

		if (test_bit(JME_FLAG_POLL, &jme->flags))
			jme_interrupt_mode(jme);

		jme_start_pcc_timer(jme);
	} else if (jme_pseudo_hotplug_enabled(jme)) {
		jme_start_shutdown_timer(jme);
	}

	goto out_enable_tasklet;

err_out_free_rx_resources:
	jme_free_rx_resources(jme);
out_enable_tasklet:
	tasklet_enable(&jme->txclean_task);
	tasklet_hi_enable(&jme->rxclean_task);
	tasklet_hi_enable(&jme->rxempty_task);
out:
	atomic_inc(&jme->link_changing);
}

static void
jme_rx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct dynpcc_info *dpi = &(jme->dpi);

	jme_process_receive(jme, jme->rx_ring_size);
	++(dpi->intr_cnt);

}

static int
jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
{
	struct jme_adapter *jme = jme_napi_priv(holder);
	DECLARE_NETDEV
	int rest;

	rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));

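	/*
	 * Restart the RX engine once for every ring-empty event seen
	 * while polling, counting each as a dropped packet.
	 */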
	while (atomic_read(&jme->rx_empty) > 0) {
		atomic_dec(&jme->rx_empty);
		++(NET_STAT(jme).rx_dropped);
		jme_restart_rx_engine(jme);
	}
	atomic_inc(&jme->rx_empty);

	if (rest) {
		JME_RX_COMPLETE(netdev, holder);
		jme_interrupt_mode(jme);
	}

	JME_NAPI_WEIGHT_SET(budget, rest);
	return JME_NAPI_WEIGHT_VAL(budget) - rest;
}

static void
jme_rx_empty_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		return;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		return;

	netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");

	jme_rx_clean_tasklet(arg);

	while (atomic_read(&jme->rx_empty) > 0) {
		atomic_dec(&jme->rx_empty);
		++(NET_STAT(jme).rx_dropped);
		jme_restart_rx_engine(jme);
	}
	atomic_inc(&jme->rx_empty);
}

static void
jme_wake_queue_if_stopped(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);

	smp_wmb();
	if (unlikely(netif_queue_stopped(jme->dev) &&
	atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
		netif_info(jme, tx_done, jme->dev, "TX queue woken\n");
		netif_wake_queue(jme->dev);
	}

}

static void
jme_tx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct jme_ring *txring = &(jme->txring[0]);
	struct txdesc *txdesc = txring->desc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
	int i, j, cnt = 0, max, err, mask;

	tx_dbg(jme, "Into txclean\n");

	if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
		goto out;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		goto out;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		goto out;

	max = jme->tx_ring_size - atomic_read(&txring->nr_free);
	mask = jme->tx_ring_mask;

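	/*
	 * Reclaim completed descriptors: for each transmitted skb,
	 * unmap its fragment pages, free it, and account the result,
	 * stopping at the first descriptor the hardware still owns.
	 */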
1508         for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {
1509
1510                 ctxbi = txbi + i;
1511
1512                 if (likely(ctxbi->skb &&
1513                 !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {
1514
1515                         tx_dbg(jme, "txclean: %d+%d@%lu\n",
1516                                i, ctxbi->nr_desc, jiffies);
1517
1518                         err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;
1519
1520                         for (j = 1 ; j < ctxbi->nr_desc ; ++j) {
1521                                 ttxbi = txbi + ((i + j) & (mask));
1522                                 txdesc[(i + j) & (mask)].dw[0] = 0;
1523
1524                                 pci_unmap_page(jme->pdev,
1525                                                  ttxbi->mapping,
1526                                                  ttxbi->len,
1527                                                  PCI_DMA_TODEVICE);
1528
1529                                 ttxbi->mapping = 0;
1530                                 ttxbi->len = 0;
1531                         }
1532
1533                         dev_kfree_skb(ctxbi->skb);
1534
1535                         cnt += ctxbi->nr_desc;
1536
1537                         if (unlikely(err)) {
1538                                 ++(NET_STAT(jme).tx_carrier_errors);
1539                         } else {
1540                                 ++(NET_STAT(jme).tx_packets);
1541                                 NET_STAT(jme).tx_bytes += ctxbi->len;
1542                         }
1543
1544                         ctxbi->skb = NULL;
1545                         ctxbi->len = 0;
1546                         ctxbi->start_xmit = 0;
1547
1548                 } else {
1549                         break;
1550                 }
1551
1552                 i = (i + ctxbi->nr_desc) & mask;
1553
1554                 ctxbi->nr_desc = 0;
1555         }
1556
1557         tx_dbg(jme, "txclean: done %d@%lu\n", i, jiffies);
1558         atomic_set(&txring->next_to_clean, i);
1559         atomic_add(cnt, &txring->nr_free);
1560
1561         jme_wake_queue_if_stopped(jme);
1562
1563 out:
1564         atomic_inc(&jme->tx_cleaning);
1565 }
1566
1567 static void
1568 jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
1569 {
1570         /*
1571          * Disable interrupt
1572          */
1573         jwrite32f(jme, JME_IENC, INTR_ENABLE);
1574
1575         if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
1576                 /*
1577                  * Link change event is critical
1578                  * all other events are ignored
1579                  */
1580                 jwrite32(jme, JME_IEVE, intrstat);
1581                 tasklet_schedule(&jme->linkch_task);
1582                 goto out_reenable;
1583         }
1584
1585         if (intrstat & INTR_TMINTR) {
1586                 jwrite32(jme, JME_IEVE, INTR_TMINTR);
1587                 tasklet_schedule(&jme->pcc_task);
1588         }
1589
1590         if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
1591                 jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
1592                 tasklet_schedule(&jme->txclean_task);
1593         }
1594
1595         if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
1596                 jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO |
1597                                                      INTR_PCCRX0 |
1598                                                      INTR_RX0EMP)) |
1599                                         INTR_RX0);
1600         }
1601
1602         if (test_bit(JME_FLAG_POLL, &jme->flags)) {
1603                 if (intrstat & INTR_RX0EMP)
1604                         atomic_inc(&jme->rx_empty);
1605
1606                 if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
1607                         if (likely(JME_RX_SCHEDULE_PREP(jme))) {
1608                                 jme_polling_mode(jme);
1609                                 JME_RX_SCHEDULE(jme);
1610                         }
1611                 }
1612         } else {
1613                 if (intrstat & INTR_RX0EMP) {
1614                         atomic_inc(&jme->rx_empty);
1615                         tasklet_hi_schedule(&jme->rxempty_task);
1616                 } else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
1617                         tasklet_hi_schedule(&jme->rxclean_task);
1618                 }
1619         }
1620
1621 out_reenable:
1622         /*
1623          * Re-enable interrupt
1624          */
1625         jwrite32f(jme, JME_IENS, INTR_ENABLE);
1626 }
1627
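/*
 * INTx handler: on a shared line we must first check that the
 * event register really flags one of our enabled sources, and
 * that the device is still present (an all-ones read means the
 * chip is gone, e.g. on surprise removal). The MSI handler can
 * skip both checks since the vector is exclusively ours.
 */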
1628 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1629 static irqreturn_t
1630 jme_intr(int irq, void *dev_id, struct pt_regs *regs)
1631 #else
1632 static irqreturn_t
1633 jme_intr(int irq, void *dev_id)
1634 #endif
1635 {
1636         struct net_device *netdev = dev_id;
1637         struct jme_adapter *jme = netdev_priv(netdev);
1638         u32 intrstat;
1639
1640         intrstat = jread32(jme, JME_IEVE);
1641
1642         /*
1643          * Check if it's really an interrupt for us
1644          */
1645         if (unlikely((intrstat & INTR_ENABLE) == 0))
1646                 return IRQ_NONE;
1647
1648         /*
1649          * Check if the device still exists
1650          */
1651         if (unlikely(intrstat == ~((typeof(intrstat))0)))
1652                 return IRQ_NONE;
1653
1654         jme_intr_msi(jme, intrstat);
1655
1656         return IRQ_HANDLED;
1657 }
1658
1659 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1660 static irqreturn_t
1661 jme_msi(int irq, void *dev_id, struct pt_regs *regs)
1662 #else
1663 static irqreturn_t
1664 jme_msi(int irq, void *dev_id)
1665 #endif
1666 {
1667         struct net_device *netdev = dev_id;
1668         struct jme_adapter *jme = netdev_priv(netdev);
1669         u32 intrstat;
1670
1671         intrstat = jread32(jme, JME_IEVE);
1672
1673         jme_intr_msi(jme, intrstat);
1674
1675         return IRQ_HANDLED;
1676 }
1677
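/*
 * Kick the timer block's software interrupt trigger (TMCSR_SWIT);
 * the resulting INTR_SWINTR is handled like a link change event
 * in jme_intr_msi(), so the link gets re-evaluated asynchronously.
 */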
1678 static void
1679 jme_reset_link(struct jme_adapter *jme)
1680 {
1681         jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
1682 }
1683
1684 static void
1685 jme_restart_an(struct jme_adapter *jme)
1686 {
1687         u32 bmcr;
1688
1689         spin_lock_bh(&jme->phy_lock);
1690         bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1691         bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
1692         jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1693         spin_unlock_bh(&jme->phy_lock);
1694 }
1695
1696 static int
1697 jme_request_irq(struct jme_adapter *jme)
1698 {
1699         int rc;
1700         struct net_device *netdev = jme->dev;
1701 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1702         irqreturn_t (*handler)(int, void *, struct pt_regs *) = jme_intr;
1703         int irq_flags = SA_SHIRQ;
1704 #else
1705         irq_handler_t handler = jme_intr;
1706         int irq_flags = IRQF_SHARED;
1707 #endif
1708
1709         if (!pci_enable_msi(jme->pdev)) {
1710                 set_bit(JME_FLAG_MSI, &jme->flags);
1711                 handler = jme_msi;
1712                 irq_flags = 0;
1713         }
1714
1715         rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
1716                           netdev);
1717         if (rc) {
1718                 netdev_err(netdev,
1719                            "Unable to request %s interrupt (return: %d)\n",
1720                            test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
1721                            rc);
1722
1723                 if (test_bit(JME_FLAG_MSI, &jme->flags)) {
1724                         pci_disable_msi(jme->pdev);
1725                         clear_bit(JME_FLAG_MSI, &jme->flags);
1726                 }
1727         } else {
1728                 netdev->irq = jme->pdev->irq;
1729         }
1730
1731         return rc;
1732 }
1733
1734 static void
1735 jme_free_irq(struct jme_adapter *jme)
1736 {
1737         free_irq(jme->pdev->irq, jme->dev);
1738         if (test_bit(JME_FLAG_MSI, &jme->flags)) {
1739                 pci_disable_msi(jme->pdev);
1740                 clear_bit(JME_FLAG_MSI, &jme->flags);
1741                 jme->dev->irq = jme->pdev->irq;
1742         }
1743 }
1744
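/*
 * Newer chip revisions (see new_phy_power_ctrl()) need the PHY
 * power controls toggled explicitly, both through the JME_PHY_PWR
 * register and through a vendor-private PCI config dword (PE1),
 * in addition to the MII BMCR_PDOWN bit used on older parts.
 */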
1745 static inline void
1746 jme_new_phy_on(struct jme_adapter *jme)
1747 {
1748         u32 reg;
1749
1750         reg = jread32(jme, JME_PHY_PWR);
1751         reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
1752                  PHY_PWR_DWN2 | PHY_PWR_CLKSEL);
1753         jwrite32(jme, JME_PHY_PWR, reg);
1754
1755         pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
1756         reg &= ~PE1_GPREG0_PBG;
1757         reg |= PE1_GPREG0_ENBG;
1758         pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1759 }
1760
1761 static inline void
1762 jme_new_phy_off(struct jme_adapter *jme)
1763 {
1764         u32 reg;
1765
1766         reg = jread32(jme, JME_PHY_PWR);
1767         reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
1768                PHY_PWR_DWN2 | PHY_PWR_CLKSEL;
1769         jwrite32(jme, JME_PHY_PWR, reg);
1770
1771         pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
1772         reg &= ~PE1_GPREG0_PBG;
1773         reg |= PE1_GPREG0_PDD3COLD;
1774         pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1775 }
1776
1777 static inline void
1778 jme_phy_on(struct jme_adapter *jme)
1779 {
1780         u32 bmcr;
1781
1782         bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1783         bmcr &= ~BMCR_PDOWN;
1784         jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1785
1786         if (new_phy_power_ctrl(jme->chip_main_rev))
1787                 jme_new_phy_on(jme);
1788 }
1789
1790 static inline void
1791 jme_phy_off(struct jme_adapter *jme)
1792 {
1793         u32 bmcr;
1794
1795         bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1796         bmcr |= BMCR_PDOWN;
1797         jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1798
1799         if (new_phy_power_ctrl(jme->chip_main_rev))
1800                 jme_new_phy_off(jme);
1801 }
1802
1803 static int
1804 jme_open(struct net_device *netdev)
1805 {
1806         struct jme_adapter *jme = netdev_priv(netdev);
1807         int rc;
1808
1809         jme_clear_pm(jme);
1810         JME_NAPI_ENABLE(jme);
1811
1812         tasklet_enable(&jme->linkch_task);
1813         tasklet_enable(&jme->txclean_task);
1814         tasklet_hi_enable(&jme->rxclean_task);
1815         tasklet_hi_enable(&jme->rxempty_task);
1816
1817         rc = jme_request_irq(jme);
1818         if (rc)
1819                 goto err_out;
1820
1821         jme_start_irq(jme);
1822
1823         jme_phy_on(jme);
1824         if (test_bit(JME_FLAG_SSET, &jme->flags))
1825                 jme_set_settings(netdev, &jme->old_ecmd);
1826         else
1827                 jme_reset_phy_processor(jme);
1828
1829         jme_reset_link(jme);
1830
1831         return 0;
1832
1833 err_out:
1834         netif_stop_queue(netdev);
1835         netif_carrier_off(netdev);
1836         return rc;
1837 }
1838
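/*
 * Force the PHY to 100Mbps half-duplex. Used on the suspend path
 * (jme_powersave_phy) so a wake-capable link can stay up at a
 * lower speed while the host sleeps.
 */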
1839 static void
1840 jme_set_100m_half(struct jme_adapter *jme)
1841 {
1842         u32 bmcr, tmp;
1843
1844         jme_phy_on(jme);
1845         bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1846         tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
1847                        BMCR_SPEED1000 | BMCR_FULLDPLX);
1848         tmp |= BMCR_SPEED100;
1849
1850         if (bmcr != tmp)
1851                 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);
1852
1853         if (jme->fpgaver)
1854                 jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL);
1855         else
1856                 jwrite32(jme, JME_GHC, GHC_SPEED_100M);
1857 }
1858
1859 #define JME_WAIT_LINK_TIME 2000 /* 2000ms */
1860 static void
1861 jme_wait_link(struct jme_adapter *jme)
1862 {
1863         u32 phylink, to = JME_WAIT_LINK_TIME;
1864
1865         mdelay(1000);
1866         phylink = jme_linkstat_from_phy(jme);
1867         while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
1868                 mdelay(10);
1869                 phylink = jme_linkstat_from_phy(jme);
1870         }
1871 }
1872
1873 static void
1874 jme_powersave_phy(struct jme_adapter *jme)
1875 {
1876         if (jme->reg_pmcs) {
1877                 jme_set_100m_half(jme);
1878                 if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
1879                         jme_wait_link(jme);
1880                 jme_clear_pm(jme);
1881         } else {
1882                 jme_phy_off(jme);
1883         }
1884 }
1885
1886 static int
1887 jme_close(struct net_device *netdev)
1888 {
1889         struct jme_adapter *jme = netdev_priv(netdev);
1890
1891         netif_stop_queue(netdev);
1892         netif_carrier_off(netdev);
1893
1894         jme_stop_irq(jme);
1895         jme_free_irq(jme);
1896
1897         JME_NAPI_DISABLE(jme);
1898
1899         tasklet_disable(&jme->linkch_task);
1900         tasklet_disable(&jme->txclean_task);
1901         tasklet_disable(&jme->rxclean_task);
1902         tasklet_disable(&jme->rxempty_task);
1903
1904         jme_disable_rx_engine(jme);
1905         jme_disable_tx_engine(jme);
1906         jme_reset_mac_processor(jme);
1907         jme_free_rx_resources(jme);
1908         jme_free_tx_resources(jme);
1909         jme->phylink = 0;
1910         jme_phy_off(jme);
1911
1912         return 0;
1913 }
1914
1915 static int
1916 jme_alloc_txdesc(struct jme_adapter *jme,
1917                         struct sk_buff *skb)
1918 {
1919         struct jme_ring *txring = &(jme->txring[0]);
1920         int idx, nr_alloc, mask = jme->tx_ring_mask;
1921
1922         idx = txring->next_to_use;
1923         nr_alloc = skb_shinfo(skb)->nr_frags + 2;
1924
1925         if (unlikely(atomic_read(&txring->nr_free) < nr_alloc))
1926                 return -1;
1927
1928         atomic_sub(nr_alloc, &txring->nr_free);
1929
1930         txring->next_to_use = (txring->next_to_use + nr_alloc) & mask;
1931
1932         return idx;
1933 }
1934
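/*
 * TX descriptor accounting: each skb reserves nr_frags + 2 slots,
 * one header descriptor at idx, one at idx + 1 for the linear
 * part, and one per page fragment. The ring size is a power of
 * two, so indices wrap cheaply with "& mask", e.g.
 * (1022 + 3) & 1023 == 1 for the default 1024-entry ring.
 */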
1935 static void
1936 jme_fill_tx_map(struct pci_dev *pdev,
1937                 struct txdesc *txdesc,
1938                 struct jme_buffer_info *txbi,
1939                 struct page *page,
1940                 u32 page_offset,
1941                 u32 len,
1942                 u8 hidma)
1943 {
1944         dma_addr_t dmaaddr;
1945
1946         dmaaddr = pci_map_page(pdev,
1947                                 page,
1948                                 page_offset,
1949                                 len,
1950                                 PCI_DMA_TODEVICE);
1951
1952         pci_dma_sync_single_for_device(pdev,
1953                                        dmaaddr,
1954                                        len,
1955                                        PCI_DMA_TODEVICE);
1956
1957         txdesc->dw[0] = 0;
1958         txdesc->dw[1] = 0;
1959         txdesc->desc2.flags     = TXFLAG_OWN;
1960         txdesc->desc2.flags     |= (hidma) ? TXFLAG_64BIT : 0;
1961         txdesc->desc2.datalen   = cpu_to_le16(len);
1962         txdesc->desc2.bufaddrh  = cpu_to_le32((__u64)dmaaddr >> 32);
1963         txdesc->desc2.bufaddrl  = cpu_to_le32(
1964                                         (__u64)dmaaddr & 0xFFFFFFFFUL);
1965
1966         txbi->mapping = dmaaddr;
1967         txbi->len = len;
1968 }
1969
1970 static void
1971 jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
1972 {
1973         struct jme_ring *txring = &(jme->txring[0]);
1974         struct txdesc *txdesc = txring->desc, *ctxdesc;
1975         struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
1976         u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
1977         int i, nr_frags = skb_shinfo(skb)->nr_frags;
1978         int mask = jme->tx_ring_mask;
1979         struct skb_frag_struct *frag;
1980         u32 len;
1981
1982         for (i = 0 ; i < nr_frags ; ++i) {
1983                 frag = &skb_shinfo(skb)->frags[i];
1984                 ctxdesc = txdesc + ((idx + i + 2) & (mask));
1985                 ctxbi = txbi + ((idx + i + 2) & (mask));
1986
1987                 jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page,
1988                                  frag->page_offset, frag->size, hidma);
1989         }
1990
1991         len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
1992         ctxdesc = txdesc + ((idx + 1) & (mask));
1993         ctxbi = txbi + ((idx + 1) & (mask));
1994         jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
1995                         offset_in_page(skb->data), len, hidma);
1996
1997 }
1998
1999 static int
2000 jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
2001 {
2002         if (unlikely(
2003 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17)
2004         skb_shinfo(skb)->tso_size
2005 #else
2006         skb_shinfo(skb)->gso_size
2007 #endif
2008                         && skb_header_cloned(skb) &&
2009                         pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
2010                 dev_kfree_skb(skb);
2011                 return -1;
2012         }
2013
2014         return 0;
2015 }
2016
2017 static int
2018 jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
2019 {
2020 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17)
2021         *mss = cpu_to_le16(skb_shinfo(skb)->tso_size << TXDESC_MSS_SHIFT);
2022 #else
2023         *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
2024 #endif
2025         if (*mss) {
2026                 *flags |= TXFLAG_LSEN;
2027
2028                 if (skb->protocol == htons(ETH_P_IP)) {
2029                         struct iphdr *iph = ip_hdr(skb);
2030
2031                         iph->check = 0;
2032                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2033                                                                 iph->daddr, 0,
2034                                                                 IPPROTO_TCP,
2035                                                                 0);
2036                 } else {
2037                         struct ipv6hdr *ip6h = ipv6_hdr(skb);
2038
2039                         tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
2040                                                                 &ip6h->daddr, 0,
2041                                                                 IPPROTO_TCP,
2042                                                                 0);
2043                 }
2044
2045                 return 0;
2046         }
2047
2048         return 1;
2049 }
2050
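/*
 * For TSO the TCP checksum field must be preseeded with the
 * pseudo-header sum (computed with length 0) so the hardware can
 * complete it per segment; that is what the csum_*_magic() calls
 * above do. jme_tx_tso() returns 0 when TSO is engaged and 1 when
 * it is not, in which case the caller falls back to jme_tx_csum().
 */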
2051 static void
2052 jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
2053 {
2054 #ifdef CHECKSUM_PARTIAL
2055         if (skb->ip_summed == CHECKSUM_PARTIAL)
2056 #else
2057         if (skb->ip_summed == CHECKSUM_HW)
2058 #endif
2059         {
2060                 u8 ip_proto;
2061
2062 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
2063                 if (skb->protocol == htons(ETH_P_IP))
2064                         ip_proto = ip_hdr(skb)->protocol;
2065                 else if (skb->protocol == htons(ETH_P_IPV6))
2066                         ip_proto = ipv6_hdr(skb)->nexthdr;
2067                 else
2068                         ip_proto = 0;
2069 #else
2070                 switch (skb->protocol) {
2071                 case htons(ETH_P_IP):
2072                         ip_proto = ip_hdr(skb)->protocol;
2073                         break;
2074                 case htons(ETH_P_IPV6):
2075                         ip_proto = ipv6_hdr(skb)->nexthdr;
2076                         break;
2077                 default:
2078                         ip_proto = 0;
2079                         break;
2080                 }
2081 #endif
2082
2083                 switch (ip_proto) {
2084                 case IPPROTO_TCP:
2085                         *flags |= TXFLAG_TCPCS;
2086                         break;
2087                 case IPPROTO_UDP:
2088                         *flags |= TXFLAG_UDPCS;
2089                         break;
2090                 default:
2091                         netif_err(jme, tx_err, jme->dev, "Unsupported upper layer protocol\n");
2092                         break;
2093                 }
2094         }
2095 }
2096
2097 static inline void
2098 jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
2099 {
2100         if (vlan_tx_tag_present(skb)) {
2101                 *flags |= TXFLAG_TAGON;
2102                 *vlan = cpu_to_le16(vlan_tx_tag_get(skb));
2103         }
2104 }
2105
2106 static int
2107 jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2108 {
2109         struct jme_ring *txring = &(jme->txring[0]);
2110         struct txdesc *txdesc;
2111         struct jme_buffer_info *txbi;
2112         u8 flags;
2113
2114         txdesc = (struct txdesc *)txring->desc + idx;
2115         txbi = txring->bufinf + idx;
2116
2117         txdesc->dw[0] = 0;
2118         txdesc->dw[1] = 0;
2119         txdesc->dw[2] = 0;
2120         txdesc->dw[3] = 0;
2121         txdesc->desc1.pktsize = cpu_to_le16(skb->len);
2122         /*
2123          * Set the OWN bit last, in case the kernel transmits
2124          * faster than the NIC and the NIC tries to send this
2125          * descriptor before we tell it to start sending this
2126          * TX queue. All other fields are already filled in
2127          * correctly by then.
2128          */
2129         wmb();
2130         flags = TXFLAG_OWN | TXFLAG_INT;
2131         /*
2132          * Set checksum flags when not doing TSO
2133          */
2134         if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
2135                 jme_tx_csum(jme, skb, &flags);
2136         jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
2137         jme_map_tx_skb(jme, skb, idx);
2138         txdesc->desc1.flags = flags;
2139         /*
2140          * Set the tx buffer info only after telling the NIC to
2141          * send, for better tx_clean timing.
2142          */
2143         wmb();
2144         txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
2145         txbi->skb = skb;
2146         txbi->len = skb->len;
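        /*
         * start_xmit == 0 marks a slot as not in flight elsewhere
         * (see jme_stop_queue_if_full), so if jiffies happens to
         * be zero substitute all-ones to keep the stamp nonzero.
         */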
2147         txbi->start_xmit = jiffies;
2148         if (!txbi->start_xmit)
2149                 txbi->start_xmit = (0UL-1);
2150
2151         return 0;
2152 }
2153
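/*
 * Queue flow control: stop the queue once fewer than
 * MAX_SKB_FRAGS + 2 descriptors remain (the worst case for one
 * skb), and wake it again once nr_free recovers to
 * tx_wake_threshold (half the ring by default). A slot whose
 * start_xmit stamp is older than TX_TIMEOUT also stops the queue
 * so the watchdog can kick in.
 */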
2154 static void
2155 jme_stop_queue_if_full(struct jme_adapter *jme)
2156 {
2157         struct jme_ring *txring = &(jme->txring[0]);
2158         struct jme_buffer_info *txbi = txring->bufinf;
2159         int idx = atomic_read(&txring->next_to_clean);
2160
2161         txbi += idx;
2162
2163         smp_wmb();
2164         if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
2165                 netif_stop_queue(jme->dev);
2166                 netif_info(jme, tx_queued, jme->dev, "TX Queue Paused\n");
2167                 smp_wmb();
2168                 if (atomic_read(&txring->nr_free)
2169                         >= (jme->tx_wake_threshold)) {
2170                         netif_wake_queue(jme->dev);
2171                         netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Woken\n");
2172                 }
2173         }
2174
2175         if (unlikely(txbi->start_xmit &&
2176                         (jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
2177                         txbi->skb)) {
2178                 netif_stop_queue(jme->dev);
2179                 netif_info(jme, tx_queued, jme->dev,
2180                            "TX Queue Stopped %d@%lu\n", idx, jiffies);
2181         }
2182 }
2183
2184 /*
2185  * This function is already protected by netif_tx_lock()
2186  */
2187
2188 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,31)
2189 static int
2190 #else
2191 static netdev_tx_t
2192 #endif
2193 jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2194 {
2195         struct jme_adapter *jme = netdev_priv(netdev);
2196         int idx;
2197
2198         if (unlikely(jme_expand_header(jme, skb))) {
2199                 ++(NET_STAT(jme).tx_dropped);
2200                 return NETDEV_TX_OK;
2201         }
2202
2203         idx = jme_alloc_txdesc(jme, skb);
2204
2205         if (unlikely(idx < 0)) {
2206                 netif_stop_queue(netdev);
2207                 netif_err(jme, tx_err, jme->dev,
2208                           "BUG! Tx ring full when queue awake!\n");
2209
2210                 return NETDEV_TX_BUSY;
2211         }
2212
2213         jme_fill_tx_desc(jme, skb, idx);
2214
2215         jwrite32(jme, JME_TXCS, jme->reg_txcs |
2216                                 TXCS_SELECT_QUEUE0 |
2217                                 TXCS_QUEUE0S |
2218                                 TXCS_ENABLE);
2219 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,29)
2220         netdev->trans_start = jiffies;
2221 #endif
2222
2223         tx_dbg(jme, "xmit: %d+%d@%lu\n",
2224                idx, skb_shinfo(skb)->nr_frags + 2, jiffies);
2225         jme_stop_queue_if_full(jme);
2226
2227         return NETDEV_TX_OK;
2228 }
2229
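/*
 * The station address is split across two registers: bytes 0-3
 * go into RXUMA_LO and bytes 4-5 into the low half of RXUMA_HI.
 */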
2230 static void
2231 jme_set_unicastaddr(struct net_device *netdev)
2232 {
2233         struct jme_adapter *jme = netdev_priv(netdev);
2234         u32 val;
2235
2236         val = (netdev->dev_addr[3] & 0xff) << 24 |
2237               (netdev->dev_addr[2] & 0xff) << 16 |
2238               (netdev->dev_addr[1] & 0xff) <<  8 |
2239               (netdev->dev_addr[0] & 0xff);
2240         jwrite32(jme, JME_RXUMA_LO, val);
2241         val = (netdev->dev_addr[5] & 0xff) << 8 |
2242               (netdev->dev_addr[4] & 0xff);
2243         jwrite32(jme, JME_RXUMA_HI, val);
2244 }
2245
2246 static int
2247 jme_set_macaddr(struct net_device *netdev, void *p)
2248 {
2249         struct jme_adapter *jme = netdev_priv(netdev);
2250         struct sockaddr *addr = p;
2251
2252         if (netif_running(netdev))
2253                 return -EBUSY;
2254
2255         spin_lock_bh(&jme->macaddr_lock);
2256         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2257         jme_set_unicastaddr(netdev);
2258         spin_unlock_bh(&jme->macaddr_lock);
2259
2260         return 0;
2261 }
2262
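/*
 * Multicast filtering uses a 64-bin hash: the low 6 bits of the
 * Ethernet CRC of each address select one bit in the
 * RXMCHT_LO/RXMCHT_HI register pair. Promiscuous and all-multi
 * modes bypass the hash entirely.
 */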
2263 static void
2264 jme_set_multi(struct net_device *netdev)
2265 {
2266         struct jme_adapter *jme = netdev_priv(netdev);
2267         u32 mc_hash[2] = {};
2268 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,33)
2269         int i;
2270 #endif
2271
2272         spin_lock_bh(&jme->rxmcs_lock);
2273
2274         jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;
2275
2276         if (netdev->flags & IFF_PROMISC) {
2277                 jme->reg_rxmcs |= RXMCS_ALLFRAME;
2278         } else if (netdev->flags & IFF_ALLMULTI) {
2279                 jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
2280         } else if (netdev->flags & IFF_MULTICAST) {
2281 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)
2282                 struct dev_mc_list *mclist;
2283 #else
2284                 struct netdev_hw_addr *ha;
2285 #endif
2286                 int bit_nr;
2287
2288                 jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
2289 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,33)
2290                 for (i = 0, mclist = netdev->mc_list;
2291                         mclist && i < netdev->mc_count;
2292                         ++i, mclist = mclist->next) {
2293 #elif LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)
2294                 netdev_for_each_mc_addr(mclist, netdev) {
2295 #else
2296                 netdev_for_each_mc_addr(ha, netdev) {
2297 #endif
2298 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)
2299                         bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
2300 #else
2301                         bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F;
2302 #endif
2303                         mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
2304                 }
2305
2306                 jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
2307                 jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
2308         }
2309
2310         wmb();
2311         jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2312
2313         spin_unlock_bh(&jme->rxmcs_lock);
2314 }
2315
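/*
 * MTU policy: above 4000 bytes the RX FIFO threshold is lowered
 * to 64 QWORDs (128 QWORDs otherwise), and above 1900 bytes the
 * checksum and TSO offloads are turned off, apparently because
 * the hardware cannot offload them for larger frames.
 */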
2316 static int
2317 jme_change_mtu(struct net_device *netdev, int new_mtu)
2318 {
2319         struct jme_adapter *jme = netdev_priv(netdev);
2320
2321         if (new_mtu == jme->old_mtu)
2322                 return 0;
2323
2324         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
2325                 ((new_mtu) < IPV6_MIN_MTU))
2326                 return -EINVAL;
2327
2328         jme->reg_rxcs &= ~RXCS_FIFOTHNP;
2329         if (new_mtu > 4000)
2330                 jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
2331         else
2332                 jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
2333         jme_restart_rx_engine(jme);
2337
2338         if (new_mtu > 1900) {
2339                 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2340                                 NETIF_F_TSO | NETIF_F_TSO6);
2341         } else {
2342                 if (test_bit(JME_FLAG_TXCSUM, &jme->flags))
2343                         netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2344                 if (test_bit(JME_FLAG_TSO, &jme->flags))
2345                         netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
2346         }
2347
2348         netdev->mtu = new_mtu;
2349         jme_reset_link(jme);
2350
2351         return 0;
2352 }
2353
2354 static void
2355 jme_tx_timeout(struct net_device *netdev)
2356 {
2357         struct jme_adapter *jme = netdev_priv(netdev);
2358
2359         jme->phylink = 0;
2360         jme_reset_phy_processor(jme);
2361         if (test_bit(JME_FLAG_SSET, &jme->flags))
2362                 jme_set_settings(netdev, &jme->old_ecmd);
2363
2364         /*
2365          * Force to Reset the link again
2366          */
2367         jme_reset_link(jme);
2368 }
2369
2370 static inline void jme_pause_rx(struct jme_adapter *jme)
2371 {
2372         atomic_dec(&jme->link_changing);
2373
2374         jme_set_rx_pcc(jme, PCC_OFF);
2375         if (test_bit(JME_FLAG_POLL, &jme->flags)) {
2376                 JME_NAPI_DISABLE(jme);
2377         } else {
2378                 tasklet_disable(&jme->rxclean_task);
2379                 tasklet_disable(&jme->rxempty_task);
2380         }
2381 }
2382
2383 static inline void jme_resume_rx(struct jme_adapter *jme)
2384 {
2385         struct dynpcc_info *dpi = &(jme->dpi);
2386
2387         if (test_bit(JME_FLAG_POLL, &jme->flags)) {
2388                 JME_NAPI_ENABLE(jme);
2389         } else {
2390                 tasklet_hi_enable(&jme->rxclean_task);
2391                 tasklet_hi_enable(&jme->rxempty_task);
2392         }
2393         dpi->cur                = PCC_P1;
2394         dpi->attempt            = PCC_P1;
2395         dpi->cnt                = 0;
2396         jme_set_rx_pcc(jme, PCC_P1);
2397
2398         atomic_inc(&jme->link_changing);
2399 }
2400
2401 static void
2402 jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2403 {
2404         struct jme_adapter *jme = netdev_priv(netdev);
2405
2406         jme_pause_rx(jme);
2407         jme->vlgrp = grp;
2408         jme_resume_rx(jme);
2409 }
2410
2411 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
2412 static void
2413 jme_vlan_rx_kill_vid(struct net_device *netdev, unsigned short vid)
2414 {
2415         struct jme_adapter *jme = netdev_priv(netdev);
2416
2417         if (jme->vlgrp) {
2418                 jme_pause_rx(jme);
2419 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,20)
2420                 jme->vlgrp->vlan_devices[vid] = NULL;
2421 #else
2422                 vlan_group_set_device(jme->vlgrp, vid, NULL);
2423 #endif
2424                 jme_resume_rx(jme);
2425         }
2426 }
2427 #endif
2428
2429 static void
2430 jme_get_drvinfo(struct net_device *netdev,
2431                      struct ethtool_drvinfo *info)
2432 {
2433         struct jme_adapter *jme = netdev_priv(netdev);
2434
2435         strcpy(info->driver, DRV_NAME);
2436         strcpy(info->version, DRV_VERSION);
2437         strcpy(info->bus_info, pci_name(jme->pdev));
2438 }
2439
2440 static int
2441 jme_get_regs_len(struct net_device *netdev)
2442 {
2443         return JME_REG_LEN;
2444 }
2445
2446 static void
2447 mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len)
2448 {
2449         int i;
2450
2451         for (i = 0 ; i < len ; i += 4)
2452                 p[i >> 2] = jread32(jme, reg + i);
2453 }
2454
2455 static void
2456 mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr)
2457 {
2458         int i;
2459         u16 *p16 = (u16 *)p;
2460
2461         for (i = 0 ; i < reg_nr ; ++i)
2462                 p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i);
2463 }
2464
2465 static void
2466 jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
2467 {
2468         struct jme_adapter *jme = netdev_priv(netdev);
2469         u32 *p32 = (u32 *)p;
2470
2471         memset(p, 0xFF, JME_REG_LEN);
2472
2473         regs->version = 1;
2474         mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);
2475
2476         p32 += 0x100 >> 2;
2477         mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);
2478
2479         p32 += 0x100 >> 2;
2480         mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);
2481
2482         p32 += 0x100 >> 2;
2483         mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);
2484
2485         p32 += 0x100 >> 2;
2486         mdio_memcpy(jme, p32, JME_PHY_REG_NR);
2487 }
2488
2489 static int
2490 jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
2491 {
2492         struct jme_adapter *jme = netdev_priv(netdev);
2493
2494         ecmd->tx_coalesce_usecs = PCC_TX_TO;
2495         ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
2496
2497         if (test_bit(JME_FLAG_POLL, &jme->flags)) {
2498                 ecmd->use_adaptive_rx_coalesce = false;
2499                 ecmd->rx_coalesce_usecs = 0;
2500                 ecmd->rx_max_coalesced_frames = 0;
2501                 return 0;
2502         }
2503
2504         ecmd->use_adaptive_rx_coalesce = true;
2505
2506         switch (jme->dpi.cur) {
2507         case PCC_P1:
2508                 ecmd->rx_coalesce_usecs = PCC_P1_TO;
2509                 ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
2510                 break;
2511         case PCC_P2:
2512                 ecmd->rx_coalesce_usecs = PCC_P2_TO;
2513                 ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
2514                 break;
2515         case PCC_P3:
2516                 ecmd->rx_coalesce_usecs = PCC_P3_TO;
2517                 ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
2518                 break;
2519         default:
2520                 break;
2521         }
2522
2523         return 0;
2524 }
2525
2526 static int
2527 jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
2528 {
2529         struct jme_adapter *jme = netdev_priv(netdev);
2530         struct dynpcc_info *dpi = &(jme->dpi);
2531
2532         if (netif_running(netdev))
2533                 return -EBUSY;
2534
2535         if (ecmd->use_adaptive_rx_coalesce &&
2536             test_bit(JME_FLAG_POLL, &jme->flags)) {
2537                 clear_bit(JME_FLAG_POLL, &jme->flags);
2538                 jme->jme_rx = netif_rx;
2539                 jme->jme_vlan_rx = vlan_hwaccel_rx;
2540                 dpi->cur                = PCC_P1;
2541                 dpi->attempt            = PCC_P1;
2542                 dpi->cnt                = 0;
2543                 jme_set_rx_pcc(jme, PCC_P1);
2544                 jme_interrupt_mode(jme);
2545         } else if (!(ecmd->use_adaptive_rx_coalesce) &&
2546                    !(test_bit(JME_FLAG_POLL, &jme->flags))) {
2547                 set_bit(JME_FLAG_POLL, &jme->flags);
2548                 jme->jme_rx = netif_receive_skb;
2549                 jme->jme_vlan_rx = vlan_hwaccel_receive_skb;
2550                 jme_interrupt_mode(jme);
2551         }
2552
2553         return 0;
2554 }
2555
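/*
 * Userspace toggles between the two RX paths with ethtool, e.g.
 * "ethtool -C eth0 adaptive-rx on" selects interrupt mode with
 * dynamic PCC, while "adaptive-rx off" selects NAPI polling. The
 * interface must be down first, as jme_set_coalesce returns
 * -EBUSY while it is running.
 */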
2556 static void
2557 jme_get_pauseparam(struct net_device *netdev,
2558                         struct ethtool_pauseparam *ecmd)
2559 {
2560         struct jme_adapter *jme = netdev_priv(netdev);
2561         u32 val;
2562
2563         ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
2564         ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;
2565
2566         spin_lock_bh(&jme->phy_lock);
2567         val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2568         spin_unlock_bh(&jme->phy_lock);
2569
2570         ecmd->autoneg =
2571                 (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
2572 }
2573
2574 static int
2575 jme_set_pauseparam(struct net_device *netdev,
2576                         struct ethtool_pauseparam *ecmd)
2577 {
2578         struct jme_adapter *jme = netdev_priv(netdev);
2579         u32 val;
2580
2581         if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^
2582                 (ecmd->tx_pause != 0)) {
2583
2584                 if (ecmd->tx_pause)
2585                         jme->reg_txpfc |= TXPFC_PF_EN;
2586                 else
2587                         jme->reg_txpfc &= ~TXPFC_PF_EN;
2588
2589                 jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
2590         }
2591
2592         spin_lock_bh(&jme->rxmcs_lock);
2593         if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^
2594                 (ecmd->rx_pause != 0)) {
2595
2596                 if (ecmd->rx_pause)
2597                         jme->reg_rxmcs |= RXMCS_FLOWCTRL;
2598                 else
2599                         jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;
2600
2601                 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2602         }
2603         spin_unlock_bh(&jme->rxmcs_lock);
2604
2605         spin_lock_bh(&jme->phy_lock);
2606         val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2607         if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^
2608                 (ecmd->autoneg != 0)) {
2609
2610                 if (ecmd->autoneg)
2611                         val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2612                 else
2613                         val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2614
2615                 jme_mdio_write(jme->dev, jme->mii_if.phy_id,
2616                                 MII_ADVERTISE, val);
2617         }
2618         spin_unlock_bh(&jme->phy_lock);
2619
2620         return 0;
2621 }
2622
2623 static void
2624 jme_get_wol(struct net_device *netdev,
2625                 struct ethtool_wolinfo *wol)
2626 {
2627         struct jme_adapter *jme = netdev_priv(netdev);
2628
2629         wol->supported = WAKE_MAGIC | WAKE_PHY;
2630
2631         wol->wolopts = 0;
2632
2633         if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
2634                 wol->wolopts |= WAKE_PHY;
2635
2636         if (jme->reg_pmcs & PMCS_MFEN)
2637                 wol->wolopts |= WAKE_MAGIC;
2638
2639 }
2640
2641 static int
2642 jme_set_wol(struct net_device *netdev,
2643                 struct ethtool_wolinfo *wol)
2644 {
2645         struct jme_adapter *jme = netdev_priv(netdev);
2646
2647         if (wol->wolopts & (WAKE_MAGICSECURE |
2648                                 WAKE_UCAST |
2649                                 WAKE_MCAST |
2650                                 WAKE_BCAST |
2651                                 WAKE_ARP))
2652                 return -EOPNOTSUPP;
2653
2654         jme->reg_pmcs = 0;
2655
2656         if (wol->wolopts & WAKE_PHY)
2657                 jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;
2658
2659         if (wol->wolopts & WAKE_MAGIC)
2660                 jme->reg_pmcs |= PMCS_MFEN;
2661
2662         jwrite32(jme, JME_PMCS, jme->reg_pmcs);
2663 #ifndef JME_NEW_PM_API
2664         jme_pci_wakeup_enable(jme, !!(jme->reg_pmcs));
2665 #endif
2666 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
2667         device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
2668 #endif
2669
2670         return 0;
2671 }
2672
2673 static int
2674 jme_get_settings(struct net_device *netdev,
2675                      struct ethtool_cmd *ecmd)
2676 {
2677         struct jme_adapter *jme = netdev_priv(netdev);
2678         int rc;
2679
2680         spin_lock_bh(&jme->phy_lock);
2681         rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
2682         spin_unlock_bh(&jme->phy_lock);
2683         return rc;
2684 }
2685
2686 static int
2687 jme_set_settings(struct net_device *netdev,
2688                      struct ethtool_cmd *ecmd)
2689 {
2690         struct jme_adapter *jme = netdev_priv(netdev);
2691         int rc, fdc = 0;
2692
2693         if (ethtool_cmd_speed(ecmd) == SPEED_1000
2694             && ecmd->autoneg != AUTONEG_ENABLE)
2695                 return -EINVAL;
2696
2697         /*
2698          * Check if the user changed duplex only while force_media
2699          * is set; the hardware won't generate a link change interrupt.
2700          */
2701         if (jme->mii_if.force_media &&
2702             ecmd->autoneg != AUTONEG_ENABLE &&
2703             (jme->mii_if.full_duplex != ecmd->duplex))
2704                 fdc = 1;
2705
2706         spin_lock_bh(&jme->phy_lock);
2707         rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
2708         spin_unlock_bh(&jme->phy_lock);
2709
2710         if (!rc) {
2711                 if (fdc)
2712                         jme_reset_link(jme);
2713                 jme->old_ecmd = *ecmd;
2714                 set_bit(JME_FLAG_SSET, &jme->flags);
2715         }
2716
2717         return rc;
2718 }
2719
2720 static int
2721 jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
2722 {
2723         int rc;
2724         struct jme_adapter *jme = netdev_priv(netdev);
2725         struct mii_ioctl_data *mii_data = if_mii(rq);
2726         unsigned int duplex_chg;
2727
2728         if (cmd == SIOCSMIIREG) {
2729                 u16 val = mii_data->val_in;
2730                 if (!(val & (BMCR_RESET|BMCR_ANENABLE)) &&
2731                     (val & BMCR_SPEED1000))
2732                         return -EINVAL;
2733         }
2734
2735         spin_lock_bh(&jme->phy_lock);
2736         rc = generic_mii_ioctl(&jme->mii_if, mii_data, cmd, &duplex_chg);
2737         spin_unlock_bh(&jme->phy_lock);
2738
2739         if (!rc && (cmd == SIOCSMIIREG)) {
2740                 if (duplex_chg)
2741                         jme_reset_link(jme);
2742                 jme_get_settings(netdev, &jme->old_ecmd);
2743                 set_bit(JME_FLAG_SSET, &jme->flags);
2744         }
2745
2746         return rc;
2747 }
2748
2749 static u32
2750 jme_get_link(struct net_device *netdev)
2751 {
2752         struct jme_adapter *jme = netdev_priv(netdev);
2753         return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
2754 }
2755
2756 static u32
2757 jme_get_msglevel(struct net_device *netdev)
2758 {
2759         struct jme_adapter *jme = netdev_priv(netdev);
2760         return jme->msg_enable;
2761 }
2762
2763 static void
2764 jme_set_msglevel(struct net_device *netdev, u32 value)
2765 {
2766         struct jme_adapter *jme = netdev_priv(netdev);
2767         jme->msg_enable = value;
2768 }
2769
2770 static u32
2771 jme_get_rx_csum(struct net_device *netdev)
2772 {
2773         struct jme_adapter *jme = netdev_priv(netdev);
2774         return jme->reg_rxmcs & RXMCS_CHECKSUM;
2775 }
2776
2777 static int
2778 jme_set_rx_csum(struct net_device *netdev, u32 on)
2779 {
2780         struct jme_adapter *jme = netdev_priv(netdev);
2781
2782         spin_lock_bh(&jme->rxmcs_lock);
2783         if (on)
2784                 jme->reg_rxmcs |= RXMCS_CHECKSUM;
2785         else
2786                 jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
2787         jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2788         spin_unlock_bh(&jme->rxmcs_lock);
2789
2790         return 0;
2791 }
2792
2793 static int
2794 jme_set_tx_csum(struct net_device *netdev, u32 on)
2795 {
2796         struct jme_adapter *jme = netdev_priv(netdev);
2797
2798         if (on) {
2799                 set_bit(JME_FLAG_TXCSUM, &jme->flags);
2800                 if (netdev->mtu <= 1900)
2801                         netdev->features |=
2802                                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2803         } else {
2804                 clear_bit(JME_FLAG_TXCSUM, &jme->flags);
2805                 netdev->features &=
2806                                 ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
2807         }
2808
2809         return 0;
2810 }
2811
2812 static int
2813 jme_set_tso(struct net_device *netdev, u32 on)
2814 {
2815         struct jme_adapter *jme = netdev_priv(netdev);
2816
2817         if (on) {
2818                 set_bit(JME_FLAG_TSO, &jme->flags);
2819                 if (netdev->mtu <= 1900)
2820                         netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
2821         } else {
2822                 clear_bit(JME_FLAG_TSO, &jme->flags);
2823                 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
2824         }
2825
2826         return 0;
2827 }
2828
2829 static int
2830 jme_nway_reset(struct net_device *netdev)
2831 {
2832         struct jme_adapter *jme = netdev_priv(netdev);
2833         jme_restart_an(jme);
2834         return 0;
2835 }
2836
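/*
 * EEPROM access goes through the chip's SMBus interface: wait for
 * SMBCSR_BUSY to clear, post a read or write command via SMBINTF,
 * then poll until the hardware drops SMBINTF_HWCMD. Each step
 * times out after JME_SMB_BUSY_TIMEOUT milliseconds.
 */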
2837 static u8
2838 jme_smb_read(struct jme_adapter *jme, unsigned int addr)
2839 {
2840         u32 val;
2841         int to;
2842
2843         val = jread32(jme, JME_SMBCSR);
2844         to = JME_SMB_BUSY_TIMEOUT;
2845         while ((val & SMBCSR_BUSY) && --to) {
2846                 msleep(1);
2847                 val = jread32(jme, JME_SMBCSR);
2848         }
2849         if (!to) {
2850                 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2851                 return 0xFF;
2852         }
2853
2854         jwrite32(jme, JME_SMBINTF,
2855                 ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
2856                 SMBINTF_HWRWN_READ |
2857                 SMBINTF_HWCMD);
2858
2859         val = jread32(jme, JME_SMBINTF);
2860         to = JME_SMB_BUSY_TIMEOUT;
2861         while ((val & SMBINTF_HWCMD) && --to) {
2862                 msleep(1);
2863                 val = jread32(jme, JME_SMBINTF);
2864         }
2865         if (!to) {
2866                 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2867                 return 0xFF;
2868         }
2869
2870         return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
2871 }
2872
2873 static void
2874 jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
2875 {
2876         u32 val;
2877         int to;
2878
2879         val = jread32(jme, JME_SMBCSR);
2880         to = JME_SMB_BUSY_TIMEOUT;
2881         while ((val & SMBCSR_BUSY) && --to) {
2882                 msleep(1);
2883                 val = jread32(jme, JME_SMBCSR);
2884         }
2885         if (!to) {
2886                 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2887                 return;
2888         }
2889
2890         jwrite32(jme, JME_SMBINTF,
2891                 ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
2892                 ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
2893                 SMBINTF_HWRWN_WRITE |
2894                 SMBINTF_HWCMD);
2895
2896         val = jread32(jme, JME_SMBINTF);
2897         to = JME_SMB_BUSY_TIMEOUT;
2898         while ((val & SMBINTF_HWCMD) && --to) {
2899                 msleep(1);
2900                 val = jread32(jme, JME_SMBINTF);
2901         }
2902         if (!to) {
2903                 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2904                 return;
2905         }
2906
2907         mdelay(2);
2908 }
2909
2910 static int
2911 jme_get_eeprom_len(struct net_device *netdev)
2912 {
2913         struct jme_adapter *jme = netdev_priv(netdev);
2914         u32 val;
2915         val = jread32(jme, JME_SMBCSR);
2916         return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0;
2917 }
2918
2919 static int
2920 jme_get_eeprom(struct net_device *netdev,
2921                 struct ethtool_eeprom *eeprom, u8 *data)
2922 {
2923         struct jme_adapter *jme = netdev_priv(netdev);
2924         int i, offset = eeprom->offset, len = eeprom->len;
2925
2926         /*
2927          * ethtool will check the boundary for us
2928          */
2929         eeprom->magic = JME_EEPROM_MAGIC;
2930         for (i = 0 ; i < len ; ++i)
2931                 data[i] = jme_smb_read(jme, i + offset);
2932
2933         return 0;
2934 }
2935
2936 static int
2937 jme_set_eeprom(struct net_device *netdev,
2938                 struct ethtool_eeprom *eeprom, u8 *data)
2939 {
2940         struct jme_adapter *jme = netdev_priv(netdev);
2941         int i, offset = eeprom->offset, len = eeprom->len;
2942
2943         if (eeprom->magic != JME_EEPROM_MAGIC)
2944                 return -EINVAL;
2945
2946         /*
2947          * ethtool will check the boundary for us
2948          */
2949         for (i = 0 ; i < len ; ++i)
2950                 jme_smb_write(jme, i + offset, data[i]);
2951
2952         return 0;
2953 }
2954
2955 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
2956 static struct ethtool_ops jme_ethtool_ops = {
2957 #else
2958 static const struct ethtool_ops jme_ethtool_ops = {
2959 #endif
2960         .get_drvinfo            = jme_get_drvinfo,
2961         .get_regs_len           = jme_get_regs_len,
2962         .get_regs               = jme_get_regs,
2963         .get_coalesce           = jme_get_coalesce,
2964         .set_coalesce           = jme_set_coalesce,
2965         .get_pauseparam         = jme_get_pauseparam,
2966         .set_pauseparam         = jme_set_pauseparam,
2967         .get_wol                = jme_get_wol,
2968         .set_wol                = jme_set_wol,
2969         .get_settings           = jme_get_settings,
2970         .set_settings           = jme_set_settings,
2971         .get_link               = jme_get_link,
2972         .get_msglevel           = jme_get_msglevel,
2973         .set_msglevel           = jme_set_msglevel,
2974         .get_rx_csum            = jme_get_rx_csum,
2975         .set_rx_csum            = jme_set_rx_csum,
2976         .set_tx_csum            = jme_set_tx_csum,
2977         .set_tso                = jme_set_tso,
2978         .set_sg                 = ethtool_op_set_sg,
2979         .nway_reset             = jme_nway_reset,
2980         .get_eeprom_len         = jme_get_eeprom_len,
2981         .get_eeprom             = jme_get_eeprom,
2982         .set_eeprom             = jme_set_eeprom,
2983 };
2984
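/*
 * Probe the widest usable DMA mask: 64-bit and 40-bit masks are
 * only attempted on the JMC250; everything else falls back to
 * 32-bit. Returns 1 if high DMA (NETIF_F_HIGHDMA) can be used,
 * 0 for a plain 32-bit mask, and -1 if no mask could be set.
 */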
2985 static int
2986 jme_pci_dma64(struct pci_dev *pdev)
2987 {
2988         if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
2989 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
2990             !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2991 #else
2992             !pci_set_dma_mask(pdev, DMA_64BIT_MASK)
2993 #endif
2994            )
2995 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
2996                 if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2997 #else
2998                 if (!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
2999 #endif
3000                         return 1;
3001
3002         if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
3003 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
3004             !pci_set_dma_mask(pdev, DMA_BIT_MASK(40))
3005 #else
3006             !pci_set_dma_mask(pdev, DMA_40BIT_MASK)
3007 #endif
3008            )
3009 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
3010                 if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)))
3011 #else
3012                 if (!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK))
3013 #endif
3014                         return 1;
3015
3016 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
3017         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
3018                 if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
3019 #else
3020         if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3021                 if (!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
3022 #endif
3023                         return 0;
3024
3025         return -1;
3026 }
3027
3028 static inline void
3029 jme_phy_init(struct jme_adapter *jme)
3030 {
3031         u16 reg26;
3032
3033         reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
3034         jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
3035 }
3036
3037 static inline void
3038 jme_check_hw_ver(struct jme_adapter *jme)
3039 {
3040         u32 chipmode;
3041
3042         chipmode = jread32(jme, JME_CHIPMODE);
3043
3044         jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
3045         jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
3046         jme->chip_main_rev = jme->chiprev & 0xF;
3047         jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF;
3048 }
3049
3050 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
3051 static const struct net_device_ops jme_netdev_ops = {
3052         .ndo_open               = jme_open,
3053         .ndo_stop               = jme_close,
3054         .ndo_validate_addr      = eth_validate_addr,
3055         .ndo_do_ioctl           = jme_ioctl,
3056         .ndo_start_xmit         = jme_start_xmit,
3057         .ndo_set_mac_address    = jme_set_macaddr,
3058         .ndo_set_multicast_list = jme_set_multi,
3059         .ndo_change_mtu         = jme_change_mtu,
3060         .ndo_tx_timeout         = jme_tx_timeout,
3061         .ndo_vlan_rx_register   = jme_vlan_rx_register,
3062 };
3063 #endif
3064
3065 static int __devinit
3066 jme_init_one(struct pci_dev *pdev,
3067              const struct pci_device_id *ent)
3068 {
3069         int rc = 0, using_dac, i;
3070         struct net_device *netdev;
3071         struct jme_adapter *jme;
3072         u16 bmcr, bmsr;
3073         u32 apmc;
3074
3075         /*
3076          * set up PCI device basics
3077          */
3078         rc = pci_enable_device(pdev);
3079         if (rc) {
3080                 pr_err("Cannot enable PCI device\n");
3081                 goto err_out;
3082         }
3083
3084         using_dac = jme_pci_dma64(pdev);
3085         if (using_dac < 0) {
3086                 pr_err("Cannot set PCI DMA Mask\n");
3087                 rc = -EIO;
3088                 goto err_out_disable_pdev;
3089         }
3090
3091         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
3092                 pr_err("No PCI resource region found\n");
3093                 rc = -ENOMEM;
3094                 goto err_out_disable_pdev;
3095         }
3096
3097         rc = pci_request_regions(pdev, DRV_NAME);
3098         if (rc) {
3099                 pr_err("Cannot obtain PCI resource region\n");
3100                 goto err_out_disable_pdev;
3101         }
3102
3103         pci_set_master(pdev);
3104
3105         /*
3106          * alloc and init net device
3107          */
3108         netdev = alloc_etherdev(sizeof(*jme));
3109         if (!netdev) {
3110                 pr_err("Cannot allocate netdev structure\n");
3111                 rc = -ENOMEM;
3112                 goto err_out_release_regions;
3113         }
3114 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
3115         netdev->netdev_ops = &jme_netdev_ops;
3116 #else
3117         netdev->open                    = jme_open;
3118         netdev->stop                    = jme_close;
3119         netdev->do_ioctl                = jme_ioctl;
3120         netdev->hard_start_xmit         = jme_start_xmit;
3121         netdev->set_mac_address         = jme_set_macaddr;
3122         netdev->set_multicast_list      = jme_set_multi;
3123         netdev->change_mtu              = jme_change_mtu;
3124         netdev->tx_timeout              = jme_tx_timeout;
3125         netdev->vlan_rx_register        = jme_vlan_rx_register;
3126 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
3127         netdev->vlan_rx_kill_vid        = jme_vlan_rx_kill_vid;
3128 #endif
3129         NETDEV_GET_STATS(netdev, &jme_get_stats);
3130 #endif
3131         netdev->ethtool_ops             = &jme_ethtool_ops;
3132         netdev->watchdog_timeo          = TX_TIMEOUT;
3133         netdev->features                =       NETIF_F_IP_CSUM |
3134                                                 NETIF_F_IPV6_CSUM |
3135                                                 NETIF_F_SG |
3136                                                 NETIF_F_TSO |
3137                                                 NETIF_F_TSO6 |
3138                                                 NETIF_F_HW_VLAN_TX |
3139                                                 NETIF_F_HW_VLAN_RX;
3140         if (using_dac)
3141                 netdev->features        |=      NETIF_F_HIGHDMA;
3142
3143         SET_NETDEV_DEV(netdev, &pdev->dev);
3144         pci_set_drvdata(pdev, netdev);
3145
3146         /*
3147          * init adapter info
3148          */
3149         jme = netdev_priv(netdev);
3150         jme->pdev = pdev;
3151         jme->dev = netdev;
3152         jme->jme_rx = netif_rx;
3153         jme->jme_vlan_rx = vlan_hwaccel_rx;
3154         jme->old_mtu = netdev->mtu = 1500;
3155         jme->phylink = 0;
3156         jme->tx_ring_size = 1 << 10;
3157         jme->tx_ring_mask = jme->tx_ring_size - 1;
3158         jme->tx_wake_threshold = 1 << 9;
3159         jme->rx_ring_size = 1 << 9;
3160         jme->rx_ring_mask = jme->rx_ring_size - 1;
3161         jme->msg_enable = JME_DEF_MSG_ENABLE;
3162         jme->regs = ioremap(pci_resource_start(pdev, 0),
3163                              pci_resource_len(pdev, 0));
3164         if (!(jme->regs)) {
3165                 pr_err("Mapping PCI resource region error\n");
3166                 rc = -ENOMEM;
3167                 goto err_out_free_netdev;
3168         }
3169
3170         if (no_pseudohp) {
3171                 apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
3172                 jwrite32(jme, JME_APMC, apmc);
3173         } else if (force_pseudohp) {
3174                 apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN;
3175                 jwrite32(jme, JME_APMC, apmc);
3176         }
3177
3178         NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)
3179
3180         spin_lock_init(&jme->phy_lock);
3181         spin_lock_init(&jme->macaddr_lock);
3182         spin_lock_init(&jme->rxmcs_lock);
3183
3184         atomic_set(&jme->link_changing, 1);
3185         atomic_set(&jme->rx_cleaning, 1);
3186         atomic_set(&jme->tx_cleaning, 1);
3187         atomic_set(&jme->rx_empty, 1);
3188
3189         tasklet_init(&jme->pcc_task,
3190                      jme_pcc_tasklet,
3191                      (unsigned long) jme);
3192         tasklet_init(&jme->linkch_task,
3193                      jme_link_change_tasklet,
3194                      (unsigned long) jme);
3195         tasklet_init(&jme->txclean_task,
3196                      jme_tx_clean_tasklet,
3197                      (unsigned long) jme);
3198         tasklet_init(&jme->rxclean_task,
3199                      jme_rx_clean_tasklet,
3200                      (unsigned long) jme);
3201         tasklet_init(&jme->rxempty_task,
3202                      jme_rx_empty_tasklet,
3203                      (unsigned long) jme);
3204         tasklet_disable_nosync(&jme->linkch_task);
3205         tasklet_disable_nosync(&jme->txclean_task);
3206         tasklet_disable_nosync(&jme->rxclean_task);
3207         tasklet_disable_nosync(&jme->rxempty_task);
3208         jme->dpi.cur = PCC_P1;
3209
3210         jme->reg_ghc = 0;
3211         jme->reg_rxcs = RXCS_DEFAULT;
3212         jme->reg_rxmcs = RXMCS_DEFAULT;
3213         jme->reg_txpfc = 0;
3214         jme->reg_pmcs = PMCS_MFEN;
3215         jme->reg_gpreg1 = GPREG1_DEFAULT;
3216         set_bit(JME_FLAG_TXCSUM, &jme->flags);
3217         set_bit(JME_FLAG_TSO, &jme->flags);
3218
3219         /*
3220          * Get Max Read Req Size from PCI Config Space
3221          */
3222         pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs);
3223         jme->mrrs &= PCI_DCSR_MRRS_MASK;
3224         switch (jme->mrrs) {
3225         case MRRS_128B:
3226                 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
3227                 break;
3228         case MRRS_256B:
3229                 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
3230                 break;
3231         default:
3232                 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
3233                 break;
3234         }
3235
3236         /*
3237          * Must be checked before reset_mac_processor()
3238          */
3239         jme_check_hw_ver(jme);
3240         jme->mii_if.dev = netdev;
3241         if (jme->fpgaver) {
3242                 jme->mii_if.phy_id = 0;
3243                 for (i = 1 ; i < 32 ; ++i) {
3244                         bmcr = jme_mdio_read(netdev, i, MII_BMCR);
3245                         bmsr = jme_mdio_read(netdev, i, MII_BMSR);
3246                         if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
3247                                 jme->mii_if.phy_id = i;
3248                                 break;
3249                         }
3250                 }
3251
3252                 if (!jme->mii_if.phy_id) {
3253                         rc = -EIO;
3254                         pr_err("Can not find phy_id\n");
3255                         goto err_out_unmap;
3256                 }
3257
3258                 jme->reg_ghc |= GHC_LINK_POLL;
3259         } else {
3260                 jme->mii_if.phy_id = 1;
3261         }
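        /*
         * FPGA prototypes do not keep the PHY at a fixed address, so
         * the loop above scanned MII addresses 1-31 and took the first
         * one whose BMCR/BMSR read back as neither all-ones (no device
         * on the bus) nor all-zeroes (dead device).  Production chips
         * always have the internal PHY at address 1.
         */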
3262         if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
3263                 jme->mii_if.supports_gmii = true;
3264         else
3265                 jme->mii_if.supports_gmii = false;
3266         jme->mii_if.phy_id_mask = 0x1F;
3267         jme->mii_if.reg_num_mask = 0x1F;
3268         jme->mii_if.mdio_read = jme_mdio_read;
3269         jme->mii_if.mdio_write = jme_mdio_write;
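        /*
         * With mii_if filled in, the generic linux/mii.h helpers can
         * drive the PHY through our jme_mdio_read/jme_mdio_write
         * accessors.  A minimal sketch of the kind of call the ethtool
         * paths elsewhere in this driver are assumed to make:
         *
         *      struct ethtool_cmd ecmd;
         *      mii_ethtool_gset(&jme->mii_if, &ecmd);
         */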
3270
3271         jme_clear_pm(jme);
3272         pci_set_power_state(jme->pdev, PCI_D0);
3273 #ifndef JME_NEW_PM_API
3274         jme_pci_wakeup_enable(jme, true);
3275 #endif
3276 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
3277         device_set_wakeup_enable(&pdev->dev, true);
3278 #endif
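        /*
         * The block above brings the function to full power before the
         * PHY is touched: clear stale power-management state, force D0,
         * and arm PME/wake-up signalling.  The #ifndef branch covers
         * kernels whose PCI core cannot record a per-device wake-up
         * policy; on 2.6.26+ device_set_wakeup_enable() tracks the same
         * intent generically.
         */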
3279
3280         jme_set_phyfifo_5level(jme);
3281 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22)
3282         pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->pcirev);
3283 #else
3284         jme->pcirev = pdev->revision;
3285 #endif
3286         if (!jme->fpgaver)
3287                 jme_phy_init(jme);
3288         jme_phy_off(jme);
3289
3290         /*
3291          * Reset MAC processor and reload EEPROM for MAC Address
3292          */
3293         jme_reset_mac_processor(jme);
3294         rc = jme_reload_eeprom(jme);
3295         if (rc) {
3296                 pr_err("Reload eeprom for reading MAC Address error\n");
3297                 goto err_out_unmap;
3298         }
3299         jme_load_macaddr(netdev);
3300
3301         /*
3302          * Tell the stack that we are not ready to work until open() is called
3303          */
3304         netif_carrier_off(netdev);
3305
3306         rc = register_netdev(netdev);
3307         if (rc) {
3308                 pr_err("Cannot register net device\n");
3309                 goto err_out_unmap;
3310         }
3311
3312         netif_info(jme, probe, jme->dev, "%s%s chipver:%x pcirev:%x "
3313                    "macaddr: %02x:%02x:%02x:%02x:%02x:%02x\n",
3314                    (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
3315                    "JMC250 Gigabit Ethernet" :
3316                    (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
3317                    "JMC260 Fast Ethernet" : "Unknown",
3318                    (jme->fpgaver != 0) ? " (FPGA)" : "",
3319                    (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
3320                    jme->pcirev,
3321                    netdev->dev_addr[0],
3322                    netdev->dev_addr[1],
3323                    netdev->dev_addr[2],
3324                    netdev->dev_addr[3],
3325                    netdev->dev_addr[4],
3326                    netdev->dev_addr[5]);
3327
3328         return 0;
3329
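/*
 * The error ladder below unwinds in the exact reverse order of the
 * corresponding acquisitions in this function: unmap the register BAR,
 * free the net_device, release the PCI regions and finally disable the
 * PCI function.  Earlier failure points jump into the ladder at the
 * matching depth.
 */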
3330 err_out_unmap:
3331         iounmap(jme->regs);
3332 err_out_free_netdev:
3333         pci_set_drvdata(pdev, NULL);
3334         free_netdev(netdev);
3335 err_out_release_regions:
3336         pci_release_regions(pdev);
3337 err_out_disable_pdev:
3338         pci_disable_device(pdev);
3339 err_out:
3340         return rc;
3341 }
3342
3343 static void __devexit
3344 jme_remove_one(struct pci_dev *pdev)
3345 {
3346         struct net_device *netdev = pci_get_drvdata(pdev);
3347         struct jme_adapter *jme = netdev_priv(netdev);
3348
3349         unregister_netdev(netdev);
3350         iounmap(jme->regs);
3351         pci_set_drvdata(pdev, NULL);
3352         free_netdev(netdev);
3353         pci_release_regions(pdev);
3354         pci_disable_device(pdev);
3356 }
3357
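/*
 * ->shutdown is invoked on reboot or power-off: park the PHY in its
 * power-saving state and leave wake-on-LAN armed only if the user has
 * configured at least one wake event (jme->reg_pmcs non-zero).
 */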
3358 static void
3359 jme_shutdown(struct pci_dev *pdev)
3360 {
3361         struct net_device *netdev = pci_get_drvdata(pdev);
3362         struct jme_adapter *jme = netdev_priv(netdev);
3363
3364         jme_powersave_phy(jme);
3365 #ifndef JME_NEW_PM_API
3366         jme_pci_wakeup_enable(jme, !!(jme->reg_pmcs));
3367 #endif
3368 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
3369         device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
3370 #endif
3371 }
3372
3373 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
3374         #ifdef CONFIG_PM
3375                 #define JME_HAVE_PM
3376         #endif
3377 #else
3378         #ifdef CONFIG_PM_SLEEP
3379                 #define JME_HAVE_PM
3380         #endif
3381 #endif
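/*
 * Pick the right "can this kernel suspend?" symbol: before 2.6.25 the
 * driver keys off CONFIG_PM, newer kernels split system sleep out as
 * CONFIG_PM_SLEEP.  JME_HAVE_PM is defined only when the suspend and
 * resume handlers below are worth compiling in.
 */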
3382
3383 #ifdef JME_HAVE_PM
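/*
 * The suspend/resume pair is compiled against two PCI PM interfaces.
 * With JME_NEW_PM_API the callbacks take a struct device and the PCI
 * core saves config space and changes power states around them; with
 * the legacy interface they take the pci_dev directly and must do the
 * pci_save_state()/pci_set_power_state() dance themselves, which is
 * what the #ifndef JME_NEW_PM_API blocks inside the functions are for.
 */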
3384 static int
3385 #ifdef JME_NEW_PM_API
3386 jme_suspend(struct device *dev)
3387 #else
3388 jme_suspend(struct pci_dev *pdev, pm_message_t state)
3389 #endif
3390 {
3391 #ifdef JME_NEW_PM_API
3392         struct pci_dev *pdev = to_pci_dev(dev);
3393 #endif
3394         struct net_device *netdev = pci_get_drvdata(pdev);
3395         struct jme_adapter *jme = netdev_priv(netdev);
3396
3397         atomic_dec(&jme->link_changing);
3398
3399         netif_device_detach(netdev);
3400         netif_stop_queue(netdev);
3401         jme_stop_irq(jme);
3402
3403         tasklet_disable(&jme->txclean_task);
3404         tasklet_disable(&jme->rxclean_task);
3405         tasklet_disable(&jme->rxempty_task);
3406
3407         if (netif_carrier_ok(netdev)) {
3408                 if (test_bit(JME_FLAG_POLL, &jme->flags))
3409                         jme_polling_mode(jme);
3410
3411                 jme_stop_pcc_timer(jme);
3412                 jme_disable_rx_engine(jme);
3413                 jme_disable_tx_engine(jme);
3414                 jme_reset_mac_processor(jme);
3415                 jme_free_rx_resources(jme);
3416                 jme_free_tx_resources(jme);
3417                 netif_carrier_off(netdev);
3418                 jme->phylink = 0;
3419         }
3420
3421         tasklet_enable(&jme->txclean_task);
3422         tasklet_hi_enable(&jme->rxclean_task);
3423         tasklet_hi_enable(&jme->rxempty_task);
3424
3425         jme_powersave_phy(jme);
3426 #ifndef JME_NEW_PM_API
3427         pci_save_state(pdev);
3428         jme_pci_wakeup_enable(jme, !!(jme->reg_pmcs));
3429         pci_set_power_state(pdev, PCI_D3hot);
3430 #endif
3431
3432         return 0;
3433 }
3434
3435 static int
3436 #ifdef JME_NEW_PM_API
3437 jme_resume(struct device *dev)
3438 #else
3439 jme_resume(struct pci_dev *pdev)
3440 #endif
3441 {
3442 #ifdef JME_NEW_PM_API
3443         struct pci_dev *pdev = to_pci_dev(dev);
3444 #endif
3445         struct net_device *netdev = pci_get_drvdata(pdev);
3446         struct jme_adapter *jme = netdev_priv(netdev);
3447
3448         jme_clear_pm(jme);
3449 #ifndef JME_NEW_PM_API
3450         pci_set_power_state(pdev, PCI_D0);
3451         pci_restore_state(pdev);
3452 #endif
3453
3454         jme_phy_on(jme);
3455         if (test_bit(JME_FLAG_SSET, &jme->flags))
3456                 jme_set_settings(netdev, &jme->old_ecmd);
3457         else
3458                 jme_reset_phy_processor(jme);
3459
3460         jme_start_irq(jme);
3461         netif_device_attach(netdev);
3462
3463         atomic_inc(&jme->link_changing);
3464
3465         jme_reset_link(jme);
3466
3467         return 0;
3468 }
3469
3470 #ifdef JME_NEW_PM_API
3471 static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume);
3472 #define JME_PM_OPS (&jme_pm_ops)
3473 #endif
3474
3475 #else
3476
3477 #ifdef JME_NEW_PM_API
3478 #define JME_PM_OPS NULL
3479 #endif
3480 #endif
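/*
 * Net effect of the ifdef ladder above: on new-API kernels JME_PM_OPS
 * resolves to &jme_pm_ops when sleep support is compiled in and to NULL
 * otherwise, so the driver structure below can reference it
 * unconditionally.
 */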
3481
3482 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24)
3483 static struct pci_device_id jme_pci_tbl[] = {
3484 #else
3485 static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = {
3486 #endif
3487         { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
3488         { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
3489         { }
3490 };
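/*
 * PCI_VDEVICE(JMICRON, id) expands to a match on the JMicron vendor ID
 * plus the given device ID, with subvendor/subdevice wildcarded,
 * roughly:
 *
 *      { PCI_VENDOR_ID_JMICRON, (id), PCI_ANY_ID, PCI_ANY_ID, 0, 0 }
 *
 * The empty entry terminates the table for the PCI core and for the
 * MODULE_DEVICE_TABLE() alias scanner at the end of the file.
 */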
3491
3492 static struct pci_driver jme_driver = {
3493         .name           = DRV_NAME,
3494         .id_table       = jme_pci_tbl,
3495         .probe          = jme_init_one,
3496         .remove         = __devexit_p(jme_remove_one),
3497         .shutdown       = jme_shutdown,
3498 #ifndef JME_NEW_PM_API
3499         .suspend        = jme_suspend,
3500         .resume         = jme_resume
3501 #else
3502         .driver.pm      = JME_PM_OPS,
3503 #endif
3504 };
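/*
 * Power-management plumbing differs by kernel generation: the legacy
 * interface hangs suspend/resume directly off pci_driver, while the
 * new interface publishes a dev_pm_ops table through driver.pm.  Only
 * one of the two is compiled in, selected by JME_NEW_PM_API.
 */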
3505
3506 static int __init
3507 jme_init_module(void)
3508 {
3509         pr_info("JMicron JMC2XX ethernet driver version %s\n", DRV_VERSION);
3510         return pci_register_driver(&jme_driver);
3511 }
3512
3513 static void __exit
3514 jme_cleanup_module(void)
3515 {
3516         pci_unregister_driver(&jme_driver);
3517 }
3518
3519 module_init(jme_init_module);
3520 module_exit(jme_cleanup_module);
3521
3522 MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
3523 MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
3524 MODULE_LICENSE("GPL");
3525 MODULE_VERSION(DRV_VERSION);
3526 MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
3527