1/*
2 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
3 *
4 * Copyright 2008 JMicron Technology Corporation
5 * http://www.jmicron.com/
6 *
7 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 */
23
24/*
25 * Note:
26 * Watchdog:
27 * check if the rx queue has stopped,
28 * and restart it after the rx ring is cleaned.
29 */
30
31/*
32 * Timeline before release:
33 * Stage 2: Error handling.
34 * - Watchdog
35 * - Transmit timeout
36 *
37 * Stage 3: Basic offloading support.
38 * - Use pci_map_page on scattered sk_buff for HIGHMEM support
39 * - Implement scatter-gather offloading.
40 * A system page per RX (buffer|descriptor)?
41 * Handle fragmented sk_buffs with TX descriptors.
42 * - Implement tx/rx ipv6/ip/tcp/udp checksum offloading
43 *
44 * Stage 4: Basic feature support.
45 * - Implement Power Management related functions.
46 * - Implement Jumbo frame support.
47 * - Implement MSI.
48 *
49 * Stage 5: Advanced offloading support.
50 * - Implement VLAN offloading.
51 * - Implement TCP Segmentation Offloading.
52 *
53 * Stage 6: CPU Load balancing.
54 * - Implement MSI-X.
55 * Along with multiple RX queues, for CPU load balancing.
56 *
57 * Stage 7:
58 * - Use NAPI instead of rx_tasklet?
59 * PCC supports both packet-counter and timeout interrupts for
60 * receive and transmit completion; is NAPI really needed?
61 * - Clean up/reorganize code, performance tuning (alignment, etc.).
62 * - Test and Release 1.0
63 */
64
65#include <linux/version.h>
66#include <linux/module.h>
67#include <linux/kernel.h>
68#include <linux/pci.h>
69#include <linux/netdevice.h>
70#include <linux/etherdevice.h>
71#include <linux/ethtool.h>
72#include <linux/mii.h>
73#include <linux/crc32.h>
74#include <linux/delay.h>
75#include "jme.h"
76
77#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
78static struct net_device_stats *
79jme_get_stats(struct net_device *netdev)
80{
81 struct jme_adapter *jme = netdev_priv(netdev);
82 return &jme->stats;
83}
84#endif
85
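/*
 * MDIO access through the SMI interface:
 * post a read request to the JME_SMI register, then poll until the
 * hardware clears SMI_OP_REQ or JME_PHY_TIMEOUT iterations expire.
 */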
86static int
87jme_mdio_read(struct net_device *netdev, int phy, int reg)
88{
89 struct jme_adapter *jme = netdev_priv(netdev);
90 int i, val;
91
92 jwrite32(jme, JME_SMI, SMI_OP_REQ |
93 smi_phy_addr(phy) |
94 smi_reg_addr(reg));
95
96 wmb();
97 for (i = JME_PHY_TIMEOUT; i > 0; --i) {
98 udelay(1);
99 if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
100 break;
101 }
102
103 if (i == 0) {
104 jeprintk(netdev->name, "phy read timeout : %d\n", reg);
105 return 0;
106 }
107
108 return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
109}
110
111static void
112jme_mdio_write(struct net_device *netdev,
113 int phy, int reg, int val)
114{
115 struct jme_adapter *jme = netdev_priv(netdev);
116 int i;
117
118 jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
119 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
120 smi_phy_addr(phy) | smi_reg_addr(reg));
121
122 wmb();
123 for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
124 udelay(1);
125 if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
126 break;
127 }
128
129 if (i == 0)
130 jeprintk(netdev->name, "phy write timeout : %d\n", reg);
131
132 return;
133}
134
135__always_inline static void
136jme_reset_phy_processor(struct jme_adapter *jme)
137{
138 int i, val;
139
140 val = jme_mdio_read(jme->dev,
141 jme->mii_if.phy_id,
142 MII_BMCR);
143
144 jme_mdio_write(jme->dev,
145 jme->mii_if.phy_id,
146 MII_BMCR, val | BMCR_RESET);
147
148 for(i = JME_PHY_RST_TIMEOUT ; i > 0 ; --i) {
149 udelay(1);
150 val = jme_mdio_read(jme->dev,
151 jme->mii_if.phy_id,
152 MII_BMCR);
153 if(!(val & BMCR_RESET))
154 break;
155 }
156
157 if (i == 0)
158 jeprintk(jme->dev->name, "phy reset timeout\n");
159
160 jme_mdio_write(jme->dev,
161 jme->mii_if.phy_id,
162 MII_ADVERTISE, ADVERTISE_ALL);
163
164 jme_mdio_write(jme->dev,
165 jme->mii_if.phy_id,
166 MII_CTRL1000,
167 ADVERTISE_1000FULL | ADVERTISE_1000HALF);
168
169 return;
170}
171
172
173__always_inline static void
174jme_reset_mac_processor(struct jme_adapter *jme)
175{
176 jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
177 udelay(2);
178 jwrite32(jme, JME_GHC, jme->reg_ghc);
179 jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
180 jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
181 jwrite32(jme, JME_WFODP, 0);
182 jwrite32(jme, JME_WFOI, 0);
183 jwrite32(jme, JME_GPREG0, GPREG0_DEFAULT);
184 jwrite32(jme, JME_GPREG1, 0);
185}
186
187__always_inline static void
188jme_clear_pm(struct jme_adapter *jme)
189{
190 jwrite32(jme, JME_PMCS, 0xFFFF0000);
191 pci_set_power_state(jme->pdev, PCI_D0);
192}
193
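/*
 * Ask the MAC to reload its configuration from the attached EEPROM.
 * Returns -EIO if no EEPROM is detected or the reload does not finish
 * within JME_SMB_TIMEOUT milliseconds.
 */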
194static int
195jme_reload_eeprom(struct jme_adapter *jme)
196{
197 __u32 val;
198 int i;
199
200 val = jread32(jme, JME_SMBCSR);
201
202 if(val & SMBCSR_EEPROMD)
203 {
204 val |= SMBCSR_CNACK;
205 jwrite32(jme, JME_SMBCSR, val);
206 val |= SMBCSR_RELOAD;
207 jwrite32(jme, JME_SMBCSR, val);
208 mdelay(12);
209
210 for (i = JME_SMB_TIMEOUT; i > 0; --i)
211 {
212 mdelay(1);
213 if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
214 break;
215 }
216
217 if(i == 0) {
218 jeprintk(jme->dev->name, "eeprom reload timeout\n");
219 return -EIO;
220 }
221 }
222 else
223 return -EIO;
224
225 return 0;
226}
227
228static void
229jme_load_macaddr(struct net_device *netdev)
230{
231 struct jme_adapter *jme = netdev_priv(netdev);
232 unsigned char macaddr[6];
233 __u32 val;
234
235 spin_lock(&jme->phy_lock);
236 val = jread32(jme, JME_RXUMA_LO);
237 macaddr[0] = (val >> 0) & 0xFF;
238 macaddr[1] = (val >> 8) & 0xFF;
239 macaddr[2] = (val >> 16) & 0xFF;
240 macaddr[3] = (val >> 24) & 0xFF;
241 val = jread32(jme, JME_RXUMA_HI);
242 macaddr[4] = (val >> 0) & 0xFF;
243 macaddr[5] = (val >> 8) & 0xFF;
244 memcpy(netdev->dev_addr, macaddr, 6);
245 spin_unlock(&jme->phy_lock);
246}
247
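/*
 * Program the RX packet completion coalescing (PCC) timeout and packet
 * count for queue 0 according to the selected profile (PCC_P1/P2/P3).
 */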
248static void
249jme_set_rx_pcc(struct jme_adapter *jme, int p)
250{
251 switch(p) {
252 case PCC_P1:
253 jwrite32(jme, JME_PCCRX0,
254 ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
255 ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
256 break;
257 case PCC_P2:
258 jwrite32(jme, JME_PCCRX0,
259 ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
260 ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
261 break;
262 case PCC_P3:
263 jwrite32(jme, JME_PCCRX0,
264 ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
265 ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
266 break;
267 default:
268 break;
269 }
270
271 dprintk(jme->dev->name, "Switched to PCC_P%d\n", p);
272}
273
274__always_inline static void
275jme_start_irq(struct jme_adapter *jme)
276{
277 register struct dynpcc_info *dpi = &(jme->dpi);
278
279 jme_set_rx_pcc(jme, PCC_P1);
280
281 dpi->check_point = jiffies + PCC_INTERVAL;
282 dpi->last_bytes = NET_STAT(jme).rx_bytes;
283 dpi->last_pkts = NET_STAT(jme).rx_packets;
284 dpi->cur = PCC_P1;
285 dpi->attempt = PCC_P1;
286 dpi->cnt = 0;
287
288 jwrite32(jme, JME_PCCTX,
289 ((60000 << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
290 ((8 << PCCTX_SHIFT) & PCCTX_MASK) |
291 PCCTXQ0_EN
292 );
293
294 /*
295 * Enable Interrupts
296 */
297 atomic_set(&jme->intr_sem, 1);
298 jwrite32(jme, JME_IENS, INTR_ENABLE);
299}
300
301__always_inline static void
302jme_stop_irq(struct jme_adapter *jme)
303{
304 /*
305 * Disable Interrupts
306 */
307 jwrite32(jme, JME_IENC, INTR_ENABLE);
308}
309
310
311__always_inline static void
312jme_enable_shadow(struct jme_adapter *jme)
313{
314 jwrite32(jme,
315 JME_SHBA_LO,
316 ((__u32)jme->shadow_dma & ~((__u32)0x1F)) | SHBA_POSTEN);
317}
318
319__always_inline static void
320jme_disable_shadow(struct jme_adapter *jme)
321{
322 jwrite32(jme, JME_SHBA_LO, 0x0);
323}
324
325static void
326jme_check_link(struct net_device *netdev)
327{
328 struct jme_adapter *jme = netdev_priv(netdev);
329 __u32 phylink, ghc, cnt = JME_AUTONEG_TIMEOUT;
330 char linkmsg[32];
331
332 spin_lock(&jme->phy_lock);
333 phylink = jread32(jme, JME_PHY_LINK);
334
335 if (phylink & PHY_LINK_UP) {
336 /*
337 * Keep polling for autoneg complete
338 */
339 while(!(phylink & PHY_LINK_AUTONEG_COMPLETE) && --cnt > 0) {
340 mdelay(1);
341 phylink = jread32(jme, JME_PHY_LINK);
342 }
343
344 if(!cnt)
345 jeprintk(netdev->name, "Waiting for autoneg timed out.\n");
346
347 switch(phylink & PHY_LINK_SPEED_MASK) {
348 case PHY_LINK_SPEED_10M:
349 ghc = GHC_SPEED_10M;
350 strcpy(linkmsg, "10 Mbps, ");
351 break;
352 case PHY_LINK_SPEED_100M:
353 ghc = GHC_SPEED_100M;
354 strcpy(linkmsg, "100 Mbps, ");
355 break;
356 case PHY_LINK_SPEED_1000M:
357 ghc = GHC_SPEED_1000M;
358 strcpy(linkmsg, "1000 Mbps, ");
359 break;
360 default:
361 ghc = 0;
362 break;
363 }
364 ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;
365 jme->reg_ghc = ghc;
366 jwrite32(jme, JME_GHC, ghc);
367 strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
368 "Full-Duplex" :
369 "Half-Duplex");
370
371 if(phylink & PHY_LINK_DUPLEX)
372 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
373 else
374 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
375 TXMCS_BACKOFF |
376 TXMCS_CARRIERSENSE |
377 TXMCS_COLLISION);
378
379 jprintk(netdev->name, "Link is up at %s.\n", linkmsg);
380 netif_carrier_on(netdev);
381 }
382 else {
383 jprintk(netdev->name, "Link is down.\n");
384 netif_carrier_off(netdev);
385 }
386 spin_unlock(&jme->phy_lock);
387}
388
389
390static int
391jme_alloc_txdesc(struct jme_adapter *jme,
392 int nr_alloc)
393{
394 struct jme_ring *txring = jme->txring;
395 int idx;
396
397 idx = txring->next_to_use;
398
399 if(unlikely(txring->nr_free < nr_alloc))
400 return -1;
401
402 spin_lock(&jme->tx_lock);
403 txring->nr_free -= nr_alloc;
404
405 if((txring->next_to_use += nr_alloc) >= RING_DESC_NR)
406 txring->next_to_use -= RING_DESC_NR;
407 spin_unlock(&jme->tx_lock);
408
409 return idx;
410}
411
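/*
 * Fill TX descriptors for one linear skb: descriptor idx+1 carries the
 * DMA-mapped buffer, while descriptor idx is the header descriptor that
 * gets the OWN bit last so the NIC never sees a half-built entry.
 */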
412static int
413jme_set_new_txdesc(struct jme_adapter *jme,
414 struct sk_buff *skb)
415{
416 struct jme_ring *txring = jme->txring;
417 volatile struct txdesc *txdesc = txring->desc, *ctxdesc;
418 struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
419 dma_addr_t dmaaddr;
420 int i, idx, nr_desc;
421
422 nr_desc = 2;
423 idx = jme_alloc_txdesc(jme, nr_desc);
424
425 if(unlikely(idx<0))
426 return NETDEV_TX_BUSY;
427
428 for(i = 1 ; i < nr_desc ; ++i) {
429 ctxdesc = txdesc + ((idx + i) & (RING_DESC_NR-1));
430 ctxbi = txbi + ((idx + i) & (RING_DESC_NR-1));
431
432 dmaaddr = pci_map_single(jme->pdev,
433 skb->data,
434 skb->len,
435 PCI_DMA_TODEVICE);
436
437 pci_dma_sync_single_for_device(jme->pdev,
438 dmaaddr,
439 skb->len,
440 PCI_DMA_TODEVICE);
441
442 ctxdesc->dw[0] = 0;
443 ctxdesc->dw[1] = 0;
444 ctxdesc->desc2.flags = TXFLAG_OWN;
445 if(jme->dev->features & NETIF_F_HIGHDMA)
446 ctxdesc->desc2.flags |= TXFLAG_64BIT;
447 ctxdesc->desc2.datalen = cpu_to_le16(skb->len);
448 ctxdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32);
449 ctxdesc->desc2.bufaddrl = cpu_to_le32(dmaaddr & 0xFFFFFFFF);
450
451 ctxbi->mapping = dmaaddr;
452 ctxbi->len = skb->len;
453 }
454
455 ctxdesc = txdesc + idx;
456 ctxbi = txbi + idx;
457
458 ctxdesc->dw[0] = 0;
459 ctxdesc->dw[1] = 0;
460 ctxdesc->dw[2] = 0;
461 ctxdesc->dw[3] = 0;
462 ctxdesc->desc1.pktsize = cpu_to_le16(skb->len);
463 /*
464 * Set the OWN bit last.
465 * If the kernel transmits faster than the NIC can send,
466 * the NIC might try to send this descriptor before we tell
467 * it to start sending this TX queue.
468 * The other fields are already filled in correctly.
469 */
470 wmb();
471 ctxdesc->desc1.flags = TXFLAG_OWN | TXFLAG_INT;
472 /*
473 * Set the tx buffer info after telling the NIC to send,
474 * for better tx_clean timing.
475 */
476 wmb();
477 ctxbi->nr_desc = nr_desc;
478 ctxbi->skb = skb;
479
480 tx_dbg(jme->dev->name, "Xmit: %d+%d\n", idx, nr_desc);
481
482 return 0;
483}
484
485
486static int
487jme_setup_tx_resources(struct jme_adapter *jme)
488{
489 struct jme_ring *txring = &(jme->txring[0]);
490
491 txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
492 TX_RING_ALLOC_SIZE,
493 &(txring->dmaalloc),
494 GFP_KERNEL);
495 if(!txring->alloc) {
496 txring->desc = NULL;
497 txring->dmaalloc = 0;
498 txring->dma = 0;
499 return -ENOMEM;
500 }
501
502 /*
503 * 16-byte alignment
504 */
505 txring->desc = (void*)ALIGN((unsigned long)(txring->alloc),
506 RING_DESC_ALIGN);
507 txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
508 txring->next_to_use = 0;
509 txring->next_to_clean = 0;
510 txring->nr_free = RING_DESC_NR;
511
512 /*
513 * Initialize Transmit Descriptors
514 */
515 memset(txring->alloc, 0, TX_RING_ALLOC_SIZE);
516 memset(txring->bufinf, 0,
517 sizeof(struct jme_buffer_info) * RING_DESC_NR);
518
519 return 0;
520}
521
522static void
523jme_free_tx_resources(struct jme_adapter *jme)
524{
525 int i;
526 struct jme_ring *txring = &(jme->txring[0]);
527 struct jme_buffer_info *txbi = txring->bufinf;
528
529 if(txring->alloc) {
530 for(i = 0 ; i < RING_DESC_NR ; ++i) {
531 txbi = txring->bufinf + i;
532 if(txbi->skb) {
533 dev_kfree_skb(txbi->skb);
534 txbi->skb = NULL;
535 }
536 txbi->mapping = 0;
537 txbi->len = 0;
538 txbi->nr_desc = 0;
539 }
540
541 dma_free_coherent(&(jme->pdev->dev),
542 TX_RING_ALLOC_SIZE,
543 txring->alloc,
544 txring->dmaalloc);
545
546 txring->alloc = NULL;
547 txring->desc = NULL;
548 txring->dmaalloc = 0;
549 txring->dma = 0;
550 }
551 txring->next_to_use = 0;
552 txring->next_to_clean = 0;
553 txring->nr_free = 0;
554
555}
556
557__always_inline static void
558jme_enable_tx_engine(struct jme_adapter *jme)
559{
560 __u8 mrrs;
561
562 /*
563 * Select Queue 0
564 */
565 jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
566
567 /*
568 * Setup TX Queue 0 DMA Base Address
569 */
570 jwrite32(jme, JME_TXDBA_LO, jme->txring[0].dma);
571 jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
572 jwrite32(jme, JME_TXNDA, jme->txring[0].dma);
573
574 /*
575 * Setup TX Descriptor Count
576 */
577 jwrite32(jme, JME_TXQDC, RING_DESC_NR);
578
579 /*
580 * Get Max Read Req Size from PCI Config Space
581 */
582 pci_read_config_byte(jme->pdev, PCI_CONF_DCSR_MRRS, &mrrs);
583 switch(mrrs) {
584 case MRRS_128B:
585 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
586 break;
587 case MRRS_256B:
588 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
589 break;
590 default:
591 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
592 break;
593 }
594
595 /*
596 * Enable TX Engine
597 */
598 wmb();
599 jwrite32(jme, JME_TXCS, jme->reg_txcs |
600 TXCS_SELECT_QUEUE0 |
601 TXCS_ENABLE);
602
603}
604
605__always_inline static void
606jme_disable_tx_engine(struct jme_adapter *jme)
607{
608 int i;
609 __u32 val;
610
611 /*
612 * Disable TX Engine
613 */
614 jwrite32(jme, JME_TXCS, jme->reg_txcs);
615
616 val = jread32(jme, JME_TXCS);
617 for(i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i)
618 {
619 udelay(1);
620 val = jread32(jme, JME_TXCS);
621 }
622
623 if(!i)
624 jeprintk(jme->dev->name, "Disable TX engine timeout.\n");
625
626
627}
628
629static void
630jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
631{
632 struct jme_ring *rxring = jme->rxring;
633 register volatile struct rxdesc* rxdesc = rxring->desc;
634 struct jme_buffer_info *rxbi = rxring->bufinf;
635 rxdesc += i;
636 rxbi += i;
637
638 rxdesc->dw[0] = 0;
639 rxdesc->dw[1] = 0;
640 rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32);
641 rxdesc->desc1.bufaddrl = cpu_to_le32(rxbi->mapping & 0xFFFFFFFF);
642 rxdesc->desc1.datalen = cpu_to_le16(rxbi->len);
643 if(jme->dev->features & NETIF_F_HIGHDMA)
644 rxdesc->desc1.flags = RXFLAG_64BIT;
645 wmb();
646 rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT;
647}
648
649static int
650jme_make_new_rx_buf(struct jme_adapter *jme, int i)
651{
652 struct jme_ring *rxring = &(jme->rxring[0]);
653 struct jme_buffer_info *rxbi = rxring->bufinf;
654 unsigned long offset;
655 struct sk_buff* skb;
656
657 skb = netdev_alloc_skb(jme->dev, RX_BUF_ALLOC_SIZE);
658 if(unlikely(!skb))
659 return -ENOMEM;
660
661 if(unlikely(skb_is_nonlinear(skb))) {
662 dprintk(jme->dev->name,
663 "Allocated skb fragged(%d).\n",
664 skb_shinfo(skb)->nr_frags);
665 dev_kfree_skb(skb);
666 return -ENOMEM;
667 }
668
669 if(unlikely(offset =
670 (unsigned long)(skb->data)
671 & (unsigned long)(RX_BUF_DMA_ALIGN - 1)))
672 skb_reserve(skb, RX_BUF_DMA_ALIGN - offset);
673
674 rxbi += i;
675 rxbi->skb = skb;
676 rxbi->len = skb_tailroom(skb);
677 rxbi->mapping = pci_map_single(jme->pdev,
678 skb->data,
679 rxbi->len,
680 PCI_DMA_FROMDEVICE);
681
682 return 0;
683}
684
685static void
686jme_free_rx_buf(struct jme_adapter *jme, int i)
687{
688 struct jme_ring *rxring = &(jme->rxring[0]);
689 struct jme_buffer_info *rxbi = rxring->bufinf;
690 rxbi += i;
691
692 if(rxbi->skb) {
693 pci_unmap_single(jme->pdev,
694 rxbi->mapping,
695 rxbi->len,
696 PCI_DMA_FROMDEVICE);
697 dev_kfree_skb(rxbi->skb);
698 rxbi->skb = NULL;
699 rxbi->mapping = 0;
700 rxbi->len = 0;
701 }
702}
703
704static void
705jme_free_rx_resources(struct jme_adapter *jme)
706{
707 int i;
708 struct jme_ring *rxring = &(jme->rxring[0]);
709
710 if(rxring->alloc) {
711 for(i = 0 ; i < RING_DESC_NR ; ++i)
712 jme_free_rx_buf(jme, i);
713
714 dma_free_coherent(&(jme->pdev->dev),
715 RX_RING_ALLOC_SIZE,
716 rxring->alloc,
717 rxring->dmaalloc);
718 rxring->alloc = NULL;
719 rxring->desc = NULL;
720 rxring->dmaalloc = 0;
721 rxring->dma = 0;
722 }
723 rxring->next_to_use = 0;
724 rxring->next_to_clean = 0;
725}
726
727static int
728jme_setup_rx_resources(struct jme_adapter *jme)
729{
730 int i;
731 struct jme_ring *rxring = &(jme->rxring[0]);
732
733 rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
734 RX_RING_ALLOC_SIZE,
735 &(rxring->dmaalloc),
736 GFP_KERNEL);
737 if(!rxring->alloc) {
738 rxring->desc = NULL;
739 rxring->dmaalloc = 0;
740 rxring->dma = 0;
741 return -ENOMEM;
742 }
743
744 /*
745 * 16-byte alignment
746 */
747 rxring->desc = (void*)ALIGN((unsigned long)(rxring->alloc),
748 RING_DESC_ALIGN);
749 rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
750 rxring->next_to_use = 0;
751 rxring->next_to_clean = 0;
752
753 /*
754 * Initialize Receive Descriptors
755 */
756 for(i = 0 ; i < RING_DESC_NR ; ++i) {
757 if(unlikely(jme_make_new_rx_buf(jme, i))) {
758 jme_free_rx_resources(jme);
759 return -ENOMEM;
760 }
761
762 jme_set_clean_rxdesc(jme, i);
763 }
764
765 return 0;
766}
767
768__always_inline static void
769jme_enable_rx_engine(struct jme_adapter *jme)
770{
771 /*
772 * Setup RX DMA Base Address
773 */
774 jwrite32(jme, JME_RXDBA_LO, jme->rxring[0].dma);
775 jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
776 jwrite32(jme, JME_RXNDA, jme->rxring[0].dma);
777
778 /*
779 * Setup RX Descriptor Count
780 */
781 jwrite32(jme, JME_RXQDC, RING_DESC_NR);
782
783 /*
784 * Setup Unicast Filter
785 */
786 jme->reg_rxmcs = RXMCS_VTAGRM | RXMCS_PREPAD;
787 jme_set_multi(jme->dev);
788
789 /*
790 * Enable RX Engine
791 */
792 wmb();
793 jwrite32(jme, JME_RXCS, RXCS_DEFAULT |
794 RXCS_QUEUESEL_Q0 |
795 RXCS_ENABLE |
796 RXCS_QST);
797}
798
799__always_inline static void
800jme_restart_rx_engine(struct jme_adapter *jme)
801{
802 /*
803 * Start RX Engine
804 */
805 jwrite32(jme, JME_RXCS, RXCS_DEFAULT |
806 RXCS_QUEUESEL_Q0 |
807 RXCS_ENABLE |
808 RXCS_QST);
809}
810
811
812__always_inline static void
813jme_disable_rx_engine(struct jme_adapter *jme)
814{
815 int i;
816 __u32 val;
817
818 /*
819 * Disable RX Engine
820 */
821 val = jread32(jme, JME_RXCS);
822 val &= ~RXCS_ENABLE;
823 jwrite32(jme, JME_RXCS, val);
824
825 val = jread32(jme, JME_RXCS);
826 for(i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i)
827 {
828 udelay(1);
829 val = jread32(jme, JME_RXCS);
830 }
831
832 if(!i)
833 jeprintk(jme->dev->name, "Disable RX engine timeout.\n");
834
835}
836
837static void
838jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
839{
840 if(dpi->attempt == atmp) {
841 ++(dpi->cnt);
842 }
843 else {
844 dpi->attempt = atmp;
845 dpi->cnt = 0;
846 }
847}
848
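/*
 * Re-evaluate the RX coalescing profile once per PCC_INTERVAL based on
 * the byte count since the last check point, and switch profiles only
 * after the same candidate wins several consecutive evaluations.
 */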
849static void
850jme_dynamic_pcc(struct jme_adapter *jme)
851{
852 register struct dynpcc_info *dpi = &(jme->dpi);
853
854 if(jiffies >= dpi->check_point) {
855 if(jiffies > (dpi->check_point + PCC_INTERVAL)) {
856 jme_attempt_pcc(dpi, PCC_P1);
857 }
858 else {
859 if((NET_STAT(jme).rx_bytes - dpi->last_bytes) >
860 PCC_P3_THRESHOLD)
861 jme_attempt_pcc(dpi, PCC_P3);
862 else if((NET_STAT(jme).rx_bytes - dpi->last_bytes) >
863 PCC_P2_THRESHOLD)
864 jme_attempt_pcc(dpi, PCC_P2);
865 else
866 jme_attempt_pcc(dpi, PCC_P1);
867 }
868
869 if(unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
870 jme_set_rx_pcc(jme, dpi->attempt);
871 dpi->cur = dpi->attempt;
872 dpi->cnt = 0;
873 }
874
875 dpi->last_bytes = NET_STAT(jme).rx_bytes;
876 dpi->last_pkts = NET_STAT(jme).rx_packets;
877 dpi->check_point = jiffies + PCC_INTERVAL;
878 }
879}
880
881static void
882jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
883{
884 struct jme_ring *rxring = &(jme->rxring[0]);
885 volatile struct rxdesc *rxdesc = rxring->desc;
886 struct jme_buffer_info *rxbi = rxring->bufinf;
887 struct sk_buff *skb;
888 int framesize;
889
890 rxdesc += idx;
891 rxbi += idx;
892
893 skb = rxbi->skb;
894 pci_dma_sync_single_for_cpu(jme->pdev,
895 rxbi->mapping,
896 rxbi->len,
897 PCI_DMA_FROMDEVICE);
898
899 if(unlikely(jme_make_new_rx_buf(jme, idx))) {
900 pci_dma_sync_single_for_device(jme->pdev,
901 rxbi->mapping,
902 rxbi->len,
903 PCI_DMA_FROMDEVICE);
904
905 ++(NET_STAT(jme).rx_dropped);
906 }
907 else {
908 framesize = le16_to_cpu(rxdesc->descwb.framesize)
909 - RX_PREPAD_SIZE;
910
911 skb_reserve(skb, RX_PREPAD_SIZE);
912 skb_put(skb, framesize);
913 skb->protocol = eth_type_trans(skb, jme->dev);
914
915 netif_rx(skb);
916
917 if(le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST_MUL)
918 ++(NET_STAT(jme).multicast);
919
920 jme->dev->last_rx = jiffies;
921 NET_STAT(jme).rx_bytes += framesize;
922 ++(NET_STAT(jme).rx_packets);
923 }
924
925 jme_set_clean_rxdesc(jme, idx);
926
927}
928
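/*
 * Clean up to 'limit' RX descriptors: hand completed frames to the
 * stack, account errors, and recycle the descriptors for reuse.
 */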
929static int
930jme_process_receive(struct jme_adapter *jme, int limit)
931{
932 struct jme_ring *rxring = &(jme->rxring[0]);
933 volatile struct rxdesc *rxdesc = rxring->desc;
934 int i, j, ccnt, desccnt;
935
936 i = rxring->next_to_clean;
937 while( limit-- > 0 )
938 {
939 rxdesc = rxring->desc;
940 rxdesc += i;
941
942 if((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
943 !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
944 goto out;
945
946 desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;
947
948 rx_dbg(jme->dev->name, "RX: Cleaning %d\n", i);
949
950 if(desccnt > 1 ||
951 rxdesc->descwb.errstat & RXWBERR_ALLERR) {
952
953 if(rxdesc->descwb.errstat & RXWBERR_CRCERR)
954 ++(NET_STAT(jme).rx_crc_errors);
955 else if(rxdesc->descwb.errstat & RXWBERR_OVERUN)
956 ++(NET_STAT(jme).rx_fifo_errors);
957 else
958 ++(NET_STAT(jme).rx_errors);
959
960 if(desccnt > 1)
961 limit -= desccnt - 1;
962
963 for(j = i, ccnt = desccnt ; ccnt-- ; ) {
964 jme_set_clean_rxdesc(jme, j);
965
966 if(unlikely(++j == RING_DESC_NR))
967 j = 0;
968 }
969
970 }
971 else {
972 jme_alloc_and_feed_skb(jme, i);
973 }
974
975
976 if((i += desccnt) >= RING_DESC_NR)
977 i -= RING_DESC_NR;
978 }
979
980out:
981 rx_dbg(jme->dev->name, "RX: Stop at %d\n", i);
982 rx_dbg(jme->dev->name, "RX: RXNDA offset %d\n",
983 (jread32(jme, JME_RXNDA) - jread32(jme, JME_RXDBA_LO))
984 >> 4);
985
986 jme_dynamic_pcc(jme);
987 rxring->next_to_clean = i;
988
989 return limit > 0 ? limit : 0;
990
991}
992
993static void
994jme_link_change_tasklet(unsigned long arg)
995{
996 struct jme_adapter *jme = (struct jme_adapter*)arg;
997 jme_check_link(jme->dev);
998}
999
1000static void
1001jme_rx_clean_tasklet(unsigned long arg)
1002{
1003 struct jme_adapter *jme = (struct jme_adapter*)arg;
1004
1005 spin_lock(&jme->rx_lock);
1006 jme_process_receive(jme, RING_DESC_NR);
1007 spin_unlock(&jme->rx_lock);
1008 if(jme->flags & JME_FLAG_RXQ0_EMPTY) {
1009 jme->flags &= ~JME_FLAG_RXQ0_EMPTY;
1010 jme_restart_rx_engine(jme);
1011 }
1012}
1013
1014static void
1015jme_tx_clean_tasklet(unsigned long arg)
1016{
1017 struct jme_adapter *jme = (struct jme_adapter*)arg;
1018 struct jme_ring *txring = &(jme->txring[0]);
1019 volatile struct txdesc *txdesc = txring->desc;
1020 struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
1021 int i, j, cnt = 0, max;
1022
1023 spin_lock(&jme->tx_lock);
1024 max = RING_DESC_NR - txring->nr_free;
1025 spin_unlock(&jme->tx_lock);
1026
1027 tx_dbg(jme->dev->name, "Tx Tasklet: In\n");
1028
1029 for(i = txring->next_to_clean ; cnt < max ; ) {
1030
1031 ctxbi = txbi + i;
1032
1033 if(ctxbi->skb && !(txdesc[i].desc1.flags & TXFLAG_OWN)) {
1034
1035 tx_dbg(jme->dev->name,
1036 "Tx Tasklet: Clean %d+%d\n",
1037 i, ctxbi->nr_desc);
1038
1039 for(j = 1 ; j < ctxbi->nr_desc ; ++j) {
1040 ttxbi = txbi + ((i + j) & (RING_DESC_NR - 1));
1041 txdesc[(i+j)&(RING_DESC_NR-1)].dw[0] = 0;
1042
1043 pci_unmap_single(jme->pdev,
1044 ttxbi->mapping,
1045 ttxbi->len,
1046 PCI_DMA_TODEVICE);
1047
1048 NET_STAT(jme).tx_bytes += ttxbi->len;
1049 ttxbi->mapping = 0;
1050 ttxbi->len = 0;
1051 }
1052
1053 dev_kfree_skb(ctxbi->skb);
1054 ctxbi->skb = NULL;
1055
1056 cnt += ctxbi->nr_desc;
1057
1058 ++(NET_STAT(jme).tx_packets);
1059 }
1060 else {
1061 if(!ctxbi->skb)
1062 tx_dbg(jme->dev->name,
1063 "Tx Tasklet:"
1064 " Stoped due to no skb.\n");
1065 else
1066 tx_dbg(jme->dev->name,
1067 "Tx Tasklet:"
1068 "Stoped due to not done.\n");
1069 break;
1070 }
1071
1072 if(unlikely((i += ctxbi->nr_desc) >= RING_DESC_NR))
1073 i -= RING_DESC_NR;
1074
1075 ctxbi->nr_desc = 0;
1076 }
1077
1078 tx_dbg(jme->dev->name,
1079 "Tx Tasklet: Stop %d Jiffies %lu\n",
1080 i, jiffies);
1081 txring->next_to_clean = i;
1082
1083 spin_lock(&jme->tx_lock);
1084 txring->nr_free += cnt;
1085 spin_unlock(&jme->tx_lock);
1086
1087}
1088
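/*
 * Interrupt handler: read the interrupt status (from the shadow area
 * or JME_IEVE), then defer link-change, RX-clean and TX-clean work to
 * their tasklets with the interrupt sources masked meanwhile.
 */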
1089static irqreturn_t
1090jme_intr(int irq, void *dev_id)
1091{
1092 struct net_device *netdev = dev_id;
1093 struct jme_adapter *jme = netdev_priv(netdev);
1094 irqreturn_t rc = IRQ_HANDLED;
1095 __u32 intrstat;
1096
1097#if USE_IEVE_SHADOW
1098 pci_dma_sync_single_for_cpu(jme->pdev,
1099 jme->shadow_dma,
1100 sizeof(__u32) * SHADOW_REG_NR,
1101 PCI_DMA_FROMDEVICE);
1102 intrstat = jme->shadow_regs[SHADOW_IEVE];
1103 jme->shadow_regs[SHADOW_IEVE] = 0;
1104#else
1105 intrstat = jread32(jme, JME_IEVE);
1106#endif
1107
1108 /*
1109 * Check if it's really an interrupt for us
1110 */
1111 if(intrstat == 0) {
1112 rc = IRQ_NONE;
1113 goto out;
1114 }
1115
1116 /*
1117 * Check if the device still exists
1118 */
1119 if(unlikely(intrstat == ~((typeof(intrstat))0))) {
1120 rc = IRQ_NONE;
1121 goto out;
1122 }
1123
1124 /*
1125 * Allow only one interrupt handling at a time
1126 */
1127 if(unlikely(!atomic_dec_and_test(&jme->intr_sem)))
1128 goto out;
1129
1130 /*
1131 * Disable interrupt
1132 */
1133 jwrite32f(jme, JME_IENC, INTR_ENABLE);
1134
1135 if(intrstat & INTR_LINKCH)
1136 tasklet_schedule(&jme->linkch_task);
1137
1138 if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP)) {
1139 if(intrstat & INTR_RX0EMP) {
1140 jme->flags |= JME_FLAG_RXQ0_EMPTY;
1141 jeprintk(netdev->name, "Ran out of Receive Queue 0.\n");
1142 }
1143
1144 tasklet_schedule(&jme->rxclean_task);
1145 }
1146
1147 if(intrstat & (INTR_PCCTXTO | INTR_PCCTX))
1148 tasklet_schedule(&jme->txclean_task);
1149
1150 if((intrstat & ~INTR_ENABLE) != 0) {
1151 /*
1152 * Some interrupts were not handled,
1153 * but they are not enabled either (for debugging)
1154 */
1155 }
1156
1157 /*
1158 * Deassert interrupts
1159 */
1160 jwrite32f(jme, JME_IEVE, intrstat);
1161
1162 /*
1163 * Enable next interrupt handling
1164 */
1165 atomic_set(&jme->intr_sem, 1);
1166
1167 /*
1168 * Re-enable interrupt
1169 */
1170 jwrite32f(jme, JME_IENS, INTR_ENABLE);
1171
1172out:
1173 return rc;
1174}
1175
1176static int
1177jme_open(struct net_device *netdev)
1178{
1179 struct jme_adapter *jme = netdev_priv(netdev);
1180 int rc;
1181
1182 rc = request_irq(jme->pdev->irq, jme_intr,
1183 IRQF_SHARED, netdev->name, netdev);
1184 if(rc) {
1185 printk(KERN_ERR PFX "Error requesting IRQ.\n");
1186 goto err_out;
1187 }
1188
1189 rc = jme_setup_rx_resources(jme);
1190 if(rc) {
1191 printk(KERN_ERR PFX "Error allocating RX resources.\n");
1192 goto err_out_free_irq;
1193 }
1194
1195
1196 rc = jme_setup_tx_resources(jme);
1197 if(rc) {
1198 printk(KERN_ERR PFX "Error allocating TX resources.\n");
1199 goto err_out_free_rx_resources;
1200 }
1201
1202 jme_reset_mac_processor(jme);
1203 jme_check_link(netdev);
1204 jme_enable_shadow(jme);
1205 jme_start_irq(jme);
1206 jme_enable_rx_engine(jme);
1207 jme_enable_tx_engine(jme);
1208 netif_start_queue(netdev);
1209
1210 return 0;
1211
1212err_out_free_rx_resources:
1213 jme_free_rx_resources(jme);
1214err_out_free_irq:
1215 free_irq(jme->pdev->irq, jme->dev);
1216err_out:
1217 netif_stop_queue(netdev);
1218 netif_carrier_off(netdev);
1219 return rc;
1220}
1221
1222static int
1223jme_close(struct net_device *netdev)
1224{
1225 struct jme_adapter *jme = netdev_priv(netdev);
1226
1227 netif_stop_queue(netdev);
1228 netif_carrier_off(netdev);
1229
1230 jme_stop_irq(jme);
1231 jme_disable_shadow(jme);
1232 free_irq(jme->pdev->irq, jme->dev);
1233
1234 tasklet_kill(&jme->linkch_task);
1235 tasklet_kill(&jme->txclean_task);
1236 tasklet_kill(&jme->rxclean_task);
1237 jme_disable_rx_engine(jme);
1238 jme_disable_tx_engine(jme);
1239 jme_free_rx_resources(jme);
1240 jme_free_tx_resources(jme);
1241
1242 return 0;
1243}
1244
1245/*
1246 * This function is already protected by netif_tx_lock()
1247 */
1248static int
1249jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1250{
1251 struct jme_adapter *jme = netdev_priv(netdev);
1252 int rc;
1253
1254 rc = jme_set_new_txdesc(jme, skb);
1255
1256 if(unlikely(rc != NETDEV_TX_OK))
1257 return rc;
1258
1259 jwrite32(jme, JME_TXCS, jme->reg_txcs |
1260 TXCS_SELECT_QUEUE0 |
1261 TXCS_QUEUE0S |
1262 TXCS_ENABLE);
1263 netdev->trans_start = jiffies;
1264
1265 return NETDEV_TX_OK;
1266}
1267
1268static int
1269jme_set_macaddr(struct net_device *netdev, void *p)
1270{
1271 struct jme_adapter *jme = netdev_priv(netdev);
1272 struct sockaddr *addr = p;
1273 __u32 val;
1274
1275 if(netif_running(netdev))
1276 return -EBUSY;
1277
1278 spin_lock(&jme->phy_lock);
1279 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1280
1281 val = addr->sa_data[3] << 24 |
1282 addr->sa_data[2] << 16 |
1283 addr->sa_data[1] << 8 |
1284 addr->sa_data[0];
1285 jwrite32(jme, JME_RXUMA_LO, val);
1286 val = addr->sa_data[5] << 8 |
1287 addr->sa_data[4];
1288 jwrite32(jme, JME_RXUMA_HI, val);
1289 spin_unlock(&jme->phy_lock);
1290
1291 return 0;
1292}
1293
1294static void
1295jme_set_multi(struct net_device *netdev)
1296{
1297 struct jme_adapter *jme = netdev_priv(netdev);
1298 u32 mc_hash[2] = {};
1299 __u32 val;
1300 int i;
1301
1302 spin_lock(&jme->phy_lock);
1303 val = jme->reg_rxmcs | RXMCS_BRDFRAME | RXMCS_UNIFRAME;
1304
1305 if (netdev->flags & IFF_PROMISC) {
1306 val |= RXMCS_ALLFRAME;
1307 }
1308 else if (netdev->flags & IFF_ALLMULTI) {
1309 val |= RXMCS_ALLMULFRAME;
1310 }
1311 else if(netdev->flags & IFF_MULTICAST) {
1312 struct dev_mc_list *mclist;
1313 int bit_nr;
1314
1315 val |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
1316 for (i = 0, mclist = netdev->mc_list;
1317 mclist && i < netdev->mc_count;
1318 ++i, mclist = mclist->next) {
1319
1320 bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
1321 mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
1322 }
1323
1324 jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
1325 jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
1326 }
1327
1328
1329 wmb();
1330 jwrite32(jme, JME_RXMCS, val);
1331 spin_unlock(&jme->phy_lock);
1332}
1333
1334static int
1335jme_change_mtu(struct net_device *dev, int new_mtu)
1336{
1337 /*
1338 * Not supporting MTU change for now.
1339 */
1340 return -EINVAL;
1341}
1342
1343static void
1344jme_get_drvinfo(struct net_device *netdev,
1345 struct ethtool_drvinfo *info)
1346{
1347 struct jme_adapter *jme = netdev_priv(netdev);
1348
1349 strcpy(info->driver, DRV_NAME);
1350 strcpy(info->version, DRV_VERSION);
1351 strcpy(info->bus_info, pci_name(jme->pdev));
1352}
1353
1354static int
1355jme_get_settings(struct net_device *netdev,
1356 struct ethtool_cmd *ecmd)
1357{
1358 struct jme_adapter *jme = netdev_priv(netdev);
1359 int rc;
1360 spin_lock(&jme->phy_lock);
1361 rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
1362 spin_unlock(&jme->phy_lock);
1363 return rc;
1364}
1365
1366static int
1367jme_set_settings(struct net_device *netdev,
1368 struct ethtool_cmd *ecmd)
1369{
1370 struct jme_adapter *jme = netdev_priv(netdev);
1371 int rc;
1372 spin_lock(&jme->phy_lock);
1373 rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
1374 spin_unlock(&jme->phy_lock);
1375 return rc;
1376}
1377
1378static __u32
1379jme_get_link(struct net_device *netdev)
1380{
1381 struct jme_adapter *jme = netdev_priv(netdev);
1382 return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
1383}
1384
1385static const struct ethtool_ops jme_ethtool_ops = {
1386 .get_drvinfo = jme_get_drvinfo,
1387 .get_settings = jme_get_settings,
1388 .set_settings = jme_set_settings,
1389 .get_link = jme_get_link,
1390};
1391
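/*
 * Try to enable 64-bit DMA first, then fall back to 32-bit.
 * Returns 1 for 64-bit, 0 for 32-bit, -1 if neither mask can be set.
 */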
1392static int
1393jme_pci_dma64(struct pci_dev *pdev)
1394{
1395 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
1396 if(!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
1397 return 1;
1398
1399 if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
1400 if(!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
1401 return 0;
1402
1403 return -1;
1404}
1405
1406static int __devinit
1407jme_init_one(struct pci_dev *pdev,
1408 const struct pci_device_id *ent)
1409{
1410 int rc = 0, using_dac;
1411 struct net_device *netdev;
1412 struct jme_adapter *jme;
1413
1414 /*
1415 * set up PCI device basics
1416 */
1417 rc = pci_enable_device(pdev);
1418 if(rc) {
1419 printk(KERN_ERR PFX "Cannot enable PCI device.\n");
1420 goto err_out;
1421 }
1422
1423 using_dac = jme_pci_dma64(pdev);
1424 if(using_dac < 0) {
1425 printk(KERN_ERR PFX "Cannot set PCI DMA Mask.\n");
1426 rc = -EIO;
1427 goto err_out_disable_pdev;
1428 }
1429
1430 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1431 printk(KERN_ERR PFX "No PCI resource region found.\n");
1432 rc = -ENOMEM;
1433 goto err_out_disable_pdev;
1434 }
1435
1436 rc = pci_request_regions(pdev, DRV_NAME);
1437 if(rc) {
1438 printk(KERN_ERR PFX "Cannot obtain PCI resource region.\n");
1439 goto err_out_disable_pdev;
1440 }
1441
1442 pci_set_master(pdev);
1443
1444 /*
1445 * alloc and init net device
1446 */
1447 netdev = alloc_etherdev(sizeof(*jme));
1448 if(!netdev) {
1449 rc = -ENOMEM;
1450 goto err_out_release_regions;
1451 }
1452 netdev->open = jme_open;
1453 netdev->stop = jme_close;
1454 netdev->hard_start_xmit = jme_start_xmit;
1455 netdev->irq = pdev->irq;
1456 netdev->set_mac_address = jme_set_macaddr;
1457 netdev->set_multicast_list = jme_set_multi;
1458 netdev->change_mtu = jme_change_mtu;
1459 netdev->ethtool_ops = &jme_ethtool_ops;
1460 NETDEV_GET_STATS(netdev, &jme_get_stats);
1461
1462 if(using_dac)
1463 netdev->features = NETIF_F_HIGHDMA;
1464
1465 SET_NETDEV_DEV(netdev, &pdev->dev);
1466 pci_set_drvdata(pdev, netdev);
1467
1468 /*
1469 * init adapter info
1470 */
1471 jme = netdev_priv(netdev);
1472 jme->pdev = pdev;
1473 jme->dev = netdev;
1474 jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
1475 jme->regs = ioremap(pci_resource_start(pdev, 0),
1476 pci_resource_len(pdev, 0));
1477 if (!(jme->regs)) {
1478 rc = -ENOMEM;
1479 goto err_out_free_netdev;
1480 }
1481 jme->shadow_regs = pci_alloc_consistent(pdev,
1482 sizeof(__u32) * SHADOW_REG_NR,
1483 &(jme->shadow_dma));
1484 if (!(jme->shadow_regs)) {
1485 rc = -ENOMEM;
1486 goto err_out_unmap;
1487 }
1488
1489 spin_lock_init(&jme->rx_lock);
1490 spin_lock_init(&jme->tx_lock);
1491 spin_lock_init(&jme->phy_lock);
1492 tasklet_init(&jme->linkch_task,
1493 &jme_link_change_tasklet,
1494 (unsigned long) jme);
1495 tasklet_init(&jme->txclean_task,
1496 &jme_tx_clean_tasklet,
1497 (unsigned long) jme);
1498 tasklet_init(&jme->rxclean_task,
1499 &jme_rx_clean_tasklet,
1500 (unsigned long) jme);
1501 jme->mii_if.dev = netdev;
1502 jme->mii_if.phy_id = 1;
1503 jme->mii_if.supports_gmii = 1;
1504 jme->mii_if.mdio_read = jme_mdio_read;
1505 jme->mii_if.mdio_write = jme_mdio_write;
1506
1507 /*
1508 * Reset MAC processor and reload EEPROM for MAC Address
1509 */
1510 jme_clear_pm(jme);
1511 jme_reset_phy_processor(jme);
1512 jme_reset_mac_processor(jme);
1513 rc = jme_reload_eeprom(jme);
1514 if(rc) {
1515 printk(KERN_ERR PFX
1516 "Rload eeprom for reading MAC Address error.\n");
1517 goto err_out_free_shadow;
1518 }
1519 jme_load_macaddr(netdev);
1520
1521
1522 /*
1523 * Tell stack that we are not ready to work until open()
1524 */
1525 netif_carrier_off(netdev);
1526 netif_stop_queue(netdev);
1527
1528 /*
1529 * Register netdev
1530 */
1531 rc = register_netdev(netdev);
1532 if(rc) {
1533 printk(KERN_ERR PFX "Cannot register net device.\n");
1534 goto err_out_free_shadow;
1535 }
1536
1537 jprintk(netdev->name,
1538 "JMC250 gigabit eth at %llx, "
1539 "%02x:%02x:%02x:%02x:%02x:%02x, IRQ %d\n",
1540 (unsigned long long) pci_resource_start(pdev, 0),
1541 netdev->dev_addr[0],
1542 netdev->dev_addr[1],
1543 netdev->dev_addr[2],
1544 netdev->dev_addr[3],
1545 netdev->dev_addr[4],
1546 netdev->dev_addr[5],
1547 pdev->irq);
1548
1549 return 0;
1550
1551err_out_free_shadow:
1552 pci_free_consistent(pdev,
1553 sizeof(__u32) * SHADOW_REG_NR,
1554 jme->shadow_regs,
1555 jme->shadow_dma);
1556err_out_unmap:
1557 iounmap(jme->regs);
1558err_out_free_netdev:
1559 pci_set_drvdata(pdev, NULL);
1560 free_netdev(netdev);
1561err_out_release_regions:
1562 pci_release_regions(pdev);
1563err_out_disable_pdev:
1564 pci_disable_device(pdev);
1565err_out:
1566 return rc;
1567}
1568
1569static void __devexit
1570jme_remove_one(struct pci_dev *pdev)
1571{
1572 struct net_device *netdev = pci_get_drvdata(pdev);
1573 struct jme_adapter *jme = netdev_priv(netdev);
1574
1575 unregister_netdev(netdev);
1576 pci_free_consistent(pdev,
1577 sizeof(__u32) * SHADOW_REG_NR,
1578 jme->shadow_regs,
1579 jme->shadow_dma);
1580 iounmap(jme->regs);
1581 pci_set_drvdata(pdev, NULL);
1582 free_netdev(netdev);
1583 pci_release_regions(pdev);
1584 pci_disable_device(pdev);
1585
1586}
1587
1588static struct pci_device_id jme_pci_tbl[] = {
1589 { PCI_VDEVICE(JMICRON, 0x250) },
1590 { }
1591};
1592
1593static struct pci_driver jme_driver = {
1594 .name = DRV_NAME,
1595 .id_table = jme_pci_tbl,
1596 .probe = jme_init_one,
1597 .remove = __devexit_p(jme_remove_one),
1598#if 0
1599#ifdef CONFIG_PM
1600 .suspend = jme_suspend,
1601 .resume = jme_resume,
1602#endif /* CONFIG_PM */
1603#endif
1604};
1605
1606static int __init
1607jme_init_module(void)
1608{
1609 printk(KERN_INFO PFX "JMicron JMC250 gigabit ethernet "
1610 "driver version %s\n", DRV_VERSION);
1611 return pci_register_driver(&jme_driver);
1612}
1613
1614static void __exit
1615jme_cleanup_module(void)
1616{
1617 pci_unregister_driver(&jme_driver);
1618}
1619
1620module_init(jme_init_module);
1621module_exit(jme_cleanup_module);
1622
1623MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
1624MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
1625MODULE_LICENSE("GPL");
1626MODULE_VERSION(DRV_VERSION);
1627MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
1628
1629