/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * http://www.jmicron.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

/*
 * Timeline before release:
 *	Stage 1: Basic Performance / Capability fine tune.
 *	-  Implement PCC -- Dynamic adjustment.
 *	-  Use NAPI instead of rx_tasklet?
 *	   PCC supports both packet-counter and timeout interrupts for
 *	   receive and transmit complete, so is NAPI really needed?
 *	   I'll add NAPI support anyway..
 *	   For CPU-busy and heavy-network-loading systems..
 *	-  Try setting 64bit DMA with pci_set[_consistent]_dma_mask
 *	   and set netdev feature flag.
 *	   (Need to modify the transmit descriptor filling policy as well.)
 *	-  Use pci_map_page instead of pci_map_single for HIGHMEM support.
 *
 *	Stage 2: Error handling.
 *	-  Watchdog
 *	-  Transmit timeout
 *
 *	Stage 3: Basic offloading support.
 *	-  Implement scatter-gather offloading.
 *	   A system page per RX (buffer|descriptor)?
 *	   Handle fragmented sk_buffs in TX descriptors.
 *	-  Implement TX/RX IPv6/IP/TCP/UDP checksum offloading.
 *
 *	Stage 4: Basic feature support.
 *	-  Implement Power Management related functions.
 *	-  Implement Jumbo frame.
 *	-  Implement MSI.
 *
 *	Stage 5: Advanced offloading support.
 *	-  Implement VLAN offloading.
 *	-  Implement TCP Segmentation offloading.
 *
 *	Stage 6: CPU load balancing.
 *	-  Implement MSI-X.
 *	   Along with multiple RX queues, for CPU load balancing.
 *	-  Use multiple TX queues for multiple CPUs to transmit
 *	   simultaneously without locking.
 *
 *	Stage 7:
 *	-  Clean up / reorganize code, performance tuning (alignment etc...).
 *	-  Test and release 1.0.
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include "jme.h"

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
static struct net_device_stats *jme_get_stats(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return &jme->stats;
}
#endif

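/*
 * Read and write PHY registers through the SMI interface,
 * polling until the controller clears the request bit.
 */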
static int jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, val;

	jwrite32(jme, JME_SMI, SMI_OP_REQ |
				smi_phy_addr(phy) |
				smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT; i > 0; --i) {
		udelay(1);
		if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0) {
		jeprintk(netdev->name, "phy read timeout : %d\n", reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

static void jme_mdio_write(struct net_device *netdev, int phy, int reg, int val)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i;

	jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
		((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
		smi_phy_addr(phy) | smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT; i > 0; --i)
	{
		udelay(1);
		if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0)
		jeprintk(netdev->name, "phy write timeout : %d\n", reg);

	return;
}

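/*
 * Software-reset the MAC processor and clear the multicast hash
 * table, the wakeup frame registers and GPREG0/GPREG1.
 */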
static void jme_reset_mac_processor(struct jme_adapter *jme)
{
	__u32 val;

	val = jread32(jme, JME_GHC);
	val |= GHC_SWRST;
	jwrite32(jme, JME_GHC, val);
	udelay(2);
	val &= ~GHC_SWRST;
	jwrite32(jme, JME_GHC, val);
	jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
	jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
	jwrite32(jme, JME_WFODP, 0);
	jwrite32(jme, JME_WFOI, 0);
	jwrite32(jme, JME_GPREG0, GPREG0_DEFAULT);
	jwrite32(jme, JME_GPREG1, 0);
}

__always_inline static void jme_clear_pm(struct jme_adapter *jme)
{
	jwrite32(jme, JME_PMCS, 0xFFFF0000);
	pci_set_power_state(jme->pdev, PCI_D0);
}

static int jme_reload_eeprom(struct jme_adapter *jme)
{
	__u32 val;
	int i;

	val = jread32(jme, JME_SMBCSR);

	if(val & SMBCSR_EEPROMD)
	{
		val |= SMBCSR_CNACK;
		jwrite32(jme, JME_SMBCSR, val);
		val |= SMBCSR_RELOAD;
		jwrite32(jme, JME_SMBCSR, val);
		mdelay(12);

		for (i = JME_SMB_TIMEOUT; i > 0; --i)
		{
			mdelay(1);
			if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
				break;
		}

		if(i == 0) {
			jeprintk(jme->dev->name, "eeprom reload timeout\n");
			return -EIO;
		}
	}
	else
		return -EIO;

	return 0;
}

__always_inline static void jme_load_macaddr(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned char macaddr[6];
	__u32 val;

	val = jread32(jme, JME_RXUMA_LO);
	macaddr[0] = (val >> 0) & 0xFF;
	macaddr[1] = (val >> 8) & 0xFF;
	macaddr[2] = (val >> 16) & 0xFF;
	macaddr[3] = (val >> 24) & 0xFF;
	val = jread32(jme, JME_RXUMA_HI);
	macaddr[4] = (val >> 0) & 0xFF;
	macaddr[5] = (val >> 8) & 0xFF;
	memcpy(netdev->dev_addr, macaddr, 6);
}

__always_inline static void jme_start_irq(struct jme_adapter *jme)
{
	/*
	 * Enable Interrupts
	 */
	jwrite32(jme, JME_IENS, INTR_ENABLE);
}

__always_inline static void jme_stop_irq(struct jme_adapter *jme)
{
	/*
	 * Disable Interrupts
	 */
	jwrite32(jme, JME_IENC, INTR_ENABLE);
}

__always_inline static void jme_enable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme,
		 JME_SHBA_LO,
		 ((__u32)jme->shadow_dma & ~((__u32)0x1F)) | SHBA_POSTEN);
}

__always_inline static void jme_disable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme, JME_SHBA_LO, 0x0);
}

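/*
 * Read the PHY link status, wait for autonegotiation to complete
 * while the link is up, then program GHC/TXMCS to match the
 * negotiated speed and duplex.
 */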
static void jme_check_link(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 phylink, ghc, cnt = JME_AUTONEG_TIMEOUT;
	char linkmsg[32];

	phylink = jread32(jme, JME_PHY_LINK);

	if (phylink & PHY_LINK_UP) {
		/*
		 * Keep polling for autoneg complete
		 */
		while(!(phylink & PHY_LINK_AUTONEG_COMPLETE) && --cnt > 0) {
			mdelay(1);
			phylink = jread32(jme, JME_PHY_LINK);
		}

		if(!cnt)
			jeprintk(netdev->name, "Waiting for autoneg timed out.\n");

		switch(phylink & PHY_LINK_SPEED_MASK) {
			case PHY_LINK_SPEED_10M:
				ghc = GHC_SPEED_10M;
				strcpy(linkmsg, "10 Mbps, ");
				break;
			case PHY_LINK_SPEED_100M:
				ghc = GHC_SPEED_100M;
				strcpy(linkmsg, "100 Mbps, ");
				break;
			case PHY_LINK_SPEED_1000M:
				ghc = GHC_SPEED_1000M;
				strcpy(linkmsg, "1000 Mbps, ");
				break;
			default:
				ghc = 0;
				break;
		}
		ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;
		jwrite32(jme, JME_GHC, ghc);
		strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
					"Full-Duplex" :
					"Half-Duplex");

		if(phylink & PHY_LINK_DUPLEX)
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
		else
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
						 TXMCS_BACKOFF |
						 TXMCS_CARRIERSENSE |
						 TXMCS_COLLISION);

		jprintk(netdev->name, "Link is up at %s.\n", linkmsg);
		netif_carrier_on(netdev);
	}
	else {
		jprintk(netdev->name, "Link is down.\n");
		netif_carrier_off(netdev);
	}
}

static void jme_link_change_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter*)arg;
	jme_check_link(jme->dev);
}

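/*
 * Map the skb for DMA and fill TX descriptor i,
 * handing it to the hardware by setting the OWN bit last.
 */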
static void jme_set_new_txdesc(struct jme_adapter *jme,
			       int i, struct sk_buff *skb)
{
	struct jme_ring *txring = jme->txring;
	register struct TxDesc* txdesc = txring->desc;
	struct jme_buffer_info *txbi = txring->bufinf;
	dma_addr_t dmaaddr;

	txdesc += i;
	txbi += i;

	dmaaddr = pci_map_single(jme->pdev,
				 skb->data,
				 skb->len,
				 PCI_DMA_TODEVICE);

	pci_dma_sync_single_for_device(jme->pdev,
				       dmaaddr,
				       skb->len,
				       PCI_DMA_TODEVICE);

	txdesc->dw[0] = 0;
	txdesc->dw[1] = 0;
	txdesc->dw[2] = 0;
	txdesc->desc1.bufaddr = cpu_to_le32(dmaaddr);
	txdesc->desc1.datalen = cpu_to_le16(skb->len);
	txdesc->desc1.pktsize = cpu_to_le16(skb->len);
	/*
	 * Set the OWN bit last.
	 * The kernel may queue packets faster than the NIC sends them,
	 * so the NIC could pick up this descriptor before we tell it
	 * to start sending this TX queue; all other fields must
	 * already be filled in correctly by then.
	 */
	wmb();
	txdesc->desc1.flags = TXFLAG_OWN | TXFLAG_INT;
	txbi->skb = skb;
	txbi->mapping = dmaaddr;
	txbi->len = skb->len;

#ifdef TX_QUEUE_DEBUG
	dprintk(jme->dev->name, "TX Ring Buf Address(%08x,%08x,%d).\n",
		dmaaddr,
		(txdesc->all[12] << 0) |
		(txdesc->all[13] << 8) |
		(txdesc->all[14] << 16) |
		(txdesc->all[15] << 24),
		(txdesc->all[4] << 0) |
		(txdesc->all[5] << 8));
#endif

}

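/*
 * Allocate the TX descriptor ring with dma_alloc_coherent() and
 * align both the CPU and DMA addresses to RING_DESC_ALIGN.
 */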
static int jme_setup_tx_resources(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);

	txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
					   TX_RING_ALLOC_SIZE,
					   &(txring->dmaalloc),
					   GFP_KERNEL);
	if(!txring->alloc) {
		txring->desc = NULL;
		txring->dmaalloc = 0;
		txring->dma = 0;
		return -ENOMEM;
	}

	/*
	 * 16 Bytes align
	 */
	txring->desc = (void*)ALIGN((unsigned long)(txring->alloc), RING_DESC_ALIGN);
	txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
	txring->next_to_use = 0;
	txring->next_to_clean = 0;

#ifdef TX_QUEUE_DEBUG
	dprintk(jme->dev->name, "TX Ring Base Address(%08x,%08x).\n",
		(__u32)txring->desc,
		txring->dma);
#endif

	/*
	 * Initialize Transmit Descriptors
	 */
	memset(txring->alloc, 0, TX_RING_ALLOC_SIZE);
	memset(txring->bufinf, 0, sizeof(struct jme_buffer_info) * RING_DESC_NR);

	return 0;
}

static void jme_free_tx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi = txring->bufinf;

	if(txring->alloc) {
		for(i=0;i<RING_DESC_NR;++i) {
			txbi = txring->bufinf + i;
			if(txbi->skb) {
				dev_kfree_skb(txbi->skb);
				txbi->skb = NULL;
				txbi->mapping = 0;
				txbi->len = 0;
			}
		}

		dma_free_coherent(&(jme->pdev->dev),
				  TX_RING_ALLOC_SIZE,
				  txring->alloc,
				  txring->dmaalloc);
		txring->alloc = NULL;
		txring->desc = NULL;
		txring->dmaalloc = 0;
		txring->dma = 0;
	}
	txring->next_to_use = 0;
	txring->next_to_clean = 0;

}

__always_inline static void jme_enable_tx_engine(struct jme_adapter *jme)
{
	__u8 mrrs;

	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);

	/*
	 * Setup TX Queue 0 DMA Base Address
	 */
	jwrite32(jme, JME_TXDBA, jme->txring[0].dma);
	jwrite32(jme, JME_TXNDA, jme->txring[0].dma);

	/*
	 * Setup TX Descriptor Count
	 */
	jwrite32(jme, JME_TXQDC, RING_DESC_NR);

	/*
	 * Get Max Read Req Size from PCI Config Space
	 */
	pci_read_config_byte(jme->pdev, PCI_CONF_DCSR_MRRS, &mrrs);
	switch(mrrs) {
		case MRRS_128B:
			jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
			break;
		case MRRS_256B:
			jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
			break;
		default:
			jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
			break;
	};

	/*
	 * Enable TX Engine
	 */
	wmb();
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);

}

__always_inline static void jme_disable_tx_engine(struct jme_adapter *jme)
{
	int i;
	__u32 val;

	/*
	 * Disable TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs);

	val = jread32(jme, JME_TXCS);
	for(i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i)
	{
		udelay(1);
		val = jread32(jme, JME_TXCS);
	}

	if(!i)
		jeprintk(jme->dev->name, "Disable TX engine timeout.\n");

}

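/*
 * Re-initialize RX descriptor i with its preallocated buffer and
 * hand it back to the hardware by setting the OWN bit.
 */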
static void jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = jme->rxring;
	register struct RxDesc* rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxdesc += i;
	rxbi += i;

	rxdesc->dw[0] = 0;
	rxdesc->dw[1] = 0;
	rxdesc->desc1.bufaddrh = cpu_to_le32(((__u64)rxbi->mapping) >> 32);
	rxdesc->desc1.bufaddrl = cpu_to_le32(rxbi->mapping);
	rxdesc->desc1.datalen = cpu_to_le16(RX_BUF_SIZE);
	wmb();
	rxdesc->desc1.flags = RXFLAG_OWN | RXFLAG_INT;

#ifdef RX_QUEUE_DEBUG
	dprintk(jme->dev->name, "RX Ring Buf Address(%08x,%08x,%d).\n",
		rxbi->mapping,
		(rxdesc->all[12] << 0) |
		(rxdesc->all[13] << 8) |
		(rxdesc->all[14] << 16) |
		(rxdesc->all[15] << 24),
		(rxdesc->all[4] << 0) |
		(rxdesc->all[5] << 8));
#endif

}

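/*
 * Allocate and DMA-map a fresh receive skb for slot i,
 * aligning its data area to RX_BUF_DMA_ALIGN.
 */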
static int jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf;
	unsigned long offset;
	struct sk_buff* skb;

	skb = netdev_alloc_skb(jme->dev, RX_BUF_ALLOC_SIZE);
	if(unlikely(!skb))
		return -ENOMEM;
	if(unlikely(skb_shinfo(skb)->nr_frags)) {
		dprintk(jme->dev->name, "Allocated skb fragged(%d).\n", skb_shinfo(skb)->nr_frags);
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	if(unlikely(
		offset =
		(unsigned long)(skb->data)
		& (unsigned long)(RX_BUF_DMA_ALIGN - 1))) {
		skb_reserve(skb, RX_BUF_DMA_ALIGN - offset);
	}

	rxbi += i;
	rxbi->skb = skb;
	rxbi->mapping = pci_map_single(jme->pdev,
				       skb->data,
				       RX_BUF_SIZE,
				       PCI_DMA_FROMDEVICE);

	return 0;
}

static void jme_free_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxbi += i;

	if(rxbi->skb) {
		pci_unmap_single(jme->pdev,
				 rxbi->mapping,
				 RX_BUF_SIZE,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb(rxbi->skb);
		rxbi->skb = NULL;
		rxbi->mapping = 0;
	}
}

static int jme_setup_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
					   RX_RING_ALLOC_SIZE,
					   &(rxring->dmaalloc),
					   GFP_KERNEL);
	if(!rxring->alloc) {
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
		return -ENOMEM;
	}

	/*
	 * 16 Bytes align
	 */
	rxring->desc = (void*)ALIGN((unsigned long)(rxring->alloc), RING_DESC_ALIGN);
	rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
	rxring->next_to_use = 0;
	rxring->next_to_clean = 0;

#ifdef RX_QUEUE_DEBUG
	dprintk(jme->dev->name, "RX Ring Base Address(%08x,%08x).\n",
		(__u32)rxring->desc,
		rxring->dma);
#endif

	/*
	 * Initialize Receive Descriptors
	 */
	for(i = 0 ; i < RING_DESC_NR ; ++i) {
		if(unlikely(jme_make_new_rx_buf(jme, i)))
			break;

		jme_set_clean_rxdesc(jme, i);
	}

	/*
	 * Clean up allocated memory on error
	 */
	if(i != RING_DESC_NR) {
		for(--i ; i >= 0 ; --i)
			jme_free_rx_buf(jme, i);

		dma_free_coherent(&(jme->pdev->dev),
				  RX_RING_ALLOC_SIZE,
				  rxring->alloc,
				  rxring->dmaalloc);
		rxring->alloc = NULL;
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
		return -ENOMEM;
	}

	return 0;
}

static void jme_free_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	if(rxring->alloc) {
		for(i = 0 ; i < RING_DESC_NR ; ++i)
			jme_free_rx_buf(jme, i);

		dma_free_coherent(&(jme->pdev->dev),
				  RX_RING_ALLOC_SIZE,
				  rxring->alloc,
				  rxring->dmaalloc);
		rxring->alloc = NULL;
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
	}
	rxring->next_to_use = 0;
	rxring->next_to_clean = 0;
}

__always_inline static void jme_enable_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Setup RX DMA Base Address
	 */
	jwrite32(jme, JME_RXDBA, jme->rxring[0].dma);
	jwrite32(jme, JME_RXNDA, jme->rxring[0].dma);

	/*
	 * Setup RX Descriptor Count
	 */
	jwrite32(jme, JME_RXQDC, RING_DESC_NR);

	/*
	 * Setup Unicast Filter
	 */
	jme_set_multi(jme->dev);

	/*
	 * Enable RX Engine
	 */
	wmb();
	jwrite32(jme, JME_RXCS, RXCS_DEFAULT |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}

__always_inline static void jme_restart_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Re-enable RX Engine
	 */
	jwrite32(jme, JME_RXCS, RXCS_DEFAULT |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}

__always_inline static void jme_disable_rx_engine(struct jme_adapter *jme)
{
	int i;
	__u32 val;

	/*
	 * Disable RX Engine
	 */
	val = jread32(jme, JME_RXCS);
	val &= ~RXCS_ENABLE;
	jwrite32(jme, JME_RXCS, val);

	val = jread32(jme, JME_RXCS);
	for(i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i)
	{
		udelay(1);
		val = jread32(jme, JME_RXCS);
	}

	if(!i)
		jeprintk(jme->dev->name, "Disable RX engine timeout.\n");

}

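/*
 * Reclaim TX descriptors that the hardware has finished sending:
 * unmap the DMA buffer and free the skb for each completed slot.
 */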
static void jme_tx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter*)arg;
	struct jme_ring *txring = &(jme->txring[0]);
	struct TxDesc *txdesc = txring->desc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
	struct sk_buff *skb;
	int i, end;

#ifdef TX_TASKLET_DEBUG
	dprintk(jme->dev->name, "into tasklet\n");
#endif

	end = txring->next_to_use;
	for(i = txring->next_to_clean ; i != end ; ) {
		ctxbi = txbi + i;
		skb = ctxbi->skb;
		if(skb && !(txdesc[i].desc1.flags & TXFLAG_OWN)) {

#ifdef TX_TASKLET_DEBUG
			dprintk(jme->dev->name, "cleaning %d\n", i);
#endif

			pci_unmap_single(jme->pdev,
					 ctxbi->mapping,
					 skb->len,
					 PCI_DMA_TODEVICE);

			dev_kfree_skb(skb);
			prefetch(txbi + i + 1);
			prefetch(txdesc + i + 1);
			/*
			 * Clear the buffer info after freeing the skb;
			 * do not touch the freed skb again.
			 */
			ctxbi->skb = NULL;
			ctxbi->mapping = 0;
			ctxbi->len = 0;
		}
		else {
			break;
		}

		if(unlikely(++i == RING_DESC_NR))
			i = 0;
	}
	txring->next_to_clean = i;

}

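/*
 * Walk the RX ring from next_to_clean: count how many completed
 * descriptors are ready, then either drop errored frames or pass
 * good ones to the stack and recycle their descriptors.
 */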
static void jme_process_receive(struct jme_adapter *jme)
{
	struct net_device *netdev = jme->dev;
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct RxDesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi;
	struct sk_buff *skb;
	dma_addr_t buf_dma;
	int i, j, start, cnt, ccnt;
	unsigned int framesize, desccnt;

	/*
	 * Assume one descriptor per frame for now.
	 * Should be fixed in the future
	 * (or not, if the buffer is already large enough to store an entire packet).
	 */

	spin_lock(&jme->recv_lock);
	i = start = rxring->next_to_clean;
	/*
	 * Decide how many descriptors need to be processed.
	 * In the worst case we'll have to process the entire queue.
	 */
	for(cnt = 0 ; cnt < RING_DESC_NR ; )
	{
		rxdesc = (struct RxDesc*)(rxring->desc) + i;
		if((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
		   !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL)
		) {
			rxring->next_to_clean = i;
			break;
		}

		desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

		if(unlikely((cnt += desccnt) >= RING_DESC_NR)) {
			cnt -= desccnt;
			break;
		}

		if(unlikely((i += desccnt) >= RING_DESC_NR))
			i -= RING_DESC_NR;
	}
	spin_unlock(&jme->recv_lock);

	/*
	 * Process descriptors independently across CPUs
	 * --- reserved for future multiple-CPU handling.
	 */
	for( i = start ; cnt-- ; ) {
		rxdesc = (struct RxDesc*)(rxring->desc) + i;
		desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;
		rxbi = rxring->bufinf + i;
		if(unlikely(
			/*
			 * Drop and record error packet
			 */
			rxdesc->descwb.errstat & RXWBERR_ALLERR ||
			desccnt > 1)) {
			if(rxdesc->descwb.errstat & RXWBERR_OVERUN)
				++(NET_STAT.rx_fifo_errors);
			else if(rxdesc->descwb.errstat & RXWBERR_CRCERR)
				++(NET_STAT.rx_frame_errors);
			else {
				++(NET_STAT.rx_errors);
#ifdef RX_ERR_DEBUG
				dprintk(netdev->name, "err: %02x\n", rxdesc->descwb.errstat);
#endif
			}

			if(desccnt > 1)
				cnt -= desccnt-1;

			for(j=i,ccnt=desccnt;ccnt--;) {
				jme_set_clean_rxdesc(jme, j);

				if(unlikely(++j == RING_DESC_NR))
					j = 0;
			}
		}
		else {
			/*
			 * Pass received packet to kernel
			 */
			skb = rxbi->skb;
			buf_dma = rxbi->mapping;
			pci_dma_sync_single_for_cpu(jme->pdev,
						    buf_dma,
						    RX_BUF_SIZE,
						    PCI_DMA_FROMDEVICE);

			if(unlikely(jme_make_new_rx_buf(jme, i))) {
				pci_dma_sync_single_for_device(jme->pdev,
							       buf_dma,
							       RX_BUF_SIZE,
							       PCI_DMA_FROMDEVICE);
				++(NET_STAT.rx_dropped);
			}
			else {
				framesize = le16_to_cpu(rxdesc->descwb.framesize);

				skb_put(skb, framesize);
				skb->protocol = eth_type_trans(skb, netdev);

				netif_rx(skb);

				if(le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST_MUL)
					++(NET_STAT.multicast);

				netdev->last_rx = jiffies;
				NET_STAT.rx_bytes += framesize;
				++(NET_STAT.rx_packets);
			}

			jme_set_clean_rxdesc(jme, i);

#ifdef RX_PKT_DEBUG
			dprintk(netdev->name, "DESCCNT: %u, FSIZE: %u, ADDRH: %08x, "
				"ADDRL: %08x, FLAGS: %04x, STAT: %02x, "
				"DST:%02x:%02x:%02x:%02x:%02x:%02x\n",
				desccnt,
				framesize,
				le32_to_cpu(rxdesc->dw[2]),
				le32_to_cpu(rxdesc->dw[3]),
				le16_to_cpu(rxdesc->descwb.flags),
				rxdesc->descwb.errstat,
				rxbuf[0], rxbuf[1], rxbuf[2],
				rxbuf[3], rxbuf[4], rxbuf[5]);
#endif

		}

		if(unlikely((i+=desccnt) >= RING_DESC_NR))
			i -= RING_DESC_NR;

	}

}

static void jme_rx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter*)arg;

	jme_process_receive(jme);
	if(jme->flags & JME_FLAG_RXQ0_EMPTY) {
		jme_restart_rx_engine(jme);
		jme->flags &= ~JME_FLAG_RXQ0_EMPTY;
	}

}

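/*
 * Interrupt handler: read the interrupt status (from the shadow
 * register block or JME_IEVE), schedule the matching tasklets,
 * then acknowledge the handled events.
 */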
static irqreturn_t jme_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	irqreturn_t rc = IRQ_HANDLED;
	__u32 intrstat;

#if USE_IEVE_SHADOW
	pci_dma_sync_single_for_cpu(jme->pdev,
				    jme->shadow_dma,
				    sizeof(__u32) * SHADOW_REG_NR,
				    PCI_DMA_FROMDEVICE);
	intrstat = jme->shadow_regs[SHADOW_IEVE];
	jme->shadow_regs[SHADOW_IEVE] = 0;
#else
	intrstat = jread32(jme, JME_IEVE);
#endif

#ifdef INTERRUPT_DEBUG
	dprintk(netdev->name, "Interrupt received(%08x) @ %lu.\n", intrstat, jiffies);
#endif

	/*
	 * Check if it's really an interrupt for us
	 * and if the device still exists
	 */
	if((intrstat & INTR_ENABLE) == 0) {
		rc = IRQ_NONE;
		goto out;
	}
	if(unlikely(intrstat == ~((typeof(intrstat))0))) {
		rc = IRQ_NONE;
		goto out;
	}

	if(intrstat & INTR_LINKCH) {
		/*
		 * Process Link status change event
		 */
		tasklet_schedule(&jme->linkch_task);
	}

	if(intrstat & INTR_RX0EMP) {
		/*
		 * Process event
		 */
		jme->flags |= JME_FLAG_RXQ0_EMPTY;

		jeprintk(netdev->name, "Ran out of Receive Queue 0 buffers.\n");
	}

	if(intrstat & INTR_RX0) {
		/*
		 * Process event
		 */
		tasklet_schedule(&jme->rxclean_task);

#ifdef RX_PKT_DEBUG
		dprintk(netdev->name, "Received From Queue 0.\n");
#endif
	}

	if(intrstat & INTR_TX0) {
		/*
		 * Process event
		 */
		tasklet_schedule(&jme->txclean_task);

#ifdef TX_PKT_DEBUG
		dprintk(netdev->name, "Queue 0 transmit complete.\n");
#endif
	}

	if((intrstat & ~INTR_ENABLE) != 0) {
#ifdef INTERRUPT_DEBUG
		dprintk(netdev->name, "Some interrupt event not handled: %08x\n", intrstat & ~INTR_ENABLE);
#endif
	}

	/*
	 * Deassert interrupts
	 */
	jwrite32(jme, JME_IEVE, intrstat & INTR_ENABLE);

out:
	return rc;
}

static int jme_open(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;

	rc = request_irq(jme->pdev->irq, jme_intr,
			 IRQF_SHARED, netdev->name, netdev);
	if(rc) {
		printk(KERN_ERR PFX "Requesting IRQ failed.\n");
		goto err_out;
	}

	rc = jme_setup_rx_resources(jme);
	if(rc) {
		printk(KERN_ERR PFX "Allocating resources for RX failed.\n");
		goto err_out_free_irq;
	}

	rc = jme_setup_tx_resources(jme);
	if(rc) {
		printk(KERN_ERR PFX "Allocating resources for TX failed.\n");
		goto err_out_free_rx_resources;
	}

	jme_reset_mac_processor(jme);
	jme_check_link(netdev);
	jme_enable_shadow(jme);
	jme_start_irq(jme);
	jme_enable_rx_engine(jme);
	jme_enable_tx_engine(jme);
	netif_start_queue(netdev);

	return 0;

err_out_free_rx_resources:
	jme_free_rx_resources(jme);
err_out_free_irq:
	free_irq(jme->pdev->irq, jme->dev);
err_out:
	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	return rc;
}

static int jme_close(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);

	jme_stop_irq(jme);
	jme_disable_shadow(jme);
	free_irq(jme->pdev->irq, jme->dev);

	tasklet_kill(&jme->linkch_task);
	tasklet_kill(&jme->txclean_task);
	tasklet_kill(&jme->rxclean_task);
	jme_disable_rx_engine(jme);
	jme_disable_tx_engine(jme);
	jme_free_rx_resources(jme);
	jme_free_tx_resources(jme);

	return 0;
}

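/*
 * Grab the next free TX descriptor (or report busy if the ring
 * is full), fill it from the skb, then kick the TX queue.
 */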
static int jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct jme_ring *txring = &(jme->txring[0]);
	struct TxDesc *txdesc = txring->desc;
	int idx;

	/*
	 * Check if transmit queue is already full
	 * and take one descriptor to use
	 */
	spin_lock(&jme->xmit_lock);
	idx = txring->next_to_use;
	if(unlikely(txdesc[idx].desc1.flags & TXFLAG_OWN)) {
		spin_unlock(&jme->xmit_lock);
#ifdef TX_BUSY_DEBUG
		dprintk(netdev->name, "TX Device busy.\n");
#endif
		return NETDEV_TX_BUSY;
	}
	if(unlikely(++(txring->next_to_use) == RING_DESC_NR))
		txring->next_to_use = 0;
	spin_unlock(&jme->xmit_lock);

	/*
	 * Fill up TX descriptors
	 */
	jme_set_new_txdesc(jme, idx, skb);

	/*
	 * Tell MAC HW to send
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_QUEUE0S |
				TXCS_ENABLE);

#ifdef TX_PKT_DEBUG
	dprintk(netdev->name, "Asked to transmit.\n");
#endif

	NET_STAT.tx_bytes += skb->len;
	++(NET_STAT.tx_packets);
	netdev->trans_start = jiffies;

	return NETDEV_TX_OK;
}

static int jme_set_macaddr(struct net_device *netdev, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct sockaddr *addr = p;
	__u32 val;

	if(netif_running(netdev))
		return -EBUSY;

	spin_lock(&jme->macaddr_lock);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	val = addr->sa_data[3] << 24 |
	      addr->sa_data[2] << 16 |
	      addr->sa_data[1] << 8 |
	      addr->sa_data[0];
	jwrite32(jme, JME_RXUMA_LO, val);
	val = addr->sa_data[5] << 8 |
	      addr->sa_data[4];
	jwrite32(jme, JME_RXUMA_HI, val);
	spin_unlock(&jme->macaddr_lock);

	return 0;
}

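/*
 * Program the RX unicast/multicast filter: promiscuous and
 * all-multi modes map directly to RXMCS bits, otherwise build
 * the 64-bit multicast hash table from the CRC of each address.
 */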
static void jme_set_multi(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 mc_hash[2] = {};
	__u32 val;
	int i;

	spin_lock(&jme->macaddr_lock);
	val = RXMCS_BRDFRAME | RXMCS_UNIFRAME;

	if (netdev->flags & IFF_PROMISC)
		val |= RXMCS_ALLFRAME;
	else if (netdev->flags & IFF_ALLMULTI)
		val |= RXMCS_ALLMULFRAME;
	else if(netdev->flags & IFF_MULTICAST) {
		struct dev_mc_list *mclist;
		int bit_nr;

		val |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
		for (i = 0, mclist = netdev->mc_list;
		     mclist && i < netdev->mc_count;
		     ++i, mclist = mclist->next) {
			bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
			mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
#ifdef SET_MULTI_DEBUG
			dprintk(netdev->name, "Adding MCAddr: "
				"%02x:%02x:%02x:%02x:%02x:%02x (%d)\n",
				mclist->dmi_addr[0],
				mclist->dmi_addr[1],
				mclist->dmi_addr[2],
				mclist->dmi_addr[3],
				mclist->dmi_addr[4],
				mclist->dmi_addr[5],
				bit_nr);
#endif
		}

		jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
		jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
	}

	wmb();
	jwrite32(jme, JME_RXMCS, val);
	spin_unlock(&jme->macaddr_lock);

#ifdef SET_MULTI_DEBUG
	dprintk(netdev->name, "RX Mode changed: %08x\n", val);
#endif
}

static int jme_change_mtu(struct net_device *dev, int new_mtu)
{
	/*
	 * Do not support MTU change for now.
	 */
	return -EINVAL;
}

static void jme_get_drvinfo(struct net_device *netdev,
			    struct ethtool_drvinfo *info)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(jme->pdev));
}

static int jme_get_settings(struct net_device *netdev,
			    struct ethtool_cmd *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;
	spin_lock(&jme->phy_lock);
	rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
	spin_unlock(&jme->phy_lock);
	return rc;
}

static int jme_set_settings(struct net_device *netdev,
			    struct ethtool_cmd *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;
	spin_lock(&jme->phy_lock);
	rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
	spin_unlock(&jme->phy_lock);
	return rc;
}

static u32 jme_get_link(struct net_device *netdev) {
	struct jme_adapter *jme = netdev_priv(netdev);
	return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
}

static const struct ethtool_ops jme_ethtool_ops = {
	.get_drvinfo = jme_get_drvinfo,
	.get_settings = jme_get_settings,
	.set_settings = jme_set_settings,
	.get_link = jme_get_link,
};

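/*
 * PCI probe: enable the device, map its registers, allocate the
 * shadow register block, initialize adapter state and locks,
 * read the MAC address from EEPROM and register the netdev.
 */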
static int __devinit jme_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	int rc = 0;
	struct net_device *netdev;
	struct jme_adapter *jme;

	/*
	 * set up PCI device basics
	 */
	rc = pci_enable_device(pdev);
	if(rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device.\n");
		goto err_out;
	}

	if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "No PCI resource region found.\n");
		rc = -ENOMEM;
		goto err_out_disable_pdev;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if(rc) {
		printk(KERN_ERR PFX "Cannot obtain PCI resource region.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/*
	 * alloc and init net device
	 */
	netdev = alloc_etherdev(sizeof(struct jme_adapter));
	if(!netdev) {
		rc = -ENOMEM;
		goto err_out_release_regions;
	}
	netdev->open = jme_open;
	netdev->stop = jme_close;
	netdev->hard_start_xmit = jme_start_xmit;
	netdev->irq = pdev->irq;
	netdev->set_mac_address = jme_set_macaddr;
	netdev->set_multicast_list = jme_set_multi;
	netdev->change_mtu = jme_change_mtu;
	netdev->ethtool_ops = &jme_ethtool_ops;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	/*
	 * init adapter info
	 */
	jme = netdev_priv(netdev);
	jme->pdev = pdev;
	jme->dev = netdev;
	jme->regs = ioremap(pci_resource_start(pdev, 0),
			    pci_resource_len(pdev, 0));
	if (!(jme->regs)) {
		rc = -ENOMEM;
		goto err_out_free_netdev;
	}
	jme->shadow_regs = pci_alloc_consistent(pdev,
						sizeof(__u32) * SHADOW_REG_NR,
						&(jme->shadow_dma));
	if (!(jme->shadow_regs)) {
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	spin_lock_init(&jme->xmit_lock);
	spin_lock_init(&jme->recv_lock);
	spin_lock_init(&jme->macaddr_lock);
	spin_lock_init(&jme->phy_lock);
	tasklet_init(&jme->linkch_task,
		     &jme_link_change_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->txclean_task,
		     &jme_tx_clean_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->rxclean_task,
		     &jme_rx_clean_tasklet,
		     (unsigned long) jme);
	jme->mii_if.dev = netdev;
	jme->mii_if.phy_id = 1;
	jme->mii_if.supports_gmii = 1;
	jme->mii_if.mdio_read = jme_mdio_read;
	jme->mii_if.mdio_write = jme_mdio_write;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
	netdev->get_stats = &(jme_get_stats);
#endif

	/*
	 * Reset MAC processor and reload EEPROM for MAC Address
	 */
	jme_clear_pm(jme);
	jme_reset_mac_processor(jme);
	rc = jme_reload_eeprom(jme);
	if(rc) {
		printk(KERN_ERR PFX "Reloading eeprom for reading MAC Address failed.\n");
		goto err_out_free_shadow;
	}
	jme_load_macaddr(netdev);

	/*
	 * Tell stack that we are not ready to work until open()
	 */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	/*
	 * Register netdev
	 */
	rc = register_netdev(netdev);
	if(rc) {
		printk(KERN_ERR PFX "Cannot register net device.\n");
		goto err_out_free_shadow;
	}

	jprintk(netdev->name,
		"JMC250 gigabit eth at %llx, %02x:%02x:%02x:%02x:%02x:%02x, IRQ %d\n",
		(unsigned long long) pci_resource_start(pdev, 0),
		netdev->dev_addr[0],
		netdev->dev_addr[1],
		netdev->dev_addr[2],
		netdev->dev_addr[3],
		netdev->dev_addr[4],
		netdev->dev_addr[5],
		pdev->irq);

	return 0;

err_out_free_shadow:
	pci_free_consistent(pdev,
			    sizeof(__u32) * SHADOW_REG_NR,
			    jme->shadow_regs,
			    jme->shadow_dma);
err_out_unmap:
	iounmap(jme->regs);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out:
	return rc;
}

static void __devexit jme_remove_one(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	unregister_netdev(netdev);
	pci_free_consistent(pdev,
			    sizeof(__u32) * SHADOW_REG_NR,
			    jme->shadow_regs,
			    jme->shadow_dma);
	iounmap(jme->regs);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

}

static struct pci_device_id jme_pci_tbl[] = {
	{ PCI_VDEVICE(JMICRON, 0x250) },
	{ }
};

static struct pci_driver jme_driver = {
	.name = DRV_NAME,
	.id_table = jme_pci_tbl,
	.probe = jme_init_one,
	.remove = __devexit_p(jme_remove_one),
#if 0
#ifdef CONFIG_PM
	.suspend = jme_suspend,
	.resume = jme_resume,
#endif /* CONFIG_PM */
#endif
};

static int __init jme_init_module(void)
{
	printk(KERN_INFO PFX "JMicron JMC250 gigabit ethernet "
	       "driver version %s\n", DRV_VERSION);
	return pci_register_driver(&jme_driver);
}

static void __exit jme_cleanup_module(void)
{
	pci_unregister_driver(&jme_driver);
}

module_init(jme_init_module);
module_exit(jme_cleanup_module);

MODULE_AUTHOR("David Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);