/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * http://www.jmicron.com/
 *
 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

/*
 * Note:
 * Backdoor for changing the "FIFO Threshold for processing next packet":
 * Usage:
 * ethtool -C eth1 adaptive-rx on adaptive-tx on \
 *         rx-usecs 250 rx-frames-low N
 * N := 16 | 32 | 64 | 128
 */
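
/*
 * Illustrative sketch only (not compiled): how the rx-frames-low value N
 * above is expected to map onto the RXCS FIFO-threshold bits, mirroring
 * jme_set_coalesce() further below. The RXCS_FIFOTHNP_* names come from
 * jme.h.
 */
#if 0
static __u32 example_fifothnp_for_frames_low(int n)
{
	switch (n) {
	case 16:
		return RXCS_FIFOTHNP_16QW;
	case 32:
		return RXCS_FIFOTHNP_32QW;
	case 64:
		return RXCS_FIFOTHNP_64QW;
	case 128:
	default:
		return RXCS_FIFOTHNP_128QW;
	}
}
#endif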

/*
 * Timeline before release:
 * Stage 5: Advanced offloading support.
 * 0.8:
 * - Implement VLAN offloading.
 * 0.9:
 * - Implement scatter-gather offloading.
 *   Use pci_map_page on scattered sk_buff for HIGHMEM support.
 * - Implement TCP Segmentation offloading.
 *   Due to the TX FIFO size, we should turn off TSO when mtu > 1500.
 *
 * Stage 6: CPU Load balancing.
 * 1.0:
 * - Implement MSI-X.
 *   Along with multiple RX queues, for CPU load balancing.
 *
 * Stage 7:
 * - Clean up/re-organize code, performance tuning (alignment etc...).
 * - Test and Release 1.0
 *
 * Non-Critical:
 * - Use NAPI instead of rx_tasklet?
 *   The PCC supports both packet-counter and timeout interrupts for
 *   receive and transmit completion, so is NAPI really needed?
 * - Decode register dump for ethtool.
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include "jme.h"

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
static struct net_device_stats *
jme_get_stats(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return &jme->stats;
}
#endif

static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, val;

	jwrite32(jme, JME_SMI, SMI_OP_REQ |
				smi_phy_addr(phy) |
				smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
		udelay(1);
		if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0) {
		jeprintk(netdev->name, "phy read timeout : %d\n", reg);
		return 0;
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

static void
jme_mdio_write(struct net_device *netdev,
				int phy, int reg, int val)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i;

	jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
		((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
		smi_phy_addr(phy) | smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
		udelay(1);
		if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0)
		jeprintk(netdev->name, "phy write timeout : %d\n", reg);

	return;
}

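/*
 * Usage sketch (illustrative only, not compiled): the SMI accessors above
 * follow the standard mii.h register layout, so e.g. link status can be
 * polled through MII_BMSR. BMSR_LSTATUS is latched-low, hence the double
 * read.
 */
#if 0
static int example_phy_link_up(struct net_device *netdev, int phy)
{
	jme_mdio_read(netdev, phy, MII_BMSR);
	return (jme_mdio_read(netdev, phy, MII_BMSR) & BMSR_LSTATUS) != 0;
}
#endif
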
__always_inline static void
jme_reset_phy_processor(struct jme_adapter *jme)
{
	__u32 val;

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_ADVERTISE, ADVERTISE_ALL |
			ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_CTRL1000,
			ADVERTISE_1000FULL | ADVERTISE_1000HALF);

	val = jme_mdio_read(jme->dev,
				jme->mii_if.phy_id,
				MII_BMCR);

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_BMCR, val | BMCR_RESET);

	return;
}

__always_inline static void
jme_reset_mac_processor(struct jme_adapter *jme)
{
	jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
	udelay(2);
	jwrite32(jme, JME_GHC, jme->reg_ghc);
	jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
	jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
	jwrite32(jme, JME_WFODP, 0);
	jwrite32(jme, JME_WFOI, 0);
	jwrite32(jme, JME_GPREG0, GPREG0_DEFAULT);
	jwrite32(jme, JME_GPREG1, 0);
}

__always_inline static void
jme_clear_pm(struct jme_adapter *jme)
{
	jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
	pci_set_power_state(jme->pdev, PCI_D0);
	pci_enable_wake(jme->pdev, PCI_D0, 0);
}

3bf61c55
GFT
185static int
186jme_reload_eeprom(struct jme_adapter *jme)
d7699f87
GFT
187{
188 __u32 val;
189 int i;
190
191 val = jread32(jme, JME_SMBCSR);
192
193 if(val & SMBCSR_EEPROMD)
194 {
195 val |= SMBCSR_CNACK;
196 jwrite32(jme, JME_SMBCSR, val);
197 val |= SMBCSR_RELOAD;
198 jwrite32(jme, JME_SMBCSR, val);
199 mdelay(12);
200
201 for (i = JME_SMB_TIMEOUT; i > 0; --i)
202 {
203 mdelay(1);
204 if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
205 break;
206 }
207
208 if(i == 0) {
4330c2f2 209 jeprintk(jme->dev->name, "eeprom reload timeout\n");
d7699f87
GFT
210 return -EIO;
211 }
212 }
213 else
214 return -EIO;
3bf61c55 215
d7699f87
GFT
216 return 0;
217}
218
static void
jme_load_macaddr(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned char macaddr[6];
	__u32 val;

	spin_lock(&jme->macaddr_lock);
	val = jread32(jme, JME_RXUMA_LO);
	macaddr[0] = (val >>  0) & 0xFF;
	macaddr[1] = (val >>  8) & 0xFF;
	macaddr[2] = (val >> 16) & 0xFF;
	macaddr[3] = (val >> 24) & 0xFF;
	val = jread32(jme, JME_RXUMA_HI);
	macaddr[4] = (val >>  0) & 0xFF;
	macaddr[5] = (val >>  8) & 0xFF;
	memcpy(netdev->dev_addr, macaddr, 6);
	spin_unlock(&jme->macaddr_lock);
}

__always_inline static void
jme_set_rx_pcc(struct jme_adapter *jme, int p)
{
	switch(p) {
	case PCC_P1:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P2:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P3:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	default:
		break;
	}

	dprintk(jme->dev->name, "Switched to PCC_P%d\n", p);
}

static void
jme_start_irq(struct jme_adapter *jme)
{
	register struct dynpcc_info *dpi = &(jme->dpi);

	jme_set_rx_pcc(jme, PCC_P1);
	dpi->cur	= PCC_P1;
	dpi->attempt	= PCC_P1;
	dpi->cnt	= 0;

	jwrite32(jme, JME_PCCTX,
			((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
			((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
			PCCTXQ0_EN
		);

	/*
	 * Enable Interrupts
	 */
	jwrite32(jme, JME_IENS, INTR_ENABLE);
}

__always_inline static void
jme_stop_irq(struct jme_adapter *jme)
{
	/*
	 * Disable Interrupts
	 */
	jwrite32(jme, JME_IENC, INTR_ENABLE);
}

__always_inline static void
jme_enable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme,
		 JME_SHBA_LO,
		 ((__u32)jme->shadow_dma & ~((__u32)0x1F)) | SHBA_POSTEN);
}

__always_inline static void
jme_disable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme, JME_SHBA_LO, 0x0);
}

static int
jme_check_link(struct net_device *netdev, int testonly)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr;
	char linkmsg[64];
	int rc = 0;

	phylink = jread32(jme, JME_PHY_LINK);

	if (phylink & PHY_LINK_UP) {
		if(!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
			/*
			 * If we did not enable AN,
			 * Speed/Duplex Info should be obtained from SMI
			 */
			phylink = PHY_LINK_UP;

			bmcr = jme_mdio_read(jme->dev,
						jme->mii_if.phy_id,
						MII_BMCR);

			phylink |= ((bmcr & BMCR_SPEED1000) &&
					(bmcr & BMCR_SPEED100) == 0) ?
					PHY_LINK_SPEED_1000M :
					(bmcr & BMCR_SPEED100) ?
					PHY_LINK_SPEED_100M :
					PHY_LINK_SPEED_10M;

			phylink |= (bmcr & BMCR_FULLDPLX) ?
					PHY_LINK_DUPLEX : 0;

			strcpy(linkmsg, "Forced: ");
		}
		else {
			/*
			 * Keep polling for speed/duplex resolve complete
			 */
			while(!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
				--cnt) {

				udelay(1);
				phylink = jread32(jme, JME_PHY_LINK);

			}

			if(!cnt)
				jeprintk(netdev->name,
					"Waiting for speed resolve timed out.\n");

			strcpy(linkmsg, "ANed: ");
		}

		if(jme->phylink == phylink) {
			rc = 1;
			goto out;
		}
		if(testonly)
			goto out;

		jme->phylink = phylink;

		switch(phylink & PHY_LINK_SPEED_MASK) {
		case PHY_LINK_SPEED_10M:
			ghc = GHC_SPEED_10M;
			strcpy(linkmsg, "10 Mbps, ");
			break;
		case PHY_LINK_SPEED_100M:
			ghc = GHC_SPEED_100M;
			strcpy(linkmsg, "100 Mbps, ");
			break;
		case PHY_LINK_SPEED_1000M:
			ghc = GHC_SPEED_1000M;
			strcpy(linkmsg, "1000 Mbps, ");
			break;
		default:
			ghc = 0;
			break;
		}
		ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;

		strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
					"Full-Duplex, " :
					"Half-Duplex, ");

		if(phylink & PHY_LINK_MDI_STAT)
			strcat(linkmsg, "MDI-X");
		else
			strcat(linkmsg, "MDI");

		if(phylink & PHY_LINK_DUPLEX)
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
		else {
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
						TXMCS_BACKOFF |
						TXMCS_CARRIERSENSE |
						TXMCS_COLLISION);
			jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
				((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
				TXTRHD_TXREN |
				((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
		}

		jme->reg_ghc = ghc;
		jwrite32(jme, JME_GHC, ghc);

		jprintk(netdev->name, "Link is up at %s.\n", linkmsg);
		netif_carrier_on(netdev);
	}
	else {
		if(testonly)
			goto out;

		jprintk(netdev->name, "Link is down.\n");
		jme->phylink = 0;
		netif_carrier_off(netdev);
	}

out:
	return rc;
}

static int
jme_alloc_txdesc(struct jme_adapter *jme,
			int nr_alloc)
{
	struct jme_ring *txring = jme->txring;
	int idx;

	idx = txring->next_to_use;

	if(unlikely(atomic_read(&txring->nr_free) < nr_alloc))
		return -1;

	atomic_sub(nr_alloc, &txring->nr_free);

	if((txring->next_to_use += nr_alloc) >= RING_DESC_NR)
		txring->next_to_use -= RING_DESC_NR;

	return idx;
}

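/*
 * Worked example (illustrative): if RING_DESC_NR were 512 (the real
 * value lives in jme.h) and next_to_use == 510 with nr_alloc == 4, the
 * producer index above wraps to (510 + 4) - 512 == 2, handing out
 * descriptors 510, 511, 0 and 1.
 */
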
static void
jme_tx_csum(struct sk_buff *skb, unsigned mtu, __u8 *flags)
{
	if(skb->ip_summed == CHECKSUM_PARTIAL) {
		__u8 ip_proto;

		switch (skb->protocol) {
		case __constant_htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			ip_proto = 0;
			break;
		}

		switch(ip_proto) {
		case IPPROTO_TCP:
			*flags |= TXFLAG_TCPCS;
			break;
		case IPPROTO_UDP:
			*flags |= TXFLAG_UDPCS;
			break;
		default:
			jeprintk("jme", "Unexpected upper-layer protocol.\n");
			break;
		}
	}
}

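/*
 * Example (illustrative): for an outgoing IPv4/TCP frame with
 * skb->ip_summed == CHECKSUM_PARTIAL, ip_proto above resolves to
 * IPPROTO_TCP, so TXFLAG_TCPCS is OR-ed into the descriptor flags and
 * the MAC inserts the TCP checksum in hardware.
 */
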
static int
jme_set_new_txdesc(struct jme_adapter *jme,
			struct sk_buff *skb)
{
	struct jme_ring *txring = jme->txring;
	volatile struct txdesc *txdesc = txring->desc, *ctxdesc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
	dma_addr_t dmaaddr;
	int i, idx, nr_desc;
	__u8 flags;

	nr_desc = 2;
	idx = jme_alloc_txdesc(jme, nr_desc);

	if(unlikely(idx < 0))
		return NETDEV_TX_BUSY;

	for(i = 1 ; i < nr_desc ; ++i) {
		ctxdesc = txdesc + ((idx + i) & (RING_DESC_NR - 1));
		ctxbi = txbi + ((idx + i) & (RING_DESC_NR - 1));

		dmaaddr = pci_map_single(jme->pdev,
					 skb->data,
					 skb->len,
					 PCI_DMA_TODEVICE);

		pci_dma_sync_single_for_device(jme->pdev,
					       dmaaddr,
					       skb->len,
					       PCI_DMA_TODEVICE);

		ctxdesc->dw[0] = 0;
		ctxdesc->dw[1] = 0;
		ctxdesc->desc2.flags	= TXFLAG_OWN;
		if(jme->dev->features & NETIF_F_HIGHDMA)
			ctxdesc->desc2.flags |= TXFLAG_64BIT;
		ctxdesc->desc2.datalen	= cpu_to_le16(skb->len);
		ctxdesc->desc2.bufaddrh	= cpu_to_le32((__u64)dmaaddr >> 32);
		ctxdesc->desc2.bufaddrl	= cpu_to_le32(
						(__u64)dmaaddr & 0xFFFFFFFFUL);

		ctxbi->mapping = dmaaddr;
		ctxbi->len = skb->len;
	}

	ctxdesc = txdesc + idx;
	ctxbi = txbi + idx;

	ctxdesc->dw[0] = 0;
	ctxdesc->dw[1] = 0;
	ctxdesc->dw[2] = 0;
	ctxdesc->dw[3] = 0;
	ctxdesc->desc1.pktsize = cpu_to_le16(skb->len);
	/*
	 * Set the OWN bit last.
	 * If the kernel fills descriptors faster than the NIC sends them,
	 * the NIC could otherwise pick up this descriptor before we tell
	 * it to start sending this TX queue.
	 * Other fields are already filled correctly.
	 */
	wmb();
	flags = TXFLAG_OWN | TXFLAG_INT;
	jme_tx_csum(skb, jme->dev->mtu, &flags);
	ctxdesc->desc1.flags = flags;
	/*
	 * Set the tx buffer info after telling the NIC to send,
	 * for better tx_clean timing.
	 */
	wmb();
	ctxbi->nr_desc = nr_desc;
	ctxbi->skb = skb;

	tx_dbg(jme->dev->name, "Xmit: %d+%d\n", idx, nr_desc);

	return 0;
}

static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);

	txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
					   TX_RING_ALLOC_SIZE,
					   &(txring->dmaalloc),
					   GFP_ATOMIC);

	if(!txring->alloc) {
		txring->desc = NULL;
		txring->dmaalloc = 0;
		txring->dma = 0;
		return -ENOMEM;
	}

	/*
	 * 16 Bytes align
	 */
	txring->desc = (void*)ALIGN((unsigned long)(txring->alloc),
				    RING_DESC_ALIGN);
	txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
	txring->next_to_use = 0;
	txring->next_to_clean = 0;
	atomic_set(&txring->nr_free, RING_DESC_NR);

	/*
	 * Initialize Transmit Descriptors
	 */
	memset(txring->alloc, 0, TX_RING_ALLOC_SIZE);
	memset(txring->bufinf, 0,
	       sizeof(struct jme_buffer_info) * RING_DESC_NR);

	return 0;
}

static void
jme_free_tx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi = txring->bufinf;

	if(txring->alloc) {
		for(i = 0 ; i < RING_DESC_NR ; ++i) {
			txbi = txring->bufinf + i;
			if(txbi->skb) {
				dev_kfree_skb(txbi->skb);
				txbi->skb = NULL;
			}
			txbi->mapping = 0;
			txbi->len = 0;
			txbi->nr_desc = 0;
		}

		dma_free_coherent(&(jme->pdev->dev),
				  TX_RING_ALLOC_SIZE,
				  txring->alloc,
				  txring->dmaalloc);

		txring->alloc = NULL;
		txring->desc = NULL;
		txring->dmaalloc = 0;
		txring->dma = 0;
	}
	txring->next_to_use = 0;
	txring->next_to_clean = 0;
	atomic_set(&txring->nr_free, 0);

}

__always_inline static void
jme_enable_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);

	/*
	 * Setup TX Queue 0 DMA Base Address
	 */
	jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
	jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup TX Descriptor Count
	 */
	jwrite32(jme, JME_TXQDC, RING_DESC_NR);

	/*
	 * Enable TX Engine
	 */
	wmb();
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);

}

__always_inline static void
jme_restart_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Restart TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);
}

__always_inline static void
jme_disable_tx_engine(struct jme_adapter *jme)
{
	int i;
	__u32 val;

	/*
	 * Disable TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);

	val = jread32(jme, JME_TXCS);
	for(i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i)
	{
		mdelay(1);
		val = jread32(jme, JME_TXCS);
	}

	if(!i) {
		jeprintk(jme->dev->name, "Disable TX engine timeout.\n");
		jme_reset_mac_processor(jme);
	}

}

static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = jme->rxring;
	register volatile struct rxdesc* rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxdesc += i;
	rxbi += i;

	rxdesc->dw[0] = 0;
	rxdesc->dw[1] = 0;
	rxdesc->desc1.bufaddrh	= cpu_to_le32((__u64)rxbi->mapping >> 32);
	rxdesc->desc1.bufaddrl	= cpu_to_le32(
					(__u64)rxbi->mapping & 0xFFFFFFFFUL);
	rxdesc->desc1.datalen	= cpu_to_le16(rxbi->len);
	if(jme->dev->features & NETIF_F_HIGHDMA)
		rxdesc->desc1.flags = RXFLAG_64BIT;
	wmb();
	rxdesc->desc1.flags	|= RXFLAG_OWN | RXFLAG_INT;
}

static int
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf;
	unsigned long offset;
	struct sk_buff* skb;

	skb = netdev_alloc_skb(jme->dev,
				jme->dev->mtu + RX_EXTRA_LEN);
	if(unlikely(!skb))
		return -ENOMEM;

	if(unlikely(skb_is_nonlinear(skb))) {
		dprintk(jme->dev->name,
			"Allocated skb fragged(%d).\n",
			skb_shinfo(skb)->nr_frags);
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	if(unlikely(offset =
			(unsigned long)(skb->data)
			& ((unsigned long)RX_BUF_DMA_ALIGN - 1)))
		skb_reserve(skb, RX_BUF_DMA_ALIGN - offset);

	rxbi += i;
	rxbi->skb = skb;
	rxbi->len = skb_tailroom(skb);
	rxbi->mapping = pci_map_single(jme->pdev,
				       skb->data,
				       rxbi->len,
				       PCI_DMA_FROMDEVICE);

	return 0;
}

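/*
 * Worked example (illustrative): if RX_BUF_DMA_ALIGN were 8 (the real
 * constant lives in jme.h) and skb->data ended in ...0x3, offset would
 * be 3 and the skb_reserve() above advances the data pointer by 5 bytes,
 * leaving the DMA address 8-byte aligned.
 */
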
static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxbi += i;

	if(rxbi->skb) {
		pci_unmap_single(jme->pdev,
				 rxbi->mapping,
				 rxbi->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb(rxbi->skb);
		rxbi->skb = NULL;
		rxbi->mapping = 0;
		rxbi->len = 0;
	}
}

static void
jme_free_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	if(rxring->alloc) {
		for(i = 0 ; i < RING_DESC_NR ; ++i)
			jme_free_rx_buf(jme, i);

		dma_free_coherent(&(jme->pdev->dev),
				  RX_RING_ALLOC_SIZE,
				  rxring->alloc,
				  rxring->dmaalloc);
		rxring->alloc = NULL;
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
	}
	rxring->next_to_use = 0;
	rxring->next_to_clean = 0;
}

static int
jme_setup_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
					   RX_RING_ALLOC_SIZE,
					   &(rxring->dmaalloc),
					   GFP_ATOMIC);
	if(!rxring->alloc) {
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
		return -ENOMEM;
	}

	/*
	 * 16 Bytes align
	 */
	rxring->desc = (void*)ALIGN((unsigned long)(rxring->alloc),
				    RING_DESC_ALIGN);
	rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
	rxring->next_to_use = 0;
	rxring->next_to_clean = 0;

	/*
	 * Initialize Receive Descriptors
	 */
	for(i = 0 ; i < RING_DESC_NR ; ++i) {
		if(unlikely(jme_make_new_rx_buf(jme, i))) {
			jme_free_rx_resources(jme);
			return -ENOMEM;
		}

		jme_set_clean_rxdesc(jme, i);
	}

	return 0;
}

__always_inline static void
jme_enable_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Setup RX DMA Base Address
	 */
	jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
	jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup RX Descriptor Count
	 */
	jwrite32(jme, JME_RXQDC, RING_DESC_NR);

	/*
	 * Setup Unicast Filter
	 */
	jme_set_multi(jme->dev);

	/*
	 * Enable RX Engine
	 */
	wmb();
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}

__always_inline static void
jme_restart_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Start RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}

__always_inline static void
jme_disable_rx_engine(struct jme_adapter *jme)
{
	int i;
	__u32 val;

	/*
	 * Disable RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs);

	val = jread32(jme, JME_RXCS);
	for(i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i)
	{
		mdelay(1);
		val = jread32(jme, JME_RXCS);
	}

	if(!i)
		jeprintk(jme->dev->name, "Disable RX engine timeout.\n");

}

static void
jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx, int summed)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	volatile struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	struct sk_buff *skb;
	int framesize;

	rxdesc += idx;
	rxbi += idx;

	skb = rxbi->skb;
	pci_dma_sync_single_for_cpu(jme->pdev,
				    rxbi->mapping,
				    rxbi->len,
				    PCI_DMA_FROMDEVICE);

	if(unlikely(jme_make_new_rx_buf(jme, idx))) {
		pci_dma_sync_single_for_device(jme->pdev,
					       rxbi->mapping,
					       rxbi->len,
					       PCI_DMA_FROMDEVICE);

		++(NET_STAT(jme).rx_dropped);
	}
	else {
		framesize = le16_to_cpu(rxdesc->descwb.framesize)
				- RX_PREPAD_SIZE;

		skb_reserve(skb, RX_PREPAD_SIZE);
		skb_put(skb, framesize);
		skb->protocol = eth_type_trans(skb, jme->dev);

		if(summed)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		netif_rx(skb);

		if(le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST_MUL)
			++(NET_STAT(jme).multicast);

		jme->dev->last_rx = jiffies;
		NET_STAT(jme).rx_bytes += framesize;
		++(NET_STAT(jme).rx_packets);
	}

	jme_set_clean_rxdesc(jme, idx);

}

static int
jme_rxsum_bad(struct jme_adapter *jme, __u16 flags)
{
	if(unlikely((flags & RXWBFLAG_TCPON) &&
	!(flags & RXWBFLAG_TCPCS))) {
		csum_dbg(jme->dev->name, "TCP Checksum error.\n");
		return 1;
	}
	else if(unlikely((flags & RXWBFLAG_UDPON) &&
	!(flags & RXWBFLAG_UDPCS))) {
		csum_dbg(jme->dev->name, "UDP Checksum error.\n");
		return 1;
	}
	else if(unlikely((flags & RXWBFLAG_IPV4) &&
	!(flags & RXWBFLAG_IPCS))) {
		csum_dbg(jme->dev->name, "IPV4 Checksum error.\n");
		return 1;
	}
	else {
		return 0;
	}
}

static int
jme_process_receive(struct jme_adapter *jme, int limit)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	volatile struct rxdesc *rxdesc = rxring->desc;
	int i, j, ccnt, desccnt;

	i = rxring->next_to_clean;
	while( limit-- > 0 )
	{
		rxdesc = rxring->desc;
		rxdesc += i;

		if((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
		!(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
			goto out;

		desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

		rx_dbg(jme->dev->name, "RX: Cleaning %d\n", i);

		if(unlikely(desccnt > 1 ||
		rxdesc->descwb.errstat & RXWBERR_ALLERR ||
		jme_rxsum_bad(jme, rxdesc->descwb.flags))) {

			if(rxdesc->descwb.errstat & RXWBERR_CRCERR)
				++(NET_STAT(jme).rx_crc_errors);
			else if(rxdesc->descwb.errstat & RXWBERR_OVERUN)
				++(NET_STAT(jme).rx_fifo_errors);
			else
				++(NET_STAT(jme).rx_errors);

			if(desccnt > 1) {
				rx_dbg(jme->dev->name,
					"RX: More than one(%d) descriptor, "
					"framelen=%d\n",
					desccnt, le16_to_cpu(rxdesc->descwb.framesize));
				limit -= desccnt - 1;
			}

			for(j = i, ccnt = desccnt ; ccnt-- ; ) {
				jme_set_clean_rxdesc(jme, j);

				if(unlikely(++j == RING_DESC_NR))
					j = 0;
			}

		}
		else {
			jme_alloc_and_feed_skb(jme, i,
				(rxdesc->descwb.flags &
					(RXWBFLAG_TCPON |
					 RXWBFLAG_UDPON |
					 RXWBFLAG_IPV4)));
		}

		if((i += desccnt) >= RING_DESC_NR)
			i -= RING_DESC_NR;
	}

out:
	rx_dbg(jme->dev->name, "RX: Stop at %d\n", i);
	rx_dbg(jme->dev->name, "RX: RXNDA offset %d\n",
		(jread32(jme, JME_RXNDA) - jread32(jme, JME_RXDBA_LO))
			>> 4);

	rxring->next_to_clean = i;

	return limit > 0 ? limit : 0;

}

static void
jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
{
	if(likely(atmp == dpi->cur))
		return;

	if(dpi->attempt == atmp) {
		++(dpi->cnt);
	}
	else {
		dpi->attempt = atmp;
		dpi->cnt = 0;
	}

}

static void
jme_dynamic_pcc(struct jme_adapter *jme)
{
	register struct dynpcc_info *dpi = &(jme->dpi);

	if((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
		jme_attempt_pcc(dpi, PCC_P3);
	else if((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P2_THRESHOLD
	|| dpi->intr_cnt > PCC_INTR_THRESHOLD)
		jme_attempt_pcc(dpi, PCC_P2);
	else
		jme_attempt_pcc(dpi, PCC_P1);

	if(unlikely(dpi->attempt != dpi->cur && dpi->cnt > 20)) {
		jme_set_rx_pcc(jme, dpi->attempt);
		dpi->cur = dpi->attempt;
		dpi->cnt = 0;
	}
}

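/*
 * Behaviour sketch (illustrative): the coalescing level only switches
 * once the same target has been attempted for more than 20 consecutive
 * evaluations, so a single burst that crosses PCC_P3_THRESHOLD for one
 * timer tick does not reprogram JME_PCCRX0.
 */
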
static void
jme_start_pcc_timer(struct jme_adapter *jme)
{
	struct dynpcc_info *dpi = &(jme->dpi);
	dpi->last_bytes	= NET_STAT(jme).rx_bytes;
	dpi->last_pkts	= NET_STAT(jme).rx_packets;
	dpi->intr_cnt	= 0;
	jwrite32(jme, JME_TMCSR,
		TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
}

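/*
 * Worked example (illustrative, assuming the timer ticks once per
 * microsecond and rolls over at 0xFFFFFF): loading
 * 0xFFFFFF - PCC_INTERVAL_US makes TMCSR expire PCC_INTERVAL_US
 * microseconds after TMCSR_EN is set.
 */
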
static void
jme_stop_pcc_timer(struct jme_adapter *jme)
{
	jwrite32(jme, JME_TMCSR, 0);
}

static void
jme_pcc_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter*)arg;
	struct net_device *netdev = jme->dev;

	if(unlikely(netif_queue_stopped(netdev) ||
	(atomic_read(&jme->link_changing) != 1)
	)) {
		jme_stop_pcc_timer(jme);
		return;
	}

	jme_dynamic_pcc(jme);
	jme_start_pcc_timer(jme);
}

static void
jme_link_change_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter*)arg;
	struct net_device *netdev = jme->dev;
	int timeout = WAIT_TASKLET_TIMEOUT;
	int rc;

	if(!atomic_dec_and_test(&jme->link_changing))
		goto out;

	if(jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
		goto out;

	jme->old_mtu = netdev->mtu;
	netif_stop_queue(netdev);

	while(--timeout > 0 &&
		(
		atomic_read(&jme->rx_cleaning) != 1 ||
		atomic_read(&jme->tx_cleaning) != 1
		)) {

		mdelay(1);
	}

	if(netif_carrier_ok(netdev)) {
		jme_stop_pcc_timer(jme);
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);
	}

	jme_check_link(netdev, 0);
	if(netif_carrier_ok(netdev)) {
		rc = jme_setup_rx_resources(jme);
		if(rc) {
			jeprintk(netdev->name,
				"Allocating resources for RX error"
				", Device STOPPED!\n");
			goto out;
		}

		rc = jme_setup_tx_resources(jme);
		if(rc) {
			jeprintk(netdev->name,
				"Allocating resources for TX error"
				", Device STOPPED!\n");
			goto err_out_free_rx_resources;
		}

		jme_enable_rx_engine(jme);
		jme_enable_tx_engine(jme);

		netif_start_queue(netdev);
		jme_start_pcc_timer(jme);
	}

	goto out;

err_out_free_rx_resources:
	jme_free_rx_resources(jme);
out:
	atomic_inc(&jme->link_changing);
}

static void
jme_rx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter*)arg;
	struct dynpcc_info *dpi = &(jme->dpi);

	if(unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
		goto out;

	if(unlikely(atomic_read(&jme->link_changing) != 1))
		goto out;

	if(unlikely(netif_queue_stopped(jme->dev)))
		goto out;

	jme_process_receive(jme, RING_DESC_NR);
	++(dpi->intr_cnt);

out:
	atomic_inc(&jme->rx_cleaning);
}

static void
jme_rx_empty_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter*)arg;

	if(unlikely(atomic_read(&jme->link_changing) != 1))
		return;

	if(unlikely(netif_queue_stopped(jme->dev)))
		return;

	queue_dbg(jme->dev->name, "RX Queue empty!\n");

	jme_rx_clean_tasklet(arg);
	jme_restart_rx_engine(jme);
}

static void
jme_tx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter*)arg;
	struct jme_ring *txring = &(jme->txring[0]);
	volatile struct txdesc *txdesc = txring->desc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
	int i, j, cnt = 0, max, err;

	if(unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
		goto out;

	if(unlikely(atomic_read(&jme->link_changing) != 1))
		goto out;

	if(unlikely(netif_queue_stopped(jme->dev)))
		goto out;

	max = RING_DESC_NR - atomic_read(&txring->nr_free);

	tx_dbg(jme->dev->name, "Tx Tasklet: In\n");

	for(i = txring->next_to_clean ; cnt < max ; ) {

		ctxbi = txbi + i;

		if(ctxbi->skb && !(txdesc[i].descwb.flags & TXWBFLAG_OWN)) {

			err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;

			tx_dbg(jme->dev->name,
				"Tx Tasklet: Clean %d+%d\n",
				i, ctxbi->nr_desc);

			for(j = 1 ; j < ctxbi->nr_desc ; ++j) {
				ttxbi = txbi + ((i + j) & (RING_DESC_NR - 1));
				txdesc[(i + j) & (RING_DESC_NR - 1)].dw[0] = 0;

				pci_unmap_single(jme->pdev,
						 ttxbi->mapping,
						 ttxbi->len,
						 PCI_DMA_TODEVICE);

				if(likely(!err))
					NET_STAT(jme).tx_bytes += ttxbi->len;

				ttxbi->mapping = 0;
				ttxbi->len = 0;
			}

			dev_kfree_skb(ctxbi->skb);
			ctxbi->skb = NULL;

			cnt += ctxbi->nr_desc;

			if(unlikely(err))
				++(NET_STAT(jme).tx_carrier_errors);
			else
				++(NET_STAT(jme).tx_packets);
		}
		else {
			if(!ctxbi->skb)
				tx_dbg(jme->dev->name,
					"Tx Tasklet:"
					" Stopped due to no skb.\n");
			else
				tx_dbg(jme->dev->name,
					"Tx Tasklet:"
					" Stopped due to not done.\n");
			break;
		}

		if(unlikely((i += ctxbi->nr_desc) >= RING_DESC_NR))
			i -= RING_DESC_NR;

		ctxbi->nr_desc = 0;
	}

	tx_dbg(jme->dev->name,
		"Tx Tasklet: Stop %d Jiffies %lu\n",
		i, jiffies);
	txring->next_to_clean = i;

	atomic_add(cnt, &txring->nr_free);

out:
	atomic_inc(&jme->tx_cleaning);
}

static void
jme_intr_msi(struct jme_adapter *jme, __u32 intrstat)
{
	/*
	 * Disable interrupt
	 */
	jwrite32f(jme, JME_IENC, INTR_ENABLE);

	/*
	 * Write 1 to clear interrupt status
	 */
	jwrite32f(jme, JME_IEVE, intrstat);

	if(intrstat & (INTR_LINKCH | INTR_SWINTR)) {
		tasklet_schedule(&jme->linkch_task);
		goto out_reenable;
	}

	if(intrstat & INTR_TMINTR)
		tasklet_schedule(&jme->pcc_task);

	if(intrstat & INTR_RX0EMP)
		tasklet_schedule(&jme->rxempty_task);

	if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
		tasklet_schedule(&jme->rxclean_task);

	if(intrstat & (INTR_PCCTXTO | INTR_PCCTX))
		tasklet_schedule(&jme->txclean_task);

	if((intrstat & ~INTR_ENABLE) != 0) {
		/*
		 * Some interrupts are not handled,
		 * but also not enabled (for debug).
		 */
	}

out_reenable:
	/*
	 * Re-enable interrupt
	 */
	jwrite32f(jme, JME_IENS, INTR_ENABLE);

}

static irqreturn_t
jme_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 intrstat;

	intrstat = jread32(jme, JME_IEVE);

	/*
	 * Check if it's really an interrupt for us
	 */
	if(unlikely(intrstat == 0))
		return IRQ_NONE;

	/*
	 * Check if the device still exists
	 */
	if(unlikely(intrstat == ~((typeof(intrstat))0)))
		return IRQ_NONE;

	jme_intr_msi(jme, intrstat);

	return IRQ_HANDLED;
}

static irqreturn_t
jme_msi(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 intrstat;

	pci_dma_sync_single_for_cpu(jme->pdev,
				    jme->shadow_dma,
				    sizeof(__u32) * SHADOW_REG_NR,
				    PCI_DMA_FROMDEVICE);
	intrstat = jme->shadow_regs[SHADOW_IEVE];
	jme->shadow_regs[SHADOW_IEVE] = 0;

	jme_intr_msi(jme, intrstat);

	return IRQ_HANDLED;
}

static void
jme_reset_link(struct jme_adapter *jme)
{
	jme->phylink = 0;
	jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
}

static void
jme_restart_an(struct jme_adapter *jme)
{
	__u32 bmcr;
	unsigned long flags;

	spin_lock_irqsave(&jme->phy_lock, flags);
	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
	spin_unlock_irqrestore(&jme->phy_lock, flags);
}

static int
jme_request_irq(struct jme_adapter *jme)
{
	int rc;
	struct net_device *netdev = jme->dev;
	irq_handler_t handler = jme_intr;
	int irq_flags = IRQF_SHARED;

	if (!pci_enable_msi(jme->pdev)) {
		jme->flags |= JME_FLAG_MSI;
		handler = jme_msi;
		irq_flags = 0;
	}

	rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
			 netdev);
	if(rc) {
		jeprintk(netdev->name,
			"Unable to allocate %s interrupt (return: %d)\n",
			jme->flags & JME_FLAG_MSI ? "MSI" : "INTx", rc);

		if(jme->flags & JME_FLAG_MSI) {
			pci_disable_msi(jme->pdev);
			jme->flags &= ~JME_FLAG_MSI;
		}
	}
	else {
		netdev->irq = jme->pdev->irq;
	}

	return rc;
}

static void
jme_free_irq(struct jme_adapter *jme)
{
	free_irq(jme->pdev->irq, jme->dev);
	if (jme->flags & JME_FLAG_MSI) {
		pci_disable_msi(jme->pdev);
		jme->flags &= ~JME_FLAG_MSI;
		jme->dev->irq = jme->pdev->irq;
	}
}

static int
jme_open(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc, timeout = 100;

	while(
		--timeout > 0 &&
		(
		atomic_read(&jme->link_changing) != 1 ||
		atomic_read(&jme->rx_cleaning) != 1 ||
		atomic_read(&jme->tx_cleaning) != 1
		)
	)
		msleep(10);

	if(!timeout) {
		rc = -EBUSY;
		goto err_out;
	}

	jme_reset_mac_processor(jme);

	rc = jme_request_irq(jme);
	if(rc)
		goto err_out;

	jme_enable_shadow(jme);
	jme_start_irq(jme);
	jme_reset_link(jme);

	return 0;

err_out:
	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	return rc;
}

static int
jme_close(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);

	jme_stop_irq(jme);
	jme_disable_shadow(jme);
	jme_free_irq(jme);

	tasklet_kill(&jme->linkch_task);
	tasklet_kill(&jme->txclean_task);
	tasklet_kill(&jme->rxclean_task);
	tasklet_kill(&jme->rxempty_task);

	jme_reset_mac_processor(jme);
	jme_free_rx_resources(jme);
	jme_free_tx_resources(jme);

	return 0;
}

/*
 * This function is already protected by netif_tx_lock()
 */
static int
jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;

	if(unlikely(netif_queue_stopped(jme->dev)))
		return NETDEV_TX_BUSY;

#if 0
/*Testing*/
	/* (printing helper name lost in the dump; dprintk assumed) */
	dprintk("jme", "Frags: %d Headlen: %d Len: %d Sum:%d\n",
		skb_shinfo(skb)->nr_frags,
		skb_headlen(skb),
		skb->len,
		skb->ip_summed);
/*********/
#endif

	rc = jme_set_new_txdesc(jme, skb);

	if(unlikely(rc != NETDEV_TX_OK))
		return rc;

	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_QUEUE0S |
				TXCS_ENABLE);
	netdev->trans_start = jiffies;

	return NETDEV_TX_OK;
}

static int
jme_set_macaddr(struct net_device *netdev, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct sockaddr *addr = p;
	__u32 val;

	if(netif_running(netdev))
		return -EBUSY;

	spin_lock(&jme->macaddr_lock);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	val = addr->sa_data[3] << 24 |
	      addr->sa_data[2] << 16 |
	      addr->sa_data[1] <<  8 |
	      addr->sa_data[0];
	jwrite32(jme, JME_RXUMA_LO, val);
	val = addr->sa_data[5] << 8 |
	      addr->sa_data[4];
	jwrite32(jme, JME_RXUMA_HI, val);
	spin_unlock(&jme->macaddr_lock);

	return 0;
}

static void
jme_set_multi(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 mc_hash[2] = {};
	int i;
	unsigned long flags;

	spin_lock_irqsave(&jme->rxmcs_lock, flags);

	jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;

	if (netdev->flags & IFF_PROMISC) {
		jme->reg_rxmcs |= RXMCS_ALLFRAME;
	}
	else if (netdev->flags & IFF_ALLMULTI) {
		jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
	}
	else if(netdev->flags & IFF_MULTICAST) {
		struct dev_mc_list *mclist;
		int bit_nr;

		jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
		for (i = 0, mclist = netdev->mc_list;
		     mclist && i < netdev->mc_count;
		     ++i, mclist = mclist->next) {

			bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
			mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
		}

		jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
		jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
	}

	wmb();
	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);

	spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
}

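/*
 * Worked example (illustrative, hypothetical address): if
 * ether_crc(ETH_ALEN, addr) & 0x3F evaluated to 37, then
 * bit_nr >> 5 == 1 and bit_nr & 0x1F == 5, so bit 5 of JME_RXMCHT_HI
 * would be set for that multicast group.
 */
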
static int
jme_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	if(new_mtu == jme->old_mtu)
		return 0;

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	if(new_mtu > 4000) {
		jme->reg_rxcs &= ~RXCS_FIFOTHNP;
		jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
		jme_restart_rx_engine(jme);
	}
	else {
		jme->reg_rxcs &= ~RXCS_FIFOTHNP;
		jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
		jme_restart_rx_engine(jme);
	}

	if(new_mtu > 1900) {
		netdev->features &= ~NETIF_F_HW_CSUM;
	}
	else {
		netdev->features |= NETIF_F_HW_CSUM;
	}

	netdev->mtu = new_mtu;
	jme_reset_link(jme);

	return 0;
}

static void
jme_tx_timeout(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	/*
	 * Reset the link.
	 * The link change will reinitialize all RX/TX resources.
	 */
	jme_reset_link(jme);
}

static void
jme_get_drvinfo(struct net_device *netdev,
		struct ethtool_drvinfo *info)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(jme->pdev));
}

static int
jme_get_regs_len(struct net_device *netdev)
{
	return 0x400;
}

static void
mmapio_memcpy(struct jme_adapter *jme, __u32 *p, __u32 reg, int len)
{
	int i;

	for(i = 0 ; i < len ; i += 4)
		p[i >> 2] = jread32(jme, reg + i);

}

static void
jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 *p32 = (__u32*)p;

	memset(p, 0, 0x400);

	regs->version = 1;
	mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);

}

static int
jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	ecmd->use_adaptive_rx_coalesce = true;
	ecmd->tx_coalesce_usecs = PCC_TX_TO;
	ecmd->tx_max_coalesced_frames = PCC_TX_CNT;

	switch(jme->dpi.cur) {
	case PCC_P1:
		ecmd->rx_coalesce_usecs = PCC_P1_TO;
		ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
		break;
	case PCC_P2:
		ecmd->rx_coalesce_usecs = PCC_P2_TO;
		ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
		break;
	case PCC_P3:
		ecmd->rx_coalesce_usecs = PCC_P3_TO;
		ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
		break;
	default:
		break;
	}

	return 0;
}

/*
 * This is not actually for coalescing.
 * It changes an internal FIFO-related setting, for testing.
 */
static int
jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	if(ecmd->use_adaptive_rx_coalesce &&
	ecmd->use_adaptive_tx_coalesce &&
	ecmd->rx_coalesce_usecs == 250 &&
	(ecmd->rx_max_coalesced_frames_low == 16 ||
	ecmd->rx_max_coalesced_frames_low == 32 ||
	ecmd->rx_max_coalesced_frames_low == 64 ||
	ecmd->rx_max_coalesced_frames_low == 128)) {
		jme->reg_rxcs &= ~RXCS_FIFOTHNP;
		switch(ecmd->rx_max_coalesced_frames_low) {
		case 16:
			jme->reg_rxcs |= RXCS_FIFOTHNP_16QW;
			break;
		case 32:
			jme->reg_rxcs |= RXCS_FIFOTHNP_32QW;
			break;
		case 64:
			jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
			break;
		case 128:
		default:
			jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
		}
		jme_restart_rx_engine(jme);
	}
	else {
		return -EINVAL;
	}

	return 0;
}

static void
jme_get_pauseparam(struct net_device *netdev,
		   struct ethtool_pauseparam *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned long flags;
	__u32 val;

	ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
	ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;

	spin_lock_irqsave(&jme->phy_lock, flags);
	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
	spin_unlock_irqrestore(&jme->phy_lock, flags);
	ecmd->autoneg = (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
}

static int
jme_set_pauseparam(struct net_device *netdev,
		   struct ethtool_pauseparam *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned long flags;
	__u32 val;

	if( ((jme->reg_txpfc & TXPFC_PF_EN) != 0) !=
		(ecmd->tx_pause != 0)) {

		if(ecmd->tx_pause)
			jme->reg_txpfc |= TXPFC_PF_EN;
		else
			jme->reg_txpfc &= ~TXPFC_PF_EN;

		jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
	}

	spin_lock_irqsave(&jme->rxmcs_lock, flags);
	if( ((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) !=
		(ecmd->rx_pause != 0)) {

		if(ecmd->rx_pause)
			jme->reg_rxmcs |= RXMCS_FLOWCTRL;
		else
			jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;

		jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
	}
	spin_unlock_irqrestore(&jme->rxmcs_lock, flags);

	spin_lock_irqsave(&jme->phy_lock, flags);
	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
	if( ((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) !=
		(ecmd->autoneg != 0)) {

		if(ecmd->autoneg)
			val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		else
			val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE, val);
	}
	spin_unlock_irqrestore(&jme->phy_lock, flags);

	return 0;
}

static void
jme_get_wol(struct net_device *netdev,
	    struct ethtool_wolinfo *wol)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	wol->supported = WAKE_MAGIC | WAKE_PHY;

	wol->wolopts = 0;

	if(jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
		wol->wolopts |= WAKE_PHY;

	if(jme->reg_pmcs & PMCS_MFEN)
		wol->wolopts |= WAKE_MAGIC;

}

static int
jme_set_wol(struct net_device *netdev,
	    struct ethtool_wolinfo *wol)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	if(wol->wolopts & (WAKE_MAGICSECURE |
			   WAKE_UCAST |
			   WAKE_MCAST |
			   WAKE_BCAST |
			   WAKE_ARP))
		return -EOPNOTSUPP;

	jme->reg_pmcs = 0;

	if(wol->wolopts & WAKE_PHY)
		jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;

	if(wol->wolopts & WAKE_MAGIC)
		jme->reg_pmcs |= PMCS_MFEN;

	return 0;
}

static int
jme_get_settings(struct net_device *netdev,
		 struct ethtool_cmd *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&jme->phy_lock, flags);
	rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
	spin_unlock_irqrestore(&jme->phy_lock, flags);
	return rc;
}

static int
jme_set_settings(struct net_device *netdev,
		 struct ethtool_cmd *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc, fdc = 0;
	unsigned long flags;

	if(ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
		return -EINVAL;

	if(jme->mii_if.force_media &&
	ecmd->autoneg != AUTONEG_ENABLE &&
	(jme->mii_if.full_duplex != ecmd->duplex))
		fdc = 1;

	spin_lock_irqsave(&jme->phy_lock, flags);
	rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
	spin_unlock_irqrestore(&jme->phy_lock, flags);

	if(!rc && fdc)
		jme_reset_link(jme);

	if(!rc) {
		jme->flags |= JME_FLAG_SSET;
		jme->old_ecmd = *ecmd;
	}

	return rc;
}

static __u32
jme_get_link(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
}

static u32
jme_get_rx_csum(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	return jme->reg_rxmcs & RXMCS_CHECKSUM;
}

static int
jme_set_rx_csum(struct net_device *netdev, u32 on)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&jme->rxmcs_lock, flags);
	if(on)
		jme->reg_rxmcs |= RXMCS_CHECKSUM;
	else
		jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
	spin_unlock_irqrestore(&jme->rxmcs_lock, flags);

	return 0;
}

static int
jme_set_tx_csum(struct net_device *netdev, u32 on)
{
	if(on)
		netdev->features |= NETIF_F_HW_CSUM;
	else
		netdev->features &= ~NETIF_F_HW_CSUM;

	return 0;
}

static int
jme_nway_reset(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	jme_restart_an(jme);
	return 0;
}

static const struct ethtool_ops jme_ethtool_ops = {
	.get_drvinfo		= jme_get_drvinfo,
	.get_regs_len		= jme_get_regs_len,
	.get_regs		= jme_get_regs,
	.get_coalesce		= jme_get_coalesce,
	.set_coalesce		= jme_set_coalesce,
	.get_pauseparam		= jme_get_pauseparam,
	.set_pauseparam		= jme_set_pauseparam,
	.get_wol		= jme_get_wol,
	.set_wol		= jme_set_wol,
	.get_settings		= jme_get_settings,
	.set_settings		= jme_set_settings,
	.get_link		= jme_get_link,
	.get_rx_csum		= jme_get_rx_csum,
	.set_rx_csum		= jme_set_rx_csum,
	.set_tx_csum		= jme_set_tx_csum,
	.nway_reset		= jme_nway_reset,
};

static int
jme_pci_dma64(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
		if(!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
			return 1;

	if (!pci_set_dma_mask(pdev, DMA_40BIT_MASK))
		if(!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK))
			return 1;

	if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		if(!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
			return 0;

	return -1;
}

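/*
 * Return-value sketch: 1 means a >32-bit DMA mask was accepted (the
 * caller then sets NETIF_F_HIGHDMA), 0 means plain 32-bit DMA, and -1
 * means no mask could be set at all, which jme_init_one() below treats
 * as fatal.
 */
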
static int __devinit
jme_init_one(struct pci_dev *pdev,
	     const struct pci_device_id *ent)
{
	int rc = 0, using_dac;
	struct net_device *netdev;
	struct jme_adapter *jme;

	/*
	 * set up PCI device basics
	 */
	rc = pci_enable_device(pdev);
	if(rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device.\n");
		goto err_out;
	}

	using_dac = jme_pci_dma64(pdev);
	if(using_dac < 0) {
		printk(KERN_ERR PFX "Cannot set PCI DMA Mask.\n");
		rc = -EIO;
		goto err_out_disable_pdev;
	}

	if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "No PCI resource region found.\n");
		rc = -ENOMEM;
		goto err_out_disable_pdev;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if(rc) {
		printk(KERN_ERR PFX "Cannot obtain PCI resource region.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/*
	 * alloc and init net device
	 */
	netdev = alloc_etherdev(sizeof(*jme));
	if(!netdev) {
		rc = -ENOMEM;
		goto err_out_release_regions;
	}
	netdev->open			= jme_open;
	netdev->stop			= jme_close;
	netdev->hard_start_xmit		= jme_start_xmit;
	netdev->set_mac_address		= jme_set_macaddr;
	netdev->set_multicast_list	= jme_set_multi;
	netdev->change_mtu		= jme_change_mtu;
	netdev->ethtool_ops		= &jme_ethtool_ops;
	netdev->tx_timeout		= jme_tx_timeout;
	netdev->watchdog_timeo		= TX_TIMEOUT;
	NETDEV_GET_STATS(netdev, &jme_get_stats);
	netdev->features		= NETIF_F_HW_CSUM;
	if(using_dac)
		netdev->features	|= NETIF_F_HIGHDMA;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	/*
	 * init adapter info
	 */
	jme = netdev_priv(netdev);
	jme->pdev = pdev;
	jme->dev = netdev;
	jme->old_mtu = netdev->mtu = 1500;
	jme->phylink = 0;
	jme->regs = ioremap(pci_resource_start(pdev, 0),
			    pci_resource_len(pdev, 0));
	if(!(jme->regs)) {
		rc = -ENOMEM;
		goto err_out_free_netdev;
	}
	jme->shadow_regs = pci_alloc_consistent(pdev,
						sizeof(__u32) * SHADOW_REG_NR,
						&(jme->shadow_dma));
	if(!(jme->shadow_regs)) {
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	spin_lock_init(&jme->phy_lock);
	spin_lock_init(&jme->macaddr_lock);
	spin_lock_init(&jme->rxmcs_lock);

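	/*
	 * These counters start at 1 and serve as lightweight busy flags:
	 * jme_suspend() below treats a value of 1 as idle when it polls
	 * rx_cleaning/tx_cleaning, and link_changing is taken with
	 * atomic_dec() on suspend and released with atomic_inc() on resume.
	 */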
	atomic_set(&jme->link_changing, 1);
	atomic_set(&jme->rx_cleaning, 1);
	atomic_set(&jme->tx_cleaning, 1);

	tasklet_init(&jme->pcc_task,
		     &jme_pcc_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->linkch_task,
		     &jme_link_change_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->txclean_task,
		     &jme_tx_clean_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->rxclean_task,
		     &jme_rx_clean_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->rxempty_task,
		     &jme_rx_empty_tasklet,
		     (unsigned long) jme);

	jme->mii_if.dev = netdev;
	jme->mii_if.phy_id = 1;
	jme->mii_if.supports_gmii = 1;
	jme->mii_if.mdio_read = jme_mdio_read;
	jme->mii_if.mdio_write = jme_mdio_write;

	jme->dpi.cur = PCC_P1;

	jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
	jme->reg_rxcs = RXCS_DEFAULT;
	jme->reg_rxmcs = RXMCS_DEFAULT;
	jme->reg_txpfc = 0;
	jme->reg_pmcs = 0;
	/*
	 * Get Max Read Req Size from PCI Config Space
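	 * and pick a matching TXCS DMA size for transmit bursts.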
	 */
	pci_read_config_byte(pdev, PCI_CONF_DCSR_MRRS, &jme->mrrs);
	switch(jme->mrrs) {
	case MRRS_128B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
		break;
	case MRRS_256B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
		break;
	default:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
		break;
	}

	/*
	 * Reset MAC processor and reload EEPROM for MAC Address
	 */
	jme_clear_pm(jme);
	jme_reset_phy_processor(jme);
	jme_reset_mac_processor(jme);
	rc = jme_reload_eeprom(jme);
	if(rc) {
		printk(KERN_ERR PFX
			"Cannot reload eeprom for reading MAC Address.\n");
		goto err_out_free_shadow;
	}
	jme_load_macaddr(netdev);

	/*
	 * Tell stack that we are not ready to work until open()
	 */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	/*
	 * Register netdev
	 */
	rc = register_netdev(netdev);
	if(rc) {
		printk(KERN_ERR PFX "Cannot register net device.\n");
		goto err_out_free_shadow;
	}

	jprintk(netdev->name,
		"JMC250 gigabit eth %02x:%02x:%02x:%02x:%02x:%02x\n",
		netdev->dev_addr[0],
		netdev->dev_addr[1],
		netdev->dev_addr[2],
		netdev->dev_addr[3],
		netdev->dev_addr[4],
		netdev->dev_addr[5]);

	return 0;

err_out_free_shadow:
	pci_free_consistent(pdev,
			    sizeof(__u32) * SHADOW_REG_NR,
			    jme->shadow_regs,
			    jme->shadow_dma);
err_out_unmap:
	iounmap(jme->regs);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out:
	return rc;
}

static void __devexit
jme_remove_one(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	unregister_netdev(netdev);
	pci_free_consistent(pdev,
			    sizeof(__u32) * SHADOW_REG_NR,
			    jme->shadow_regs,
			    jme->shadow_dma);
	iounmap(jme->regs);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

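/*
 * Force the PHY down to 10Mbps half-duplex before sleeping,
 * presumably to keep the link alive at the lowest-power speed
 * while Wake-on-LAN is armed.
 */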
static void
jme_set_10m_half(struct jme_adapter *jme)
{
	__u32 bmcr, tmp;

	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
		       BMCR_SPEED1000 | BMCR_FULLDPLX);

	if(bmcr != tmp)
		jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);

	jwrite32(jme, JME_GHC, GHC_SPEED_10M);
}

static int
jme_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);
	int timeout = 100;

	atomic_dec(&jme->link_changing);

	netif_device_detach(netdev);
	netif_stop_queue(netdev);
	jme_stop_irq(jme);
	jme_free_irq(jme);

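	/*
	 * Poll for up to ~100ms until both cleaning tasklets are idle
	 * (their busy counters read 1) before tearing down the rings.
	 */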
	while(--timeout > 0 &&
	      (
		atomic_read(&jme->rx_cleaning) != 1 ||
		atomic_read(&jme->tx_cleaning) != 1
	      )) {
		mdelay(1);
	}
	if(!timeout) {
		jeprintk(netdev->name, "Timeout waiting for tasklets to finish.\n");
		return -EBUSY;
	}
	jme_disable_shadow(jme);

	if(netif_carrier_ok(netdev)) {
		jme_stop_pcc_timer(jme);
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);
		netif_carrier_off(netdev);
		jme->phylink = 0;
	}

	jme_set_10m_half(jme);

	pci_save_state(pdev);
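	/*
	 * Arm Wake-on-LAN only if the user enabled a wake event through
	 * ethtool (reg_pmcs non-zero); otherwise make sure wake from
	 * D3cold stays disabled.
	 */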
	if(jme->reg_pmcs) {
		jwrite32(jme, JME_PMCS, jme->reg_pmcs);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int
jme_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	jme_clear_pm(jme);
	pci_restore_state(pdev);

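	/*
	 * Reapply the link settings the user last set through ethtool,
	 * or fall back to a plain PHY reset if none were ever recorded.
	 */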
	if(jme->flags & JME_FLAG_SSET)
		jme_set_settings(netdev, &jme->old_ecmd);
	else
		jme_reset_phy_processor(jme);

	jme_reset_mac_processor(jme);
	jme_enable_shadow(jme);
	jme_request_irq(jme);
	jme_start_irq(jme);
	netif_device_attach(netdev);

	atomic_inc(&jme->link_changing);

	jme_reset_link(jme);

	return 0;
}

static struct pci_device_id jme_pci_tbl[] = {
	{ PCI_VDEVICE(JMICRON, 0x250) },
	{ }
};

static struct pci_driver jme_driver = {
	.name		= DRV_NAME,
	.id_table	= jme_pci_tbl,
	.probe		= jme_init_one,
	.remove		= __devexit_p(jme_remove_one),
#ifdef CONFIG_PM
	.suspend	= jme_suspend,
	.resume		= jme_resume,
#endif /* CONFIG_PM */
};

static int __init
jme_init_module(void)
{
	printk(KERN_INFO PFX "JMicron JMC250 gigabit ethernet "
	       "driver version %s\n", DRV_VERSION);
	return pci_register_driver(&jme_driver);
}

static void __exit
jme_cleanup_module(void)
{
	pci_unregister_driver(&jme_driver);
}

module_init(jme_init_module);
module_exit(jme_cleanup_module);

MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);