/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * http://www.jmicron.com/
 *
 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

/*
 * Note:
 * Backdoor for changing "FIFO Threshold for processing next packet"
 * Usage:
 *	ethtool -C eth1 adaptive-rx on adaptive-tx on \
 *		rx-usecs 250 rx-frames-low N
 *	N := 16 | 32 | 64 | 128
 */

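/*
 * Concrete example (hypothetical setup, assuming eth1 is the JMC250 port):
 *	ethtool -C eth1 adaptive-rx on adaptive-tx on rx-usecs 250 rx-frames-low 64
 * selects the 64-QWORD FIFO threshold; see jme_set_coalesce() below for how
 * these values are decoded.
 */
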
/*
 * Timeline before release:
 * Stage 4: Basic feature support.
 * 0.7:
 *	- Implement Power Management related functions.
 *
 * Stage 5: Advanced offloading support.
 * 0.8:
 *	- Implement VLAN offloading.
 * 0.9:
 *	- Implement scatter-gather offloading.
 *	  Use pci_map_page on scattered sk_buff for HIGHMEM support.
 *	- Implement TCP Segmentation Offloading.
 *	  Due to TX FIFO size, we should turn off TSO when MTU > 1500.
 *
 * Stage 6: CPU load balancing.
 * 1.0:
 *	- Implement MSI-X.
 *	  Along with multiple RX queues, for CPU load balancing.
 *
 * Stage 7:
 *	- Clean up / re-organize code, performance tuning (alignment etc...).
 *	- Test and release 1.0.
 *
 * Non-Critical:
 *	- Use NAPI instead of rx_tasklet?
 *	  PCC supports both packet-counter and timeout interrupts for
 *	  receive and transmit completion; is NAPI really needed?
 *	- Decode register dump for ethtool.
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include "jme.h"

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
static struct net_device_stats *
jme_get_stats(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return &jme->stats;
}
#endif

static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, val;

	jwrite32(jme, JME_SMI, SMI_OP_REQ |
				smi_phy_addr(phy) |
				smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
		udelay(1);
		if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0) {
		jeprintk(netdev->name, "phy read timeout : %d\n", reg);
		return 0;
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

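/*
 * Usage sketch (hypothetical, not called this way anywhere in this file;
 * MII_BMSR is the standard PHY status register from <linux/mii.h>):
 *	int bmsr = jme_mdio_read(netdev, jme->mii_if.phy_id, MII_BMSR);
 * The same read/write pair below is handed to the generic MII layer
 * through jme->mii_if in jme_init_one().
 */
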
static void
jme_mdio_write(struct net_device *netdev,
				int phy, int reg, int val)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i;

	jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
		((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
		smi_phy_addr(phy) | smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
		udelay(1);
		if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0)
		jeprintk(netdev->name, "phy write timeout : %d\n", reg);

	return;
}

__always_inline static void
jme_reset_phy_processor(struct jme_adapter *jme)
{
	__u32 val;

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_ADVERTISE, ADVERTISE_ALL |
			ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_CTRL1000,
			ADVERTISE_1000FULL | ADVERTISE_1000HALF);

	val = jme_mdio_read(jme->dev,
				jme->mii_if.phy_id,
				MII_BMCR);

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_BMCR, val | BMCR_RESET);

	return;
}

__always_inline static void
jme_reset_mac_processor(struct jme_adapter *jme)
{
	jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
	udelay(2);
	jwrite32(jme, JME_GHC, jme->reg_ghc);
	jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
	jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
	jwrite32(jme, JME_WFODP, 0);
	jwrite32(jme, JME_WFOI, 0);
	jwrite32(jme, JME_GPREG0, GPREG0_DEFAULT);
	jwrite32(jme, JME_GPREG1, 0);
}

__always_inline static void
jme_clear_pm(struct jme_adapter *jme)
{
	jwrite32(jme, JME_PMCS, 0xFFFF0000);
	pci_set_power_state(jme->pdev, PCI_D0);
}

static int
jme_reload_eeprom(struct jme_adapter *jme)
{
	__u32 val;
	int i;

	val = jread32(jme, JME_SMBCSR);

	if(val & SMBCSR_EEPROMD)
	{
		val |= SMBCSR_CNACK;
		jwrite32(jme, JME_SMBCSR, val);
		val |= SMBCSR_RELOAD;
		jwrite32(jme, JME_SMBCSR, val);
		mdelay(12);

		for (i = JME_SMB_TIMEOUT; i > 0; --i)
		{
			mdelay(1);
			if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
				break;
		}

		if(i == 0) {
			jeprintk(jme->dev->name, "eeprom reload timeout\n");
			return -EIO;
		}
	}
	else
		return -EIO;

	return 0;
}

static void
jme_load_macaddr(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned char macaddr[6];
	__u32 val;

	spin_lock(&jme->macaddr_lock);
	val = jread32(jme, JME_RXUMA_LO);
	macaddr[0] = (val >>  0) & 0xFF;
	macaddr[1] = (val >>  8) & 0xFF;
	macaddr[2] = (val >> 16) & 0xFF;
	macaddr[3] = (val >> 24) & 0xFF;
	val = jread32(jme, JME_RXUMA_HI);
	macaddr[4] = (val >>  0) & 0xFF;
	macaddr[5] = (val >>  8) & 0xFF;
	memcpy(netdev->dev_addr, macaddr, 6);
	spin_unlock(&jme->macaddr_lock);
}

__always_inline static void
jme_set_rx_pcc(struct jme_adapter *jme, int p)
{
	switch(p) {
	case PCC_P1:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P2:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P3:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	default:
		break;
	}

	dprintk(jme->dev->name, "Switched to PCC_P%d\n", p);
}

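/*
 * Each PCC profile packs a timeout and a packet count into JME_PCCRX0:
 * the NIC coalesces RX-complete interrupts until either PCC_Pn_CNT packets
 * have arrived or the PCC_Pn_TO timeout expires, whichever comes first.
 * P1 is the low-latency profile and P3 the low-interrupt-rate one;
 * jme_dynamic_pcc() below moves between them based on observed RX load.
 */
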
static void
jme_start_irq(struct jme_adapter *jme)
{
	register struct dynpcc_info *dpi = &(jme->dpi);

	jme_set_rx_pcc(jme, PCC_P1);
	dpi->cur	= PCC_P1;
	dpi->attempt	= PCC_P1;
	dpi->cnt	= 0;

	jwrite32(jme, JME_PCCTX,
			((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
			((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
			PCCTXQ0_EN
		);

	/*
	 * Enable Interrupts
	 */
	jwrite32(jme, JME_IENS, INTR_ENABLE);
}

__always_inline static void
jme_stop_irq(struct jme_adapter *jme)
{
	/*
	 * Disable Interrupts
	 */
	jwrite32(jme, JME_IENC, INTR_ENABLE);
}

__always_inline static void
jme_enable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme,
		 JME_SHBA_LO,
		 ((__u32)jme->shadow_dma & ~((__u32)0x1F)) | SHBA_POSTEN);
}

__always_inline static void
jme_disable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme, JME_SHBA_LO, 0x0);
}

static int
jme_check_link(struct net_device *netdev, int testonly)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr;
	char linkmsg[64];
	int rc = 0;

	phylink = jread32(jme, JME_PHY_LINK);

	if (phylink & PHY_LINK_UP) {
		if(!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
			/*
			 * If we did not enable AN,
			 * Speed/Duplex Info should be obtained from SMI
			 */
			phylink = PHY_LINK_UP;

			bmcr = jme_mdio_read(jme->dev,
						jme->mii_if.phy_id,
						MII_BMCR);

			phylink |= ((bmcr & BMCR_SPEED1000) &&
					(bmcr & BMCR_SPEED100) == 0) ?
					PHY_LINK_SPEED_1000M :
					(bmcr & BMCR_SPEED100) ?
					PHY_LINK_SPEED_100M :
					PHY_LINK_SPEED_10M;

			phylink |= (bmcr & BMCR_FULLDPLX) ?
					PHY_LINK_DUPLEX : 0;

			strcpy(linkmsg, "Forced: ");
		}
		else {
			/*
			 * Keep polling for speed/duplex resolve complete
			 */
			while(!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
				--cnt) {

				udelay(1);
				phylink = jread32(jme, JME_PHY_LINK);

			}

			if(!cnt)
				jeprintk(netdev->name,
					"Waiting for speed resolve timed out.\n");

			strcpy(linkmsg, "ANed: ");
		}

		if(jme->phylink == phylink) {
			rc = 1;
			goto out;
		}
		if(testonly)
			goto out;

		jme->phylink = phylink;

		switch(phylink & PHY_LINK_SPEED_MASK) {
		case PHY_LINK_SPEED_10M:
			ghc = GHC_SPEED_10M;
			strcat(linkmsg, "10 Mbps, ");
			break;
		case PHY_LINK_SPEED_100M:
			ghc = GHC_SPEED_100M;
			strcat(linkmsg, "100 Mbps, ");
			break;
		case PHY_LINK_SPEED_1000M:
			ghc = GHC_SPEED_1000M;
			strcat(linkmsg, "1000 Mbps, ");
			break;
		default:
			ghc = 0;
			break;
		}
		ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;

		strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
					"Full-Duplex, " :
					"Half-Duplex, ");

		if(phylink & PHY_LINK_MDI_STAT)
			strcat(linkmsg, "MDI-X");
		else
			strcat(linkmsg, "MDI");

		if(phylink & PHY_LINK_DUPLEX)
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
		else {
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
						TXMCS_BACKOFF |
						TXMCS_CARRIERSENSE |
						TXMCS_COLLISION);
			jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
				((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
				TXTRHD_TXREN |
				((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
		}

		jme->reg_ghc = ghc;
		jwrite32(jme, JME_GHC, ghc);

		jprintk(netdev->name, "Link is up at %s.\n", linkmsg);
		netif_carrier_on(netdev);
	}
	else {
		if(testonly)
			goto out;

		jprintk(netdev->name, "Link is down.\n");
		jme->phylink = 0;
		netif_carrier_off(netdev);
	}

out:
	return rc;
}

static int
jme_alloc_txdesc(struct jme_adapter *jme,
			int nr_alloc)
{
	struct jme_ring *txring = jme->txring;
	int idx;

	idx = txring->next_to_use;

	if(unlikely(atomic_read(&txring->nr_free) < nr_alloc))
		return -1;

	atomic_sub(nr_alloc, &txring->nr_free);

	if((txring->next_to_use += nr_alloc) >= RING_DESC_NR)
		txring->next_to_use -= RING_DESC_NR;

	return idx;
}

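/*
 * Note: callers wrap the returned index with (idx + i) & (RING_DESC_NR - 1),
 * which assumes RING_DESC_NR is a power of two.
 */
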
static void
jme_tx_csum(struct sk_buff *skb, unsigned mtu, __u8 *flags)
{
	if(skb->ip_summed == CHECKSUM_PARTIAL) {
		__u8 ip_proto;

		switch (skb->protocol) {
		case __constant_htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			ip_proto = 0;
			break;
		}

		switch(ip_proto) {
		case IPPROTO_TCP:
			*flags |= TXFLAG_TCPCS;
			break;
		case IPPROTO_UDP:
			*flags |= TXFLAG_UDPCS;
			break;
		default:
			jeprintk("jme", "Unsupported upper layer protocol.\n");
			break;
		}
	}
}

static int
jme_set_new_txdesc(struct jme_adapter *jme,
			struct sk_buff *skb)
{
	struct jme_ring *txring = jme->txring;
	volatile struct txdesc *txdesc = txring->desc, *ctxdesc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
	dma_addr_t dmaaddr;
	int i, idx, nr_desc;
	__u8 flags;

	nr_desc = 2;
	idx = jme_alloc_txdesc(jme, nr_desc);

	if(unlikely(idx < 0))
		return NETDEV_TX_BUSY;

	for(i = 1 ; i < nr_desc ; ++i) {
		ctxdesc = txdesc + ((idx + i) & (RING_DESC_NR - 1));
		ctxbi = txbi + ((idx + i) & (RING_DESC_NR - 1));

		dmaaddr = pci_map_single(jme->pdev,
					 skb->data,
					 skb->len,
					 PCI_DMA_TODEVICE);

		pci_dma_sync_single_for_device(jme->pdev,
					       dmaaddr,
					       skb->len,
					       PCI_DMA_TODEVICE);

		ctxdesc->dw[0] = 0;
		ctxdesc->dw[1] = 0;
		ctxdesc->desc2.flags	= TXFLAG_OWN;
		if(jme->dev->features & NETIF_F_HIGHDMA)
			ctxdesc->desc2.flags |= TXFLAG_64BIT;
		ctxdesc->desc2.datalen	= cpu_to_le16(skb->len);
		ctxdesc->desc2.bufaddrh	= cpu_to_le32((__u64)dmaaddr >> 32);
		ctxdesc->desc2.bufaddrl	= cpu_to_le32(
						(__u64)dmaaddr & 0xFFFFFFFFUL);

		ctxbi->mapping = dmaaddr;
		ctxbi->len = skb->len;
	}

	ctxdesc = txdesc + idx;
	ctxbi = txbi + idx;

	ctxdesc->dw[0] = 0;
	ctxdesc->dw[1] = 0;
	ctxdesc->dw[2] = 0;
	ctxdesc->dw[3] = 0;
	ctxdesc->desc1.pktsize = cpu_to_le16(skb->len);
	/*
	 * Set the OWN bit last.
	 * The kernel may transmit faster than the NIC, and the NIC might
	 * try to send from this descriptor before we tell it to start
	 * sending this TX queue.
	 * Other fields are already filled correctly.
	 */
	wmb();
	flags = TXFLAG_OWN | TXFLAG_INT;
	jme_tx_csum(skb, jme->dev->mtu, &flags);
	ctxdesc->desc1.flags = flags;
	/*
	 * Set tx buffer info after telling the NIC to send,
	 * for better tx_clean timing.
	 */
	wmb();
	ctxbi->nr_desc = nr_desc;
	ctxbi->skb = skb;

	tx_dbg(jme->dev->name, "Xmit: %d+%d\n", idx, nr_desc);

	return 0;
}

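/*
 * Layout note: jme_set_new_txdesc() currently always consumes two
 * descriptors per skb: descriptor idx carries the desc1 header (packet
 * size, checksum flags, OWN|INT), and idx+1 carries the desc2 buffer
 * pointer for the linearly-mapped skb data. Scatter-gather (more desc2
 * entries per packet) is still on the 0.9 to-do list above.
 */
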
static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);

	txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
					   TX_RING_ALLOC_SIZE,
					   &(txring->dmaalloc),
					   GFP_ATOMIC);

	if(!txring->alloc) {
		txring->desc = NULL;
		txring->dmaalloc = 0;
		txring->dma = 0;
		return -ENOMEM;
	}

	/*
	 * 16 Bytes align
	 */
	txring->desc = (void*)ALIGN((unsigned long)(txring->alloc),
				    RING_DESC_ALIGN);
	txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
	txring->next_to_use = 0;
	txring->next_to_clean = 0;
	atomic_set(&txring->nr_free, RING_DESC_NR);

	/*
	 * Initialize Transmit Descriptors
	 */
	memset(txring->alloc, 0, TX_RING_ALLOC_SIZE);
	memset(txring->bufinf, 0,
		sizeof(struct jme_buffer_info) * RING_DESC_NR);

	return 0;
}

static void
jme_free_tx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi = txring->bufinf;

	if(txring->alloc) {
		for(i = 0 ; i < RING_DESC_NR ; ++i) {
			txbi = txring->bufinf + i;
			if(txbi->skb) {
				dev_kfree_skb(txbi->skb);
				txbi->skb = NULL;
			}
			txbi->mapping = 0;
			txbi->len = 0;
			txbi->nr_desc = 0;
		}

		dma_free_coherent(&(jme->pdev->dev),
				  TX_RING_ALLOC_SIZE,
				  txring->alloc,
				  txring->dmaalloc);

		txring->alloc = NULL;
		txring->desc = NULL;
		txring->dmaalloc = 0;
		txring->dma = 0;
	}
	txring->next_to_use = 0;
	txring->next_to_clean = 0;
	atomic_set(&txring->nr_free, 0);

}

__always_inline static void
jme_enable_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);

	/*
	 * Setup TX Queue 0 DMA Base Address
	 */
	jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
	jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup TX Descriptor Count
	 */
	jwrite32(jme, JME_TXQDC, RING_DESC_NR);

	/*
	 * Enable TX Engine
	 */
	wmb();
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);

}

__always_inline static void
jme_disable_tx_engine(struct jme_adapter *jme)
{
	int i;
	__u32 val;

	/*
	 * Disable TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);

	val = jread32(jme, JME_TXCS);
	for(i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i)
	{
		mdelay(1);
		val = jread32(jme, JME_TXCS);
	}

	if(!i) {
		jeprintk(jme->dev->name, "Disable TX engine timeout.\n");
		jme_reset_mac_processor(jme);
	}

}

static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = jme->rxring;
	register volatile struct rxdesc* rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxdesc += i;
	rxbi += i;

	rxdesc->dw[0] = 0;
	rxdesc->dw[1] = 0;
	rxdesc->desc1.bufaddrh	= cpu_to_le32((__u64)rxbi->mapping >> 32);
	rxdesc->desc1.bufaddrl	= cpu_to_le32(
					(__u64)rxbi->mapping & 0xFFFFFFFFUL);
	rxdesc->desc1.datalen	= cpu_to_le16(rxbi->len);
	if(jme->dev->features & NETIF_F_HIGHDMA)
		rxdesc->desc1.flags = RXFLAG_64BIT;
	wmb();
	rxdesc->desc1.flags	|= RXFLAG_OWN | RXFLAG_INT;
}

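/*
 * Ordering note for jme_set_clean_rxdesc(): the wmb() before the final
 * flag write guarantees the buffer address and length fields reach memory
 * before the OWN bit is set, so the NIC never sees a half-initialized
 * descriptor that it already owns.
 */
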
static int
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf;
	unsigned long offset;
	struct sk_buff* skb;

	skb = netdev_alloc_skb(jme->dev,
		jme->dev->mtu + RX_EXTRA_LEN);
	if(unlikely(!skb))
		return -ENOMEM;

	if(unlikely(skb_is_nonlinear(skb))) {
		dprintk(jme->dev->name,
			"Allocated skb fragged(%d).\n",
			skb_shinfo(skb)->nr_frags);
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	if(unlikely(offset =
			(unsigned long)(skb->data)
			& ((unsigned long)RX_BUF_DMA_ALIGN - 1)))
		skb_reserve(skb, RX_BUF_DMA_ALIGN - offset);

	rxbi += i;
	rxbi->skb = skb;
	rxbi->len = skb_tailroom(skb);
	rxbi->mapping = pci_map_single(jme->pdev,
				       skb->data,
				       rxbi->len,
				       PCI_DMA_FROMDEVICE);

	return 0;
}

static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxbi += i;

	if(rxbi->skb) {
		pci_unmap_single(jme->pdev,
				 rxbi->mapping,
				 rxbi->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb(rxbi->skb);
		rxbi->skb = NULL;
		rxbi->mapping = 0;
		rxbi->len = 0;
	}
}

static void
jme_free_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	if(rxring->alloc) {
		for(i = 0 ; i < RING_DESC_NR ; ++i)
			jme_free_rx_buf(jme, i);

		dma_free_coherent(&(jme->pdev->dev),
				  RX_RING_ALLOC_SIZE,
				  rxring->alloc,
				  rxring->dmaalloc);
		rxring->alloc = NULL;
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
	}
	rxring->next_to_use = 0;
	rxring->next_to_clean = 0;
}

static int
jme_setup_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
					   RX_RING_ALLOC_SIZE,
					   &(rxring->dmaalloc),
					   GFP_ATOMIC);
	if(!rxring->alloc) {
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
		return -ENOMEM;
	}

	/*
	 * 16 Bytes align
	 */
	rxring->desc = (void*)ALIGN((unsigned long)(rxring->alloc),
				    RING_DESC_ALIGN);
	rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
	rxring->next_to_use = 0;
	rxring->next_to_clean = 0;

	/*
	 * Initialize Receive Descriptors
	 */
	for(i = 0 ; i < RING_DESC_NR ; ++i) {
		if(unlikely(jme_make_new_rx_buf(jme, i))) {
			jme_free_rx_resources(jme);
			return -ENOMEM;
		}

		jme_set_clean_rxdesc(jme, i);
	}

	return 0;
}

__always_inline static void
jme_enable_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Setup RX DMA Base Address
	 */
	jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
	jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup RX Descriptor Count
	 */
	jwrite32(jme, JME_RXQDC, RING_DESC_NR);

	/*
	 * Setup Unicast Filter
	 */
	jme_set_multi(jme->dev);

	/*
	 * Enable RX Engine
	 */
	wmb();
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}

__always_inline static void
jme_restart_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Start RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}

__always_inline static void
jme_disable_rx_engine(struct jme_adapter *jme)
{
	int i;
	__u32 val;

	/*
	 * Disable RX Engine
	 */
	val = jread32(jme, JME_RXCS);
	val &= ~RXCS_ENABLE;
	jwrite32(jme, JME_RXCS, val);

	val = jread32(jme, JME_RXCS);
	for(i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i)
	{
		mdelay(100);
		val = jread32(jme, JME_RXCS);
	}

	if(!i)
		jeprintk(jme->dev->name, "Disable RX engine timeout.\n");

}

static void
jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx, int summed)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	volatile struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	struct sk_buff *skb;
	int framesize;

	rxdesc += idx;
	rxbi += idx;

	skb = rxbi->skb;
	pci_dma_sync_single_for_cpu(jme->pdev,
					rxbi->mapping,
					rxbi->len,
					PCI_DMA_FROMDEVICE);

	if(unlikely(jme_make_new_rx_buf(jme, idx))) {
		pci_dma_sync_single_for_device(jme->pdev,
						rxbi->mapping,
						rxbi->len,
						PCI_DMA_FROMDEVICE);

		++(NET_STAT(jme).rx_dropped);
	}
	else {
		framesize = le16_to_cpu(rxdesc->descwb.framesize)
				- RX_PREPAD_SIZE;

		skb_reserve(skb, RX_PREPAD_SIZE);
		skb_put(skb, framesize);
		skb->protocol = eth_type_trans(skb, jme->dev);

		if(summed)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		netif_rx(skb);

		if(le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST_MUL)
			++(NET_STAT(jme).multicast);

		jme->dev->last_rx = jiffies;
		NET_STAT(jme).rx_bytes += framesize;
		++(NET_STAT(jme).rx_packets);
	}

	jme_set_clean_rxdesc(jme, idx);

}

static int
jme_rxsum_bad(struct jme_adapter *jme, __u16 flags)
{
	if(unlikely((flags & RXWBFLAG_TCPON) &&
	!(flags & RXWBFLAG_TCPCS))) {
		csum_dbg(jme->dev->name, "TCP Checksum error.\n");
		return 1;
	}
	else if(unlikely((flags & RXWBFLAG_UDPON) &&
	!(flags & RXWBFLAG_UDPCS))) {
		csum_dbg(jme->dev->name, "UDP Checksum error.\n");
		return 1;
	}
	else if(unlikely((flags & RXWBFLAG_IPV4) &&
	!(flags & RXWBFLAG_IPCS))) {
		csum_dbg(jme->dev->name, "IPV4 Checksum error.\n");
		return 1;
	}
	else {
		return 0;
	}
}

static int
jme_process_receive(struct jme_adapter *jme, int limit)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	volatile struct rxdesc *rxdesc = rxring->desc;
	int i, j, ccnt, desccnt;

	i = rxring->next_to_clean;
	while( limit-- > 0 )
	{
		rxdesc = rxring->desc;
		rxdesc += i;

		if((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
		!(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
			goto out;

		desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

		rx_dbg(jme->dev->name, "RX: Cleaning %d\n", i);

		if(unlikely(desccnt > 1 ||
		rxdesc->descwb.errstat & RXWBERR_ALLERR ||
		jme_rxsum_bad(jme, rxdesc->descwb.flags))) {

			if(rxdesc->descwb.errstat & RXWBERR_CRCERR)
				++(NET_STAT(jme).rx_crc_errors);
			else if(rxdesc->descwb.errstat & RXWBERR_OVERUN)
				++(NET_STAT(jme).rx_fifo_errors);
			else
				++(NET_STAT(jme).rx_errors);

			if(desccnt > 1) {
				rx_dbg(jme->dev->name,
					"RX: More than one(%d) descriptor, "
					"framelen=%d\n",
					desccnt, le16_to_cpu(rxdesc->descwb.framesize));
				limit -= desccnt - 1;
			}

			for(j = i, ccnt = desccnt ; ccnt-- ; ) {
				jme_set_clean_rxdesc(jme, j);

				if(unlikely(++j == RING_DESC_NR))
					j = 0;
			}

		}
		else {
			jme_alloc_and_feed_skb(jme, i,
				(rxdesc->descwb.flags &
					(RXWBFLAG_TCPON |
					 RXWBFLAG_UDPON |
					 RXWBFLAG_IPV4)));
		}

		if((i += desccnt) >= RING_DESC_NR)
			i -= RING_DESC_NR;
	}

out:
	rx_dbg(jme->dev->name, "RX: Stop at %d\n", i);
	rx_dbg(jme->dev->name, "RX: RXNDA offset %d\n",
		(jread32(jme, JME_RXNDA) - jread32(jme, JME_RXDBA_LO))
			>> 4);

	rxring->next_to_clean = i;

	return limit > 0 ? limit : 0;

}

static void
jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
{
	if(likely(atmp == dpi->cur))
		return;

	if(dpi->attempt == atmp) {
		++(dpi->cnt);
	}
	else {
		dpi->attempt = atmp;
		dpi->cnt = 0;
	}

}

static void
jme_dynamic_pcc(struct jme_adapter *jme)
{
	register struct dynpcc_info *dpi = &(jme->dpi);

	if((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
		jme_attempt_pcc(dpi, PCC_P3);
	else if((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P2_THRESHOLD
	|| dpi->intr_cnt > PCC_INTR_THRESHOLD)
		jme_attempt_pcc(dpi, PCC_P2);
	else
		jme_attempt_pcc(dpi, PCC_P1);

	if(unlikely(dpi->attempt != dpi->cur && dpi->cnt > 20)) {
		jme_set_rx_pcc(jme, dpi->attempt);
		dpi->cur = dpi->attempt;
		dpi->cnt = 0;
	}
}

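/*
 * Worked example: if more than PCC_P3_THRESHOLD bytes of RX traffic
 * arrived since the last timer tick, PCC_P3 becomes the candidate; only
 * after the same candidate has won more than 20 consecutive ticks does
 * jme_set_rx_pcc() actually reprogram the hardware, which keeps the
 * coalescing setting from flapping on bursty traffic.
 */
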
static void
jme_start_pcc_timer(struct jme_adapter *jme)
{
	struct dynpcc_info *dpi = &(jme->dpi);
	dpi->last_bytes	= NET_STAT(jme).rx_bytes;
	dpi->last_pkts	= NET_STAT(jme).rx_packets;
	dpi->intr_cnt	= 0;
	jwrite32(jme, JME_TMCSR,
		TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
}

static void
jme_pcc_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter*)arg;
	struct net_device *netdev = jme->dev;

	if(netif_queue_stopped(netdev)) {
		jwrite32(jme, JME_TMCSR, 0);
		return;
	}
	jme_dynamic_pcc(jme);
	jme_start_pcc_timer(jme);
}

static void
jme_link_change_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter*)arg;
	struct net_device *netdev = jme->dev;
	int timeout = WAIT_TASKLET_TIMEOUT;
	int rc;

	if(!atomic_dec_and_test(&jme->link_changing))
		goto out;

	if(jme_check_link(netdev, 1) && jme->oldmtu == netdev->mtu)
		goto out;

	jme->oldmtu = netdev->mtu;
	netif_stop_queue(netdev);

	while(--timeout > 0 &&
		(
		atomic_read(&jme->rx_cleaning) != 1 ||
		atomic_read(&jme->tx_cleaning) != 1
		)) {

		mdelay(1);
	}

	if(netif_carrier_ok(netdev)) {
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);
	}

	jme_check_link(netdev, 0);
	if(netif_carrier_ok(netdev)) {
		rc = jme_setup_rx_resources(jme);
		if(rc) {
			jeprintk(netdev->name,
				"Error allocating resources for RX"
				", device STOPPED!\n");
			goto out;
		}

		rc = jme_setup_tx_resources(jme);
		if(rc) {
			jeprintk(netdev->name,
				"Error allocating resources for TX"
				", device STOPPED!\n");
			goto err_out_free_rx_resources;
		}

		jme_enable_rx_engine(jme);
		jme_enable_tx_engine(jme);

		netif_start_queue(netdev);
		jme_start_pcc_timer(jme);
	}

	goto out;

err_out_free_rx_resources:
	jme_free_rx_resources(jme);
out:
	atomic_inc(&jme->link_changing);
}

static void
jme_rx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter*)arg;
	struct dynpcc_info *dpi = &(jme->dpi);

	if(unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
		goto out;

	if(unlikely(atomic_read(&jme->link_changing) != 1))
		goto out;

	if(unlikely(netif_queue_stopped(jme->dev)))
		goto out;

	jme_process_receive(jme, RING_DESC_NR);
	++(dpi->intr_cnt);

out:
	atomic_inc(&jme->rx_cleaning);
}

static void
jme_rx_empty_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter*)arg;

	if(unlikely(atomic_read(&jme->link_changing) != 1))
		return;

	if(unlikely(netif_queue_stopped(jme->dev)))
		return;

	jme_rx_clean_tasklet(arg);
	jme_restart_rx_engine(jme);
}

static void
jme_tx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter*)arg;
	struct jme_ring *txring = &(jme->txring[0]);
	volatile struct txdesc *txdesc = txring->desc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
	int i, j, cnt = 0, max, err;

	if(unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
		goto out;

	if(unlikely(atomic_read(&jme->link_changing) != 1))
		goto out;

	if(unlikely(netif_queue_stopped(jme->dev)))
		goto out;

	max = RING_DESC_NR - atomic_read(&txring->nr_free);

	tx_dbg(jme->dev->name, "Tx Tasklet: In\n");

	for(i = txring->next_to_clean ; cnt < max ; ) {

		ctxbi = txbi + i;

		if(ctxbi->skb && !(txdesc[i].descwb.flags & TXWBFLAG_OWN)) {

			err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;

			tx_dbg(jme->dev->name,
				"Tx Tasklet: Clean %d+%d\n",
				i, ctxbi->nr_desc);

			for(j = 1 ; j < ctxbi->nr_desc ; ++j) {
				ttxbi = txbi + ((i + j) & (RING_DESC_NR - 1));
				txdesc[(i + j) & (RING_DESC_NR - 1)].dw[0] = 0;

				pci_unmap_single(jme->pdev,
						 ttxbi->mapping,
						 ttxbi->len,
						 PCI_DMA_TODEVICE);

				if(likely(!err))
					NET_STAT(jme).tx_bytes += ttxbi->len;

				ttxbi->mapping = 0;
				ttxbi->len = 0;
			}

			dev_kfree_skb(ctxbi->skb);
			ctxbi->skb = NULL;

			cnt += ctxbi->nr_desc;

			if(unlikely(err))
				++(NET_STAT(jme).tx_carrier_errors);
			else
				++(NET_STAT(jme).tx_packets);
		}
		else {
			if(!ctxbi->skb)
				tx_dbg(jme->dev->name,
					"Tx Tasklet:"
					" Stopped due to no skb.\n");
			else
				tx_dbg(jme->dev->name,
					"Tx Tasklet:"
					" Stopped due to not done.\n");
			break;
		}

		if(unlikely((i += ctxbi->nr_desc) >= RING_DESC_NR))
			i -= RING_DESC_NR;

		ctxbi->nr_desc = 0;
	}

	tx_dbg(jme->dev->name,
		"Tx Tasklet: Stop %d Jiffies %lu\n",
		i, jiffies);
	txring->next_to_clean = i;

	atomic_add(cnt, &txring->nr_free);

out:
	atomic_inc(&jme->tx_cleaning);
}

static void
jme_intr_msi(struct jme_adapter *jme, __u32 intrstat)
{
	/*
	 * Disable interrupt
	 */
	jwrite32f(jme, JME_IENC, INTR_ENABLE);

	if(intrstat & (INTR_LINKCH | INTR_SWINTR)) {
		tasklet_schedule(&jme->linkch_task);
		goto out_deassert;
	}

	if(intrstat & INTR_TMINTR)
		tasklet_schedule(&jme->pcc_task);

	if(intrstat & INTR_RX0EMP)
		tasklet_schedule(&jme->rxempty_task);

	if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
		tasklet_schedule(&jme->rxclean_task);

	if(intrstat & (INTR_PCCTXTO | INTR_PCCTX))
		tasklet_schedule(&jme->txclean_task);

	if((intrstat & ~INTR_ENABLE) != 0) {
		/*
		 * Some interrupt not handled
		 * but not enabled also (for debug)
		 */
	}

out_deassert:
	/*
	 * Deassert interrupts
	 */
	jwrite32f(jme, JME_IEVE, intrstat);

	/*
	 * Re-enable interrupt
	 */
	jwrite32f(jme, JME_IENS, INTR_ENABLE);

}

static irqreturn_t
jme_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	irqreturn_t rc = IRQ_HANDLED;
	__u32 intrstat;

	intrstat = jread32(jme, JME_IEVE);

	/*
	 * Check if it's really an interrupt for us
	 */
	if(unlikely(intrstat == 0)) {
		rc = IRQ_NONE;
		goto out;
	}

	/*
	 * Check if the device still exists
	 */
	if(unlikely(intrstat == ~((typeof(intrstat))0))) {
		rc = IRQ_NONE;
		goto out;
	}

	/*
	 * Allow one interrupt handling at a time
	 */
	if(unlikely(!atomic_dec_and_test(&jme->intr_sem)))
		goto out_inc;

	jme_intr_msi(jme, intrstat);

out_inc:
	/*
	 * Enable next interrupt handling
	 */
	atomic_inc(&jme->intr_sem);

out:
	return rc;
}

static irqreturn_t
jme_msi(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 intrstat;

	pci_dma_sync_single_for_cpu(jme->pdev,
				    jme->shadow_dma,
				    sizeof(__u32) * SHADOW_REG_NR,
				    PCI_DMA_FROMDEVICE);
	intrstat = jme->shadow_regs[SHADOW_IEVE];
	jme->shadow_regs[SHADOW_IEVE] = 0;

	jme_intr_msi(jme, intrstat);

	return IRQ_HANDLED;
}

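/*
 * In the MSI path the interrupt-event word is taken from the shadow
 * register block that jme_enable_shadow() asked the NIC to post via DMA
 * (see SHBA_POSTEN), so the handler avoids an MMIO read of JME_IEVE and
 * only needs to sync the coherent buffer for the CPU first.
 */
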
static void
jme_reset_link(struct jme_adapter *jme)
{
	jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
}

static void
jme_restart_an(struct jme_adapter *jme)
{
	__u32 bmcr;
	unsigned long flags;

	spin_lock_irqsave(&jme->phy_lock, flags);
	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
	spin_unlock_irqrestore(&jme->phy_lock, flags);
}

static int
jme_request_irq(struct jme_adapter *jme)
{
	int rc;
	struct net_device *netdev = jme->dev;
	irq_handler_t handler = jme_intr;
	int irq_flags = IRQF_SHARED;

	if (!pci_enable_msi(jme->pdev)) {
		jme->flags |= JME_FLAG_MSI;
		handler = jme_msi;
		irq_flags = 0;
	}

	rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
			 netdev);
	if(rc) {
		jeprintk(netdev->name,
			"Unable to allocate %s interrupt (return: %d)\n",
			jme->flags & JME_FLAG_MSI ? "MSI" : "INTx",
			rc);

		if(jme->flags & JME_FLAG_MSI) {
			pci_disable_msi(jme->pdev);
			jme->flags &= ~JME_FLAG_MSI;
		}
	}
	else {
		netdev->irq = jme->pdev->irq;
	}

	return rc;
}

static void
jme_free_irq(struct jme_adapter *jme)
{
	free_irq(jme->pdev->irq, jme->dev);
	if (jme->flags & JME_FLAG_MSI) {
		pci_disable_msi(jme->pdev);
		jme->flags &= ~JME_FLAG_MSI;
		jme->dev->irq = jme->pdev->irq;
	}
}

static int
jme_open(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc, timeout = 100;

	while(
		--timeout > 0 &&
		(
		atomic_read(&jme->link_changing) != 1 ||
		atomic_read(&jme->rx_cleaning) != 1 ||
		atomic_read(&jme->tx_cleaning) != 1
		)
	)
		msleep(10);

	if(!timeout) {
		rc = -EBUSY;
		goto err_out;
	}

	jme_reset_mac_processor(jme);

	rc = jme_request_irq(jme);
	if(rc)
		goto err_out;

	jme_enable_shadow(jme);
	jme_start_irq(jme);
	jme_restart_an(jme);

	return 0;

err_out:
	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	return rc;
}

static int
jme_close(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);

	jme_stop_irq(jme);
	jme_disable_shadow(jme);
	jme_free_irq(jme);

	tasklet_kill(&jme->linkch_task);
	tasklet_kill(&jme->txclean_task);
	tasklet_kill(&jme->rxclean_task);
	tasklet_kill(&jme->rxempty_task);

	jme_reset_mac_processor(jme);
	jme_free_rx_resources(jme);
	jme_free_tx_resources(jme);

	return 0;
}

/*
 * This function is already protected by netif_tx_lock()
 */
static int
jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;

	if(unlikely(netif_queue_stopped(jme->dev)))
		return NETDEV_TX_BUSY;

#if 0
/*Testing: disabled debug dump of the outgoing skb*/
	dprintk("jme", "Frags: %d Headlen: %d Len: %d Sum:%d\n",
		skb_shinfo(skb)->nr_frags,
		skb_headlen(skb),
		skb->len,
		skb->ip_summed);
/*********/
#endif

	rc = jme_set_new_txdesc(jme, skb);

	if(unlikely(rc != NETDEV_TX_OK))
		return rc;

	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_QUEUE0S |
				TXCS_ENABLE);
	netdev->trans_start = jiffies;

	return NETDEV_TX_OK;
}

static int
jme_set_macaddr(struct net_device *netdev, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct sockaddr *addr = p;
	__u32 val;

	if(netif_running(netdev))
		return -EBUSY;

	spin_lock(&jme->macaddr_lock);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	val = addr->sa_data[3] << 24 |
	      addr->sa_data[2] << 16 |
	      addr->sa_data[1] <<  8 |
	      addr->sa_data[0];
	jwrite32(jme, JME_RXUMA_LO, val);
	val = addr->sa_data[5] << 8 |
	      addr->sa_data[4];
	jwrite32(jme, JME_RXUMA_HI, val);
	spin_unlock(&jme->macaddr_lock);

	return 0;
}

static void
jme_set_multi(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 mc_hash[2] = {};
	int i;
	unsigned long flags;

	spin_lock_irqsave(&jme->rxmcs_lock, flags);

	jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;

	if (netdev->flags & IFF_PROMISC) {
		jme->reg_rxmcs |= RXMCS_ALLFRAME;
	}
	else if (netdev->flags & IFF_ALLMULTI) {
		jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
	}
	else if(netdev->flags & IFF_MULTICAST) {
		struct dev_mc_list *mclist;
		int bit_nr;

		jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
		for (i = 0, mclist = netdev->mc_list;
			mclist && i < netdev->mc_count;
			++i, mclist = mclist->next) {

			bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
			mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
		}

		jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
		jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
	}

	wmb();
	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);

	spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
}

static int
jme_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	if(new_mtu > 4000) {
		jme->reg_rxcs &= ~RXCS_FIFOTHNP;
		jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
		jme_restart_rx_engine(jme);
	}
	else {
		jme->reg_rxcs &= ~RXCS_FIFOTHNP;
		jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
		jme_restart_rx_engine(jme);
	}

	if(new_mtu > 1900) {
		netdev->features &= ~NETIF_F_HW_CSUM;
	}
	else {
		netdev->features |= NETIF_F_HW_CSUM;
	}

	netdev->mtu = new_mtu;
	jme_reset_link(jme);

	return 0;
}

static void
jme_tx_timeout(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	/*
	 * Reset the link.
	 * The link change will reinitialize all RX/TX resources.
	 */
	jme_restart_an(jme);
}

static void
jme_get_drvinfo(struct net_device *netdev,
		     struct ethtool_drvinfo *info)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(jme->pdev));
}

static int
jme_get_regs_len(struct net_device *netdev)
{
	return 0x400;
}

static void
mmapio_memcpy(struct jme_adapter *jme, __u32 *p, __u32 reg, int len)
{
	int i;

	for(i = 0 ; i < len ; i += 4)
		p[i >> 2] = jread32(jme, reg + i);

}

static void
jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 *p32 = (__u32*)p;

	memset(p, 0, 0x400);

	regs->version = 1;
	mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);

}

static int
jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	ecmd->use_adaptive_rx_coalesce = true;
	ecmd->tx_coalesce_usecs = PCC_TX_TO;
	ecmd->tx_max_coalesced_frames = PCC_TX_CNT;

	switch(jme->dpi.cur) {
	case PCC_P1:
		ecmd->rx_coalesce_usecs = PCC_P1_TO;
		ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
		break;
	case PCC_P2:
		ecmd->rx_coalesce_usecs = PCC_P2_TO;
		ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
		break;
	case PCC_P3:
		ecmd->rx_coalesce_usecs = PCC_P3_TO;
		ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
		break;
	default:
		break;
	}

	return 0;
}

/*
 * This is not actually for coalescing.
 * It changes internal FIFO related settings for testing.
 */
static int
jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	if(ecmd->use_adaptive_rx_coalesce &&
	ecmd->use_adaptive_tx_coalesce &&
	ecmd->rx_coalesce_usecs == 250 &&
	(ecmd->rx_max_coalesced_frames_low == 16 ||
	ecmd->rx_max_coalesced_frames_low == 32 ||
	ecmd->rx_max_coalesced_frames_low == 64 ||
	ecmd->rx_max_coalesced_frames_low == 128)) {
		jme->reg_rxcs &= ~RXCS_FIFOTHNP;
		switch(ecmd->rx_max_coalesced_frames_low) {
		case 16:
			jme->reg_rxcs |= RXCS_FIFOTHNP_16QW;
			break;
		case 32:
			jme->reg_rxcs |= RXCS_FIFOTHNP_32QW;
			break;
		case 64:
			jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
			break;
		case 128:
		default:
			jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
		}
		jme_restart_rx_engine(jme);
	}
	else {
		return -EINVAL;
	}

	return 0;
}

static void
jme_get_pauseparam(struct net_device *netdev,
			struct ethtool_pauseparam *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned long flags;
	__u32 val;

	ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
	ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;

	spin_lock_irqsave(&jme->phy_lock, flags);
	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
	spin_unlock_irqrestore(&jme->phy_lock, flags);
	ecmd->autoneg = (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
}

static int
jme_set_pauseparam(struct net_device *netdev,
			struct ethtool_pauseparam *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned long flags;
	__u32 val;

	if( ((jme->reg_txpfc & TXPFC_PF_EN) != 0) !=
		(ecmd->tx_pause != 0)) {

		if(ecmd->tx_pause)
			jme->reg_txpfc |= TXPFC_PF_EN;
		else
			jme->reg_txpfc &= ~TXPFC_PF_EN;

		jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
	}

	spin_lock_irqsave(&jme->rxmcs_lock, flags);
	if( ((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) !=
		(ecmd->rx_pause != 0)) {

		if(ecmd->rx_pause)
			jme->reg_rxmcs |= RXMCS_FLOWCTRL;
		else
			jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;

		jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
	}
	spin_unlock_irqrestore(&jme->rxmcs_lock, flags);

	spin_lock_irqsave(&jme->phy_lock, flags);
	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
	if( ((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) !=
		(ecmd->autoneg != 0)) {

		if(ecmd->autoneg)
			val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		else
			val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE, val);
	}
	spin_unlock_irqrestore(&jme->phy_lock, flags);

	return 0;
}

static int
jme_get_settings(struct net_device *netdev,
		     struct ethtool_cmd *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&jme->phy_lock, flags);
	rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
	spin_unlock_irqrestore(&jme->phy_lock, flags);
	return rc;
}

static int
jme_set_settings(struct net_device *netdev,
		     struct ethtool_cmd *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc, fdc = 0;
	unsigned long flags;

	if(ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
		return -EINVAL;

	if(jme->mii_if.force_media &&
	ecmd->autoneg != AUTONEG_ENABLE &&
	(jme->mii_if.full_duplex != ecmd->duplex))
		fdc = 1;

	spin_lock_irqsave(&jme->phy_lock, flags);
	rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
	spin_unlock_irqrestore(&jme->phy_lock, flags);

	if(!rc && fdc)
		jme_reset_link(jme);

	return rc;
}

static __u32
jme_get_link(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
}

static u32
jme_get_rx_csum(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	return jme->reg_rxmcs & RXMCS_CHECKSUM;
}

static int
jme_set_rx_csum(struct net_device *netdev, u32 on)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&jme->rxmcs_lock, flags);
	if(on)
		jme->reg_rxmcs |= RXMCS_CHECKSUM;
	else
		jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
	spin_unlock_irqrestore(&jme->rxmcs_lock, flags);

	return 0;
}

static int
jme_set_tx_csum(struct net_device *netdev, u32 on)
{
	if(on)
		netdev->features |= NETIF_F_HW_CSUM;
	else
		netdev->features &= ~NETIF_F_HW_CSUM;

	return 0;
}

static int
jme_nway_reset(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	jme_restart_an(jme);
	return 0;
}

static const struct ethtool_ops jme_ethtool_ops = {
	.get_drvinfo		= jme_get_drvinfo,
	.get_regs_len		= jme_get_regs_len,
	.get_regs		= jme_get_regs,
	.get_coalesce		= jme_get_coalesce,
	.set_coalesce		= jme_set_coalesce,
	.get_pauseparam		= jme_get_pauseparam,
	.set_pauseparam		= jme_set_pauseparam,
	.get_settings		= jme_get_settings,
	.set_settings		= jme_set_settings,
	.get_link		= jme_get_link,
	.get_rx_csum		= jme_get_rx_csum,
	.set_rx_csum		= jme_set_rx_csum,
	.set_tx_csum		= jme_set_tx_csum,
	.nway_reset		= jme_nway_reset,
};

static int
jme_pci_dma64(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
		if(!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
			return 1;

	if (!pci_set_dma_mask(pdev, DMA_40BIT_MASK))
		if(!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK))
			return 1;

	if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		if(!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
			return 0;

	return -1;
}

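/*
 * Return value convention for jme_pci_dma64(): 1 means a 64-bit or 40-bit
 * DMA mask was accepted (the caller then sets NETIF_F_HIGHDMA), 0 means
 * plain 32-bit DMA, and -1 means no usable mask at all, which aborts
 * probing.
 */
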
static int __devinit
jme_init_one(struct pci_dev *pdev,
	     const struct pci_device_id *ent)
{
	int rc = 0, using_dac;
	struct net_device *netdev;
	struct jme_adapter *jme;

	/*
	 * set up PCI device basics
	 */
	rc = pci_enable_device(pdev);
	if(rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device.\n");
		goto err_out;
	}

	using_dac = jme_pci_dma64(pdev);
	if(using_dac < 0) {
		printk(KERN_ERR PFX "Cannot set PCI DMA Mask.\n");
		rc = -EIO;
		goto err_out_disable_pdev;
	}

	if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "No PCI resource region found.\n");
		rc = -ENOMEM;
		goto err_out_disable_pdev;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if(rc) {
		printk(KERN_ERR PFX "Cannot obtain PCI resource region.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/*
	 * alloc and init net device
	 */
	netdev = alloc_etherdev(sizeof(*jme));
	if(!netdev) {
		rc = -ENOMEM;
		goto err_out_release_regions;
	}
	netdev->open			= jme_open;
	netdev->stop			= jme_close;
	netdev->hard_start_xmit		= jme_start_xmit;
	netdev->set_mac_address		= jme_set_macaddr;
	netdev->set_multicast_list	= jme_set_multi;
	netdev->change_mtu		= jme_change_mtu;
	netdev->ethtool_ops		= &jme_ethtool_ops;
	netdev->tx_timeout		= jme_tx_timeout;
	netdev->watchdog_timeo		= TX_TIMEOUT;
	NETDEV_GET_STATS(netdev, &jme_get_stats);
	netdev->features		= NETIF_F_HW_CSUM;
	if(using_dac)
		netdev->features	|= NETIF_F_HIGHDMA;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	/*
	 * init adapter info
	 */
	jme = netdev_priv(netdev);
	jme->pdev = pdev;
	jme->dev = netdev;
	jme->oldmtu = netdev->mtu = 1500;
	jme->phylink = 0;
	jme->regs = ioremap(pci_resource_start(pdev, 0),
			    pci_resource_len(pdev, 0));
	if (!(jme->regs)) {
		rc = -ENOMEM;
		goto err_out_free_netdev;
	}
	jme->shadow_regs = pci_alloc_consistent(pdev,
						sizeof(__u32) * SHADOW_REG_NR,
						&(jme->shadow_dma));
	if (!(jme->shadow_regs)) {
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	spin_lock_init(&jme->phy_lock);
	spin_lock_init(&jme->macaddr_lock);
	spin_lock_init(&jme->rxmcs_lock);

	atomic_set(&jme->intr_sem, 1);
	atomic_set(&jme->link_changing, 1);
	atomic_set(&jme->rx_cleaning, 1);
	atomic_set(&jme->tx_cleaning, 1);

	tasklet_init(&jme->pcc_task,
		     &jme_pcc_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->linkch_task,
		     &jme_link_change_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->txclean_task,
		     &jme_tx_clean_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->rxclean_task,
		     &jme_rx_clean_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->rxempty_task,
		     &jme_rx_empty_tasklet,
		     (unsigned long) jme);
	jme->mii_if.dev = netdev;
	jme->mii_if.phy_id = 1;
	jme->mii_if.supports_gmii = 1;
	jme->mii_if.mdio_read = jme_mdio_read;
	jme->mii_if.mdio_write = jme_mdio_write;

	jme->dpi.cur = PCC_P1;

	jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
	jme->reg_rxcs = RXCS_DEFAULT;
	jme->reg_rxmcs = RXMCS_DEFAULT;
	jme->reg_txpfc = 0;
	/*
	 * Get Max Read Req Size from PCI Config Space
	 */
	pci_read_config_byte(pdev, PCI_CONF_DCSR_MRRS, &jme->mrrs);
	switch(jme->mrrs) {
	case MRRS_128B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
		break;
	case MRRS_256B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
		break;
	default:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
		break;
	};

	/*
	 * Reset MAC processor and reload EEPROM for MAC Address
	 */
	jme_clear_pm(jme);
	jme_reset_phy_processor(jme);
	jme_reset_mac_processor(jme);
	rc = jme_reload_eeprom(jme);
	if(rc) {
		printk(KERN_ERR PFX
			"Error reloading eeprom for reading MAC Address.\n");
		goto err_out_free_shadow;
	}
	jme_load_macaddr(netdev);

	/*
	 * Tell stack that we are not ready to work until open()
	 */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	/*
	 * Register netdev
	 */
	rc = register_netdev(netdev);
	if(rc) {
		printk(KERN_ERR PFX "Cannot register net device.\n");
		goto err_out_free_shadow;
	}

	jprintk(netdev->name,
		"JMC250 gigabit eth %02x:%02x:%02x:%02x:%02x:%02x\n",
		netdev->dev_addr[0],
		netdev->dev_addr[1],
		netdev->dev_addr[2],
		netdev->dev_addr[3],
		netdev->dev_addr[4],
		netdev->dev_addr[5]);

	return 0;

err_out_free_shadow:
	pci_free_consistent(pdev,
			    sizeof(__u32) * SHADOW_REG_NR,
			    jme->shadow_regs,
			    jme->shadow_dma);
err_out_unmap:
	iounmap(jme->regs);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out:
	return rc;
}

static void __devexit
jme_remove_one(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	unregister_netdev(netdev);
	pci_free_consistent(pdev,
			    sizeof(__u32) * SHADOW_REG_NR,
			    jme->shadow_regs,
			    jme->shadow_dma);
	iounmap(jme->regs);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

}

static struct pci_device_id jme_pci_tbl[] = {
	{ PCI_VDEVICE(JMICRON, 0x250) },
	{ }
};

static struct pci_driver jme_driver = {
	.name		= DRV_NAME,
	.id_table	= jme_pci_tbl,
	.probe		= jme_init_one,
	.remove		= __devexit_p(jme_remove_one),
#if 0
#ifdef CONFIG_PM
	.suspend	= jme_suspend,
	.resume		= jme_resume,
#endif /* CONFIG_PM */
#endif
};

static int __init
jme_init_module(void)
{
	printk(KERN_INFO PFX "JMicron JMC250 gigabit ethernet "
	       "driver version %s\n", DRV_VERSION);
	return pci_register_driver(&jme_driver);
}

static void __exit
jme_cleanup_module(void)
{
	pci_unregister_driver(&jme_driver);
}

module_init(jme_init_module);
module_exit(jme_cleanup_module);

MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);