/*
 * Driver for Xilinx TEMAC Ethernet device
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 *
 * This is a driver for the Xilinx ll_temac ipcore which is often used
 * in the Virtex and Spartan series of chips.
 *
 * Notes:
 * - The ll_temac hardware uses indirect access for many of the TEMAC
 *   registers, including the MDIO bus. However, indirect access to MDIO
 *   registers takes considerably more clock cycles than to TEMAC registers.
 *   MDIO accesses are long, so threads doing them should probably sleep
 *   rather than busywait. However, since only one indirect access can be
 *   in progress at any given time, that means that *all* indirect accesses
 *   could end up sleeping (to wait for an MDIO access to complete).
 *   Fortunately none of the indirect accesses are on the 'hot' path for tx
 *   or rx, so this should be okay.
 *
 * TODO:
 * - Fix driver to work on more than just Virtex5. Right now the driver
 *   assumes that the locallink DMA registers are accessed via DCR
 *   instructions.
 * - Factor out locallink DMA code into separate driver
 * - Fix multicast assignment.
 * - Fix support for hardware checksumming.
 * - Testing. Lots and lots of testing.
 *
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>      /* needed for sizeof(tcphdr) */
#include <linux/udp.h>      /* needed for sizeof(udphdr) */
#include <linux/phy.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>

#include "ll_temac.h"

#define TX_BD_NUM   64
#define RX_BD_NUM   128

/* ---------------------------------------------------------------------
 * Low level register access functions
 */

u32 temac_ior(struct temac_local *lp, int offset)
{
	return in_be32((u32 *)(lp->regs + offset));
}

void temac_iow(struct temac_local *lp, int offset, u32 value)
{
	out_be32((u32 *) (lp->regs + offset), value);
}

int temac_indirect_busywait(struct temac_local *lp)
{
	long end = jiffies + 2;

	while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
		if (end - jiffies <= 0) {
			WARN_ON(1);
			return -ETIMEDOUT;
		}
		msleep(1);
	}
	return 0;
}

/**
 * temac_indirect_in32 - read a TEMAC register via the indirect access method
 *
 * lp->indirect_mutex must be held when calling this function
 */
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
	u32 val;

	if (temac_indirect_busywait(lp))
		return -ETIMEDOUT;
	temac_iow(lp, XTE_CTL0_OFFSET, reg);
	if (temac_indirect_busywait(lp))
		return -ETIMEDOUT;
	val = temac_ior(lp, XTE_LSW0_OFFSET);

	return val;
}

/**
 * temac_indirect_out32 - write a TEMAC register via the indirect access method
 *
 * lp->indirect_mutex must be held when calling this function
 */
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
	if (temac_indirect_busywait(lp))
		return;
	temac_iow(lp, XTE_LSW0_OFFSET, value);
	temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
}

static u32 temac_dma_in32(struct temac_local *lp, int reg)
{
	return dcr_read(lp->sdma_dcrs, reg);
}

static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
{
	dcr_write(lp->sdma_dcrs, reg, value);
}

/**
 * temac_dma_bd_init - Set up the buffer descriptor rings
 */
static int temac_dma_bd_init(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb;
	int i;

	lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
	/* allocate the tx and rx ring buffer descriptors. */
	/* returns a virtual address and a physical address. */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					 &lp->tx_bd_p, GFP_KERNEL);
	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					 &lp->rx_bd_p, GFP_KERNEL);

	memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
	for (i = 0; i < TX_BD_NUM; i++) {
		lp->tx_bd_v[i].next = lp->tx_bd_p +
				sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
	}

	memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = lp->rx_bd_p +
				sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);

		skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE
				+ XTE_ALIGN, GFP_ATOMIC);
		if (!skb) {
			dev_err(&ndev->dev, "alloc_skb error %d\n", i);
			return -1;
		}
		lp->rx_skb[i] = skb;
		skb_reserve(skb, BUFFER_ALIGN(skb->data));
		/* returns physical address of skb->data */
		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     XTE_MAX_JUMBO_FRAME_SIZE,
						     DMA_FROM_DEVICE);
		lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
		lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
	}

	temac_dma_out32(lp, TX_CHNL_CTRL, 0x10220400 |
					  CHNL_CTRL_IRQ_EN |
					  CHNL_CTRL_IRQ_DLY_EN |
					  CHNL_CTRL_IRQ_COAL_EN);
	/* 0x10220483 */
	/* 0x00100483 */
	temac_dma_out32(lp, RX_CHNL_CTRL, 0xff010000 |
					  CHNL_CTRL_IRQ_EN |
					  CHNL_CTRL_IRQ_DLY_EN |
					  CHNL_CTRL_IRQ_COAL_EN |
					  CHNL_CTRL_IRQ_IOE);
	/* 0xff010283 */

	temac_dma_out32(lp, RX_CURDESC_PTR,  lp->rx_bd_p);
	temac_dma_out32(lp, RX_TAILDESC_PTR,
		       lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
	temac_dma_out32(lp, TX_CURDESC_PTR, lp->tx_bd_p);

	return 0;
}

/* ---------------------------------------------------------------------
 * net_device_ops
 */

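/**
 * temac_set_mac_address - set up the unicast MAC address filter
 *
 * Copies @address (when given) into ndev->dev_addr, falls back to a
 * random address if the result is not a valid Ethernet address, and
 * programs the address into the hardware's unicast address registers.
 */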
static int temac_set_mac_address(struct net_device *ndev, void *address)
{
	struct temac_local *lp = netdev_priv(ndev);

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);

	if (!is_valid_ether_addr(ndev->dev_addr))
		random_ether_addr(ndev->dev_addr);

	/* Set up the unicast MAC address filter with the MAC address */
	mutex_lock(&lp->indirect_mutex);
	temac_indirect_out32(lp, XTE_UAW0_OFFSET,
			     (ndev->dev_addr[0]) |
			     (ndev->dev_addr[1] << 8) |
			     (ndev->dev_addr[2] << 16) |
			     (ndev->dev_addr[3] << 24));
	/* There are reserved bits in EUAW1 so don't affect them;
	 * set only MAC bits [47:32] in EUAW1 */
	temac_indirect_out32(lp, XTE_UAW1_OFFSET,
			     (ndev->dev_addr[4] & 0x000000ff) |
			     (ndev->dev_addr[5] << 8));
	mutex_unlock(&lp->indirect_mutex);

	return 0;
}

static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	return temac_set_mac_address(ndev, addr->sa_data);
}

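/**
 * temac_set_multicast_list - program the address filters
 *
 * Enables promiscuous mode when requested (or when the multicast list
 * exceeds the CAM table size), loads up to MULTICAST_CAM_TABLE_NUM
 * multicast addresses into the CAM, or clears the filters entirely.
 */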
static void temac_set_multicast_list(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 multi_addr_msw, multi_addr_lsw, val;
	int i;

	mutex_lock(&lp->indirect_mutex);
	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
		/*
		 * We must make the kernel realise we had to move
		 * into promisc mode or we start all out war on
		 * the cable. If it was a promisc request the
		 * flag is already set. If not we assert it.
		 */
		ndev->flags |= IFF_PROMISC;
		temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= MULTICAST_CAM_TABLE_NUM)
				break;
			multi_addr_msw = ((ha->addr[3] << 24) |
					  (ha->addr[2] << 16) |
					  (ha->addr[1] << 8) |
					  (ha->addr[0]));
			temac_indirect_out32(lp, XTE_MAW0_OFFSET,
					     multi_addr_msw);
			multi_addr_lsw = ((ha->addr[5] << 8) |
					  (ha->addr[4]) | (i << 16));
			temac_indirect_out32(lp, XTE_MAW1_OFFSET,
					     multi_addr_lsw);
			i++;
		}
	} else {
		val = temac_indirect_in32(lp, XTE_AFM_OFFSET);
		temac_indirect_out32(lp, XTE_AFM_OFFSET,
				     val & ~XTE_AFM_EPPRM_MASK);
		temac_indirect_out32(lp, XTE_MAW0_OFFSET, 0);
		temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0);
		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
	mutex_unlock(&lp->indirect_mutex);
}

struct temac_option {
	int flg;
	u32 opt;
	u32 reg;
	u32 m_or;
	u32 m_and;
} temac_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXJMBO_MASK,
	},
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXJMBO_MASK,
	},
	/* Turn on VLAN packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXVLAN_MASK,
	},
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXVLAN_MASK,
	},
	/* Turn on FCS stripping on receive packets */
	{
		.opt = XTE_OPTION_FCS_STRIP,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXFCS_MASK,
	},
	/* Turn on FCS insertion on transmit packets */
	{
		.opt = XTE_OPTION_FCS_INSERT,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXFCS_MASK,
	},
	/* Turn on length/type field checking on receive packets */
	{
		.opt = XTE_OPTION_LENTYPE_ERR,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXLT_MASK,
	},
	/* Turn on flow control for receive */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_RXFLO_MASK,
	},
	/* Turn on flow control for transmit */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_TXFLO_MASK,
	},
	/* Turn on promiscuous frame filtering (all frames are received) */
	{
		.opt = XTE_OPTION_PROMISC,
		.reg = XTE_AFM_OFFSET,
		.m_or = XTE_AFM_EPPRM_MASK,
	},
	/* Enable transmitter if not already enabled */
	{
		.opt = XTE_OPTION_TXEN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXEN_MASK,
	},
	/* Enable receiver if not already enabled */
	{
		.opt = XTE_OPTION_RXEN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXEN_MASK,
	},
	{}
};

/**
 * temac_setoptions - sync the given option flags out to the hardware registers
 */
static u32 temac_setoptions(struct net_device *ndev, u32 options)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct temac_option *tp = &temac_options[0];
	int reg;

	mutex_lock(&lp->indirect_mutex);
	while (tp->opt) {
		reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or;
		if (options & tp->opt)
			reg |= tp->m_or;
		temac_indirect_out32(lp, tp->reg, reg);
		tp++;
	}
	lp->options |= options;
	mutex_unlock(&lp->indirect_mutex);

	return 0;
}

/* Initialize temac */
static void temac_device_reset(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 timeout;
	u32 val;

	/* Perform a software reset */

	/* 0x300 host enable bit ? */
	/* reset PHY through control register ?:1 */

	dev_dbg(&ndev->dev, "%s()\n", __func__);

	mutex_lock(&lp->indirect_mutex);
	/* Reset the receiver and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset RX reset timeout!!\n");
			break;
		}
	}

	/* Reset the transmitter and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset TX reset timeout!!\n");
			break;
		}
	}

	/* Disable the receiver */
	val = temac_indirect_in32(lp, XTE_RXC1_OFFSET);
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);

	/* Reset Local Link (DMA) */
	temac_dma_out32(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
	timeout = 1000;
	while (temac_dma_in32(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset DMA reset timeout!!\n");
			break;
		}
	}
	temac_dma_out32(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);

	temac_dma_bd_init(ndev);

	temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0);
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0);
	temac_indirect_out32(lp, XTE_TXC_OFFSET, 0);
	temac_indirect_out32(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);

	mutex_unlock(&lp->indirect_mutex);

	/* Sync default options with HW
	 * but leave receiver and transmitter disabled. */
	temac_setoptions(ndev,
			 lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));

	temac_set_mac_address(ndev, NULL);

	/* Set address filter table */
	temac_set_multicast_list(ndev);
	if (temac_setoptions(ndev, lp->options))
		dev_err(&ndev->dev, "Error setting TEMAC options\n");

	/* Init Driver variable */
	ndev->trans_start = 0;
}

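/**
 * temac_adjust_link - update the MAC link speed configuration
 *
 * Called by the phylib state machine whenever the PHY link state
 * changes; rewrites the TEMAC speed bits when speed/duplex/link differ
 * from the last state seen.
 */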
void temac_adjust_link(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phy = lp->phy_dev;
	u32 mii_speed;
	int link_state;

	/* hash together the state values to decide if something has changed */
	link_state = phy->speed | (phy->duplex << 1) | phy->link;

	mutex_lock(&lp->indirect_mutex);
	if (lp->last_link != link_state) {
		mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET);
		mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;

		switch (phy->speed) {
		case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
		case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
		case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
		}

		/* Write new speed setting out to TEMAC */
		temac_indirect_out32(lp, XTE_EMCFG_OFFSET, mii_speed);
		lp->last_link = link_state;
		phy_print_status(phy);
	}
	mutex_unlock(&lp->indirect_mutex);
}

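/**
 * temac_start_xmit_done - reclaim completed transmit descriptors
 *
 * Walks the TX ring from the current index, unmaps and frees each
 * completed buffer, updates the TX statistics, and wakes the queue.
 */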
static void temac_start_xmit_done(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	unsigned int stat = 0;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	stat = cur_p->app0;

	while (stat & STS_CTRL_APP0_CMPLT) {
		dma_unmap_single(ndev->dev.parent, cur_p->phys, cur_p->len,
				 DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
		cur_p->app0 = 0;

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += cur_p->len;

		lp->tx_bd_ci++;
		if (lp->tx_bd_ci >= TX_BD_NUM)
			lp->tx_bd_ci = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		stat = cur_p->app0;
	}

	netif_wake_queue(ndev);
}

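/**
 * temac_start_xmit - queue a packet for transmission
 *
 * Maps the skb head and any fragments into TX descriptors, fills in
 * the checksum-offload fields for TCP/UDP over IPv4, and kicks the
 * DMA engine by advancing the tail descriptor pointer.
 */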
static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	dma_addr_t start_p, tail_p;
	int ii;
	unsigned long num_frag;
	skb_frag_t *frag;

	num_frag = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];
	start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (cur_p->app0 & STS_CTRL_APP0_CMPLT) {
		if (!netif_queue_stopped(ndev)) {
			netif_stop_queue(ndev);
			return NETDEV_TX_BUSY;
		}
		return NETDEV_TX_BUSY;
	}

	cur_p->app0 = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = ip_hdr(skb);
		int length = 0, start = 0, insert = 0;

		switch (ip->protocol) {
		case IPPROTO_TCP:
			start = sizeof(struct iphdr) + ETH_HLEN;
			insert = sizeof(struct iphdr) + ETH_HLEN + 16;
			length = ip->tot_len - sizeof(struct iphdr);
			break;
		case IPPROTO_UDP:
			start = sizeof(struct iphdr) + ETH_HLEN;
			insert = sizeof(struct iphdr) + ETH_HLEN + 6;
			length = ip->tot_len - sizeof(struct iphdr);
			break;
		default:
			break;
		}
		cur_p->app1 = ((start << 16) | insert);
		cur_p->app2 = csum_tcpudp_magic(ip->saddr, ip->daddr,
						length, ip->protocol, 0);
		skb->data[insert] = 0;
		skb->data[insert + 1] = 0;
	}
	cur_p->app0 |= STS_CTRL_APP0_SOP;
	cur_p->len = skb_headlen(skb);
	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len,
				     DMA_TO_DEVICE);
	cur_p->app4 = (unsigned long)skb;

	for (ii = 0; ii < num_frag; ii++) {
		lp->tx_bd_tail++;
		if (lp->tx_bd_tail >= TX_BD_NUM)
			lp->tx_bd_tail = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		cur_p->phys = dma_map_single(ndev->dev.parent,
					     (void *)page_address(frag->page) +
					     frag->page_offset,
					     frag->size, DMA_TO_DEVICE);
		cur_p->len = frag->size;
		cur_p->app0 = 0;
		frag++;
	}
	cur_p->app0 |= STS_CTRL_APP0_EOP;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	lp->tx_bd_tail++;
	if (lp->tx_bd_tail >= TX_BD_NUM)
		lp->tx_bd_tail = 0;

	/* Kick off the transfer */
	temac_dma_out32(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

	return NETDEV_TX_OK;
}
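/**
 * ll_temac_recv - service the receive ring
 *
 * Passes each completed receive buffer up the stack, allocates and
 * maps a replacement skb for the descriptor, and advances the RX
 * tail pointer so the DMA engine can reuse the ring entries.
 */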
static void ll_temac_recv(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	unsigned int bdstat;
	struct cdmac_bd *cur_p;
	dma_addr_t tail_p;
	int length;
	unsigned long skb_vaddr;
	unsigned long flags;

	spin_lock_irqsave(&lp->rx_lock, flags);

	tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	bdstat = cur_p->app0;
	while ((bdstat & STS_CTRL_APP0_CMPLT)) {

		skb = lp->rx_skb[lp->rx_bd_ci];
		length = cur_p->app4 & 0x3FFF;

		skb_vaddr = virt_to_bus(skb->data);
		dma_unmap_single(ndev->dev.parent, skb_vaddr, length,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);
		skb->dev = ndev;
		skb->protocol = eth_type_trans(skb, ndev);
		skb->ip_summed = CHECKSUM_NONE;

		netif_rx(skb);

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += length;

		new_skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE + XTE_ALIGN,
				    GFP_ATOMIC);
		if (!new_skb) {
			dev_err(&ndev->dev, "no memory for new sk_buff\n");
			spin_unlock_irqrestore(&lp->rx_lock, flags);
			return;
		}

		skb_reserve(new_skb, BUFFER_ALIGN(new_skb->data));

		cur_p->app0 = STS_CTRL_APP0_IRQONEND;
		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     XTE_MAX_JUMBO_FRAME_SIZE,
					     DMA_FROM_DEVICE);
		cur_p->len = XTE_MAX_JUMBO_FRAME_SIZE;
		lp->rx_skb[lp->rx_bd_ci] = new_skb;

		lp->rx_bd_ci++;
		if (lp->rx_bd_ci >= RX_BD_NUM)
			lp->rx_bd_ci = 0;

		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
		bdstat = cur_p->app0;
	}
	temac_dma_out32(lp, RX_TAILDESC_PTR, tail_p);

	spin_unlock_irqrestore(&lp->rx_lock, flags);
}

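/*
 * DMA interrupt handlers: acknowledge the interrupt status and hand
 * completed work to temac_start_xmit_done() / ll_temac_recv().
 */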
static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	/* Read and clear the status register */
	status = temac_dma_in32(lp, TX_IRQ_REG);
	temac_dma_out32(lp, TX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		temac_start_xmit_done(lp->ndev);
	if (status & 0x080)
		dev_err(&ndev->dev, "DMA error 0x%x\n", status);

	return IRQ_HANDLED;
}

static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	/* Read and clear the status registers */
	status = temac_dma_in32(lp, RX_IRQ_REG);
	temac_dma_out32(lp, RX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		ll_temac_recv(lp->ndev);

	return IRQ_HANDLED;
}

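/**
 * temac_open - bring the interface up
 *
 * Connects and starts the PHY (when one is described in the device
 * tree), requests the TX and RX DMA interrupts, and resets the device.
 */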
static int temac_open(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	int rc;

	dev_dbg(&ndev->dev, "temac_open()\n");

	if (lp->phy_node) {
		lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
					     temac_adjust_link, 0, 0);
		if (!lp->phy_dev) {
			dev_err(lp->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}

		phy_start(lp->phy_dev);
	}

	rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_tx_irq;
	rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_rx_irq;

	temac_device_reset(ndev);
	return 0;

 err_rx_irq:
	free_irq(lp->tx_irq, ndev);
 err_tx_irq:
	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;
	dev_err(lp->dev, "request_irq() failed\n");
	return rc;
}

static int temac_stop(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "temac_close()\n");

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void
temac_poll_controller(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);

	/* The handlers expect the net_device as their argument */
	ll_temac_rx_irq(lp->rx_irq, ndev);
	ll_temac_tx_irq(lp->tx_irq, ndev);

	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static const struct net_device_ops temac_netdev_ops = {
	.ndo_open = temac_open,
	.ndo_stop = temac_stop,
	.ndo_start_xmit = temac_start_xmit,
	.ndo_set_mac_address = netdev_set_mac_address,
	//.ndo_set_multicast_list = temac_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = temac_poll_controller,
#endif
};

/* ---------------------------------------------------------------------
 * SYSFS device attributes
 */
static ssize_t temac_show_llink_regs(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct temac_local *lp = netdev_priv(ndev);
	int i, len = 0;

	for (i = 0; i < 0x11; i++)
		len += sprintf(buf + len, "%.8x%s", temac_dma_in32(lp, i),
			       (i % 8) == 7 ? "\n" : " ");
	len += sprintf(buf + len, "\n");

	return len;
}

static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);

static struct attribute *temac_device_attrs[] = {
	&dev_attr_llink_regs.attr,
	NULL,
};

static const struct attribute_group temac_attr_group = {
	.attrs = temac_device_attrs,
};

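/**
 * temac_of_probe - bind the driver to a device-tree described TEMAC
 *
 * Allocates the net_device, maps the TEMAC and LocalLink DMA resources
 * described in the device tree, retrieves the MAC address, registers
 * the MDIO bus, and finally registers the network device.
 */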
static int __init
temac_of_probe(struct of_device *op, const struct of_device_id *match)
{
	struct device_node *np;
	struct temac_local *lp;
	struct net_device *ndev;
	const void *addr;
	int size, rc = 0;
	unsigned int dcrs;

	/* Init network device structure */
	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev) {
		dev_err(&op->dev, "could not allocate device.\n");
		return -ENOMEM;
	}
	ether_setup(ndev);
	dev_set_drvdata(&op->dev, ndev);
	SET_NETDEV_DEV(ndev, &op->dev);
	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
	ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
	ndev->netdev_ops = &temac_netdev_ops;
#if 0
	ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
	ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
	ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
	ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
	ndev->features |= NETIF_F_HW_VLAN_TX; /* Transmit VLAN hw accel */
	ndev->features |= NETIF_F_HW_VLAN_RX; /* Receive VLAN hw acceleration */
	ndev->features |= NETIF_F_HW_VLAN_FILTER; /* Receive VLAN filtering */
	ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
	ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
	ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
	ndev->features |= NETIF_F_LRO; /* large receive offload */
#endif

	/* setup temac private info structure */
	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &op->dev;
	lp->options = XTE_OPTION_DEFAULTS;
	spin_lock_init(&lp->rx_lock);
	mutex_init(&lp->indirect_mutex);

	/* map device registers */
	lp->regs = of_iomap(op->node, 0);
	if (!lp->regs) {
		dev_err(&op->dev, "could not map temac regs.\n");
		rc = -ENOMEM;
		goto nodev;
	}

	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
	np = of_parse_phandle(op->node, "llink-connected", 0);
	if (!np) {
		dev_err(&op->dev, "could not find DMA node\n");
		rc = -ENODEV;
		goto nodev;
	}

	dcrs = dcr_resource_start(np, 0);
	if (dcrs == 0) {
		dev_err(&op->dev, "could not get DMA register address\n");
		rc = -ENODEV;
		goto nodev;
	}
	lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
	dev_dbg(&op->dev, "DCR base: %x\n", dcrs);

	lp->rx_irq = irq_of_parse_and_map(np, 0);
	lp->tx_irq = irq_of_parse_and_map(np, 1);
	if (!lp->rx_irq || !lp->tx_irq) {
		dev_err(&op->dev, "could not determine irqs\n");
		rc = -ENOMEM;
		goto nodev;
	}

	of_node_put(np); /* Finished with the DMA node; drop the reference */

	/* Retrieve the MAC address */
	addr = of_get_property(op->node, "local-mac-address", &size);
	if ((!addr) || (size != 6)) {
		dev_err(&op->dev, "could not find MAC address\n");
		rc = -ENODEV;
		goto nodev;
	}
	temac_set_mac_address(ndev, (void *)addr);

	rc = temac_mdio_setup(lp, op->node);
	if (rc)
		dev_warn(&op->dev, "error registering MDIO bus\n");

	lp->phy_node = of_parse_phandle(op->node, "phy-handle", 0);
	if (lp->phy_node)
		dev_dbg(lp->dev, "using PHY node %s (%p)\n",
			lp->phy_node->full_name, lp->phy_node);

	/* Add the device attributes */
	rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
	if (rc) {
		dev_err(lp->dev, "Error creating sysfs files\n");
		goto nodev;
	}

	rc = register_netdev(lp->ndev);
	if (rc) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
		goto err_register_ndev;
	}

	return 0;

 err_register_ndev:
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
 nodev:
	free_netdev(ndev);
	ndev = NULL;
	return rc;
}

static int __devexit temac_of_remove(struct of_device *op)
{
	struct net_device *ndev = dev_get_drvdata(&op->dev);
	struct temac_local *lp = netdev_priv(ndev);

	temac_mdio_teardown(lp);
	unregister_netdev(ndev);
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
	if (lp->phy_node)
		of_node_put(lp->phy_node);
	lp->phy_node = NULL;
	dev_set_drvdata(&op->dev, NULL);
	free_netdev(ndev);
	return 0;
}

static struct of_device_id temac_of_match[] __devinitdata = {
	{ .compatible = "xlnx,xps-ll-temac-1.01.b", },
	{ .compatible = "xlnx,xps-ll-temac-2.00.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.02.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.03.a", },
	{},
};
MODULE_DEVICE_TABLE(of, temac_of_match);

static struct of_platform_driver temac_of_driver = {
	.match_table = temac_of_match,
	.probe = temac_of_probe,
	.remove = __devexit_p(temac_of_remove),
	.driver = {
		.owner = THIS_MODULE,
		.name = "xilinx_temac",
	},
};

static int __init temac_init(void)
{
	return of_register_platform_driver(&temac_of_driver);
}
module_init(temac_init);

static void __exit temac_exit(void)
{
	of_unregister_platform_driver(&temac_of_driver);
}
module_exit(temac_exit);

MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
MODULE_LICENSE("GPL");