net-next-2.6.git: drivers/net/stmmac/gmac.c
Commit: stmmac: reorganise class operations.
/*******************************************************************************
  This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
  DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
  developing this code.

  Copyright (C) 2007-2009 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/netdevice.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>

#include "stmmac.h"
#include "gmac.h"

#undef GMAC_DEBUG
/*#define GMAC_DEBUG*/
#undef FRAME_FILTER_DEBUG
/*#define FRAME_FILTER_DEBUG*/
#ifdef GMAC_DEBUG
#define DBG(fmt, args...) printk(fmt, ## args)
#else
#define DBG(fmt, args...) do { } while (0)
#endif

static void gmac_dump_regs(unsigned long ioaddr)
{
        int i;
        pr_info("\t----------------------------------------------\n"
                "\t  GMAC registers (base addr = 0x%08x)\n"
                "\t----------------------------------------------\n",
                (unsigned int)ioaddr);

        for (i = 0; i < 55; i++) {
                int offset = i * 4;
                pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
                        offset, readl(ioaddr + offset));
        }
}

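/*
 * Set up the DMA engine: software-reset the DMA (note that the completion
 * poll below spins without a timeout), program the bus mode with the
 * programmable burst length (PBL) for both TX and RX, mask the DMA
 * interrupts via CSR7, and write the physical base addresses of the
 * TX/RX descriptor rings into CSR4/CSR3.
 */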
static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
{
        u32 value = readl(ioaddr + DMA_BUS_MODE);
        /* DMA SW reset */
        value |= DMA_BUS_MODE_SFT_RESET;
        writel(value, ioaddr + DMA_BUS_MODE);
        do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));

        value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
            ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
             (pbl << DMA_BUS_MODE_RPBL_SHIFT));

#ifdef CONFIG_STMMAC_DA
        value |= DMA_BUS_MODE_DA;       /* Rx has priority over tx */
#endif
        writel(value, ioaddr + DMA_BUS_MODE);

        /* Mask interrupts by writing to CSR7 */
        writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);

        /* The base address of the RX/TX descriptor lists must be written into
         * DMA CSR3 and CSR4, respectively. */
        writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
        writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);

        return 0;
}

/* Transmit FIFO flush operation */
static void gmac_flush_tx_fifo(unsigned long ioaddr)
{
        u32 csr6 = readl(ioaddr + DMA_CONTROL);
        writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);

        do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
}

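/*
 * Program CSR6 (operation mode): store-and-forward for TX and/or RX when
 * SF_DMA_MODE is requested, otherwise cut-through with the given
 * threshold mapped onto the nearest TTC/RTC encoding.
 */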
static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
                                    int rxmode)
{
        u32 csr6 = readl(ioaddr + DMA_CONTROL);

        if (txmode == SF_DMA_MODE) {
                DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n");
                /* Transmit COE type 2 cannot be done in cut-through mode. */
                csr6 |= DMA_CONTROL_TSF;
                /* Operating on second frame increases the performance,
                 * especially when transmit store-and-forward is used. */
                csr6 |= DMA_CONTROL_OSF;
        } else {
                DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
                        " (threshold = %d)\n", txmode);
                csr6 &= ~DMA_CONTROL_TSF;
                csr6 &= DMA_CONTROL_TC_TX_MASK;
                /* Set the transmit threshold */
                if (txmode <= 32)
                        csr6 |= DMA_CONTROL_TTC_32;
                else if (txmode <= 64)
                        csr6 |= DMA_CONTROL_TTC_64;
                else if (txmode <= 128)
                        csr6 |= DMA_CONTROL_TTC_128;
                else if (txmode <= 192)
                        csr6 |= DMA_CONTROL_TTC_192;
                else
                        csr6 |= DMA_CONTROL_TTC_256;
        }

        if (rxmode == SF_DMA_MODE) {
                DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n");
                csr6 |= DMA_CONTROL_RSF;
        } else {
                DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
                        " (threshold = %d)\n", rxmode);
                csr6 &= ~DMA_CONTROL_RSF;
                csr6 &= DMA_CONTROL_TC_RX_MASK;
                if (rxmode <= 32)
                        csr6 |= DMA_CONTROL_RTC_32;
                else if (rxmode <= 64)
                        csr6 |= DMA_CONTROL_RTC_64;
                else if (rxmode <= 96)
                        csr6 |= DMA_CONTROL_RTC_96;
                else
                        csr6 |= DMA_CONTROL_RTC_128;
        }

        writel(csr6, ioaddr + DMA_CONTROL);
}

/* Not yet implemented --- no RMON module */
static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
                                   unsigned long ioaddr)
{
}

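/*
 * Dump the DMA (CSR) register block; registers 9 to 17, reserved in this
 * DMA register map, are skipped.
 */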
static void gmac_dump_dma_regs(unsigned long ioaddr)
{
        int i;
        pr_info(" DMA registers\n");
        for (i = 0; i < 22; i++) {
                if ((i < 9) || (i > 17)) {
                        int offset = i * 4;
                        pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i,
                               (DMA_BUS_MODE + offset),
                               readl(ioaddr + DMA_BUS_MODE + offset));
                }
        }
}

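/*
 * Decode the TDES0 status word of a closed TX descriptor: bump the
 * per-driver extra statistics and netdev counters for every error bit
 * that is set, flushing the TX FIFO for the error types that require it
 * (frame flushed, underflow, payload checksum error).
 * Returns 0 on success, -1 if the error summary bit was set.
 */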
static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
                                    struct dma_desc *p, unsigned long ioaddr)
{
        int ret = 0;
        struct net_device_stats *stats = (struct net_device_stats *)data;

        if (unlikely(p->des01.etx.error_summary)) {
                DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
                if (unlikely(p->des01.etx.jabber_timeout)) {
                        DBG(KERN_ERR "\tjabber_timeout error\n");
                        x->tx_jabber++;
                }

                if (unlikely(p->des01.etx.frame_flushed)) {
                        DBG(KERN_ERR "\tframe_flushed error\n");
                        x->tx_frame_flushed++;
                        gmac_flush_tx_fifo(ioaddr);
                }

                if (unlikely(p->des01.etx.loss_carrier)) {
                        DBG(KERN_ERR "\tloss_carrier error\n");
                        x->tx_losscarrier++;
                        stats->tx_carrier_errors++;
                }
                if (unlikely(p->des01.etx.no_carrier)) {
                        DBG(KERN_ERR "\tno_carrier error\n");
                        x->tx_carrier++;
                        stats->tx_carrier_errors++;
                }
                if (unlikely(p->des01.etx.late_collision)) {
                        DBG(KERN_ERR "\tlate_collision error\n");
                        stats->collisions += p->des01.etx.collision_count;
                }
                if (unlikely(p->des01.etx.excessive_collisions)) {
                        DBG(KERN_ERR "\texcessive_collisions\n");
                        stats->collisions += p->des01.etx.collision_count;
                }
                if (unlikely(p->des01.etx.excessive_deferral)) {
                        DBG(KERN_INFO "\texcessive tx_deferral\n");
                        x->tx_deferred++;
                }

                if (unlikely(p->des01.etx.underflow_error)) {
                        DBG(KERN_ERR "\tunderflow error\n");
                        gmac_flush_tx_fifo(ioaddr);
                        x->tx_underflow++;
                }

                if (unlikely(p->des01.etx.ip_header_error)) {
                        DBG(KERN_ERR "\tTX IP header csum error\n");
                        x->tx_ip_header_error++;
                }

                if (unlikely(p->des01.etx.payload_error)) {
                        DBG(KERN_ERR "\tAddr/Payload csum error\n");
                        x->tx_payload_error++;
                        gmac_flush_tx_fifo(ioaddr);
                }

                ret = -1;
        }

        if (unlikely(p->des01.etx.deferred)) {
                DBG(KERN_INFO "GMAC TX status: tx deferred\n");
                x->tx_deferred++;
        }
#ifdef STMMAC_VLAN_TAG_USED
        if (p->des01.etx.vlan_frame) {
                DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
                x->tx_vlan++;
        }
#endif

        return ret;
}

static int gmac_get_tx_len(struct dma_desc *p)
{
        return p->des01.etx.buffer1_size;
}

static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
{
        int ret = good_frame;
        u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;

        /* bits 5 7 0 | Frame status
         * ----------------------------------------------------------
         *  0   0  0  | IEEE 802.3 Type frame (length < 1536 octets)
         *  1   0  0  | IPv4/6 No CSUM errors
         *  1   0  1  | IPv4/6 CSUM PAYLOAD error
         *  1   1  0  | IPv4/6 CSUM IP HR error
         *  1   1  1  | IPv4/6 IP PAYLOAD AND HEADER errors
         *  0   0  1  | IPv4/6 unsupported IP PAYLOAD
         *  0   1  1  | COE bypassed.. no IPv4/6 frame
         *  0   1  0  | Reserved.
         */
        if (status == 0x0) {
                DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
                ret = good_frame;
        } else if (status == 0x4) {
                DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errors.\n");
                ret = good_frame;
        } else if (status == 0x5) {
                DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
                ret = csum_none;
        } else if (status == 0x6) {
                DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
                ret = csum_none;
        } else if (status == 0x7) {
                DBG(KERN_ERR
                    "RX Des0 status: IPv4/6 Header and Payload Error.\n");
                ret = csum_none;
        } else if (status == 0x1) {
                DBG(KERN_ERR
                    "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
                ret = discard_frame;
        } else if (status == 0x3) {
                DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
                ret = discard_frame;
        }
        return ret;
}

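/*
 * Decode the RDES0 status word of a received frame.  An error-summary
 * condition marks the frame for discard; the checksum-offload bits are
 * then decoded via gmac_coe_rdes0() so the caller can decide how to set
 * skb->ip_summed.  Note that x->rx_lenght below keeps the existing
 * spelling of the stmmac_extra_stats field.
 */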
static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
                                    struct dma_desc *p)
{
        int ret = good_frame;
        struct net_device_stats *stats = (struct net_device_stats *)data;

        if (unlikely(p->des01.erx.error_summary)) {
                DBG(KERN_ERR "GMAC RX Error Summary... 0x%08x\n", p->des01.erx);
                if (unlikely(p->des01.erx.descriptor_error)) {
                        DBG(KERN_ERR "\tdescriptor error\n");
                        x->rx_desc++;
                        stats->rx_length_errors++;
                }
                if (unlikely(p->des01.erx.overflow_error)) {
                        DBG(KERN_ERR "\toverflow error\n");
                        x->rx_gmac_overflow++;
                }

                if (unlikely(p->des01.erx.ipc_csum_error))
                        DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");

                if (unlikely(p->des01.erx.late_collision)) {
                        DBG(KERN_ERR "\tlate_collision error\n");
                        stats->collisions++;
                        stats->collisions++;
                }
                if (unlikely(p->des01.erx.receive_watchdog)) {
                        DBG(KERN_ERR "\treceive_watchdog error\n");
                        x->rx_watchdog++;
                }
                if (unlikely(p->des01.erx.error_gmii)) {
                        DBG(KERN_ERR "\tReceive Error\n");
                        x->rx_mii++;
                }
                if (unlikely(p->des01.erx.crc_error)) {
                        DBG(KERN_ERR "\tCRC error\n");
                        x->rx_crc++;
                        stats->rx_crc_errors++;
                }
                ret = discard_frame;
        }

        /* After a payload csum error, the ES bit is set.
         * This does not match the information reported in the databook.
         * At any rate, we need to understand if the CSUM hw computation is ok
         * and report this info to the upper layers. */
        ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error,
                p->des01.erx.frame_type, p->des01.erx.payload_csum_error);

        if (unlikely(p->des01.erx.dribbling)) {
                DBG(KERN_ERR "GMAC RX: dribbling error\n");
                ret = discard_frame;
        }
        if (unlikely(p->des01.erx.sa_filter_fail)) {
                DBG(KERN_ERR "GMAC RX: Source Address filter fail\n");
                x->sa_rx_filter_fail++;
                ret = discard_frame;
        }
        if (unlikely(p->des01.erx.da_filter_fail)) {
                DBG(KERN_ERR "GMAC RX: Destination Address filter fail\n");
                x->da_rx_filter_fail++;
                ret = discard_frame;
        }
        if (unlikely(p->des01.erx.length_error)) {
                DBG(KERN_ERR "GMAC RX: length error\n");
                x->rx_lenght++;
                ret = discard_frame;
        }
#ifdef STMMAC_VLAN_TAG_USED
        if (p->des01.erx.vlan_tag) {
                DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
                x->rx_vlan++;
        }
#endif
        return ret;
}

static void gmac_irq_status(unsigned long ioaddr)
{
        u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);

        /* Not used events (e.g. MMC interrupts) are not handled. */
        if (intr_status & mmc_tx_irq)
                DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
                    readl(ioaddr + GMAC_MMC_TX_INTR));
        if (unlikely(intr_status & mmc_rx_irq))
                DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
                    readl(ioaddr + GMAC_MMC_RX_INTR));
        if (unlikely(intr_status & mmc_rx_csum_offload_irq))
                DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
                    readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
        if (unlikely(intr_status & pmt_irq)) {
                DBG(KERN_DEBUG "GMAC: received Magic frame\n");
                /* clear the PMT bits 5 and 6 by reading the PMT
                 * status register. */
                readl(ioaddr + GMAC_PMT);
        }
}

static void gmac_core_init(unsigned long ioaddr)
{
        u32 value = readl(ioaddr + GMAC_CONTROL);
        value |= GMAC_CORE_INIT;
        writel(value, ioaddr + GMAC_CONTROL);

        /* Freeze MMC counters */
        writel(0x8, ioaddr + GMAC_MMC_CTRL);
        /* Mask GMAC interrupts */
        writel(0x207, ioaddr + GMAC_INT_MASK);

#ifdef STMMAC_VLAN_TAG_USED
        /* Tag detection without filtering */
        writel(0x0, ioaddr + GMAC_VLAN_TAG);
#endif
}

static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
                               unsigned int reg_n)
{
        stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
                            GMAC_ADDR_LOW(reg_n));
}

static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
                               unsigned int reg_n)
{
        stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
                            GMAC_ADDR_LOW(reg_n));
}

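/*
 * Program the RX frame filter from the netdev state: promiscuous mode
 * passes everything; too many multicast entries (or IFF_ALLMULTI) falls
 * back to pass-all-multicast; otherwise each multicast address is hashed
 * into the 64-bin hash table (the upper 6 bits of the bit-reversed
 * CRC-32 of the address select the bin), and up to
 * GMAC_MAX_UNICAST_ADDRESSES unicast addresses get perfect-filter
 * registers.
 */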
static void gmac_set_filter(struct net_device *dev)
{
        unsigned long ioaddr = dev->base_addr;
        unsigned int value = 0;

        DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
            __func__, dev->mc_count, dev->uc.count);

        if (dev->flags & IFF_PROMISC)
                value = GMAC_FRAME_FILTER_PR;
        else if ((dev->mc_count > HASH_TABLE_SIZE)
                 || (dev->flags & IFF_ALLMULTI)) {
                value = GMAC_FRAME_FILTER_PM;   /* pass all multi */
                writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
                writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
        } else if (dev->mc_count > 0) {
                int i;
                u32 mc_filter[2];
                struct dev_mc_list *mclist;

                /* Hash filter for multicast */
                value = GMAC_FRAME_FILTER_HMC;

                memset(mc_filter, 0, sizeof(mc_filter));
                for (i = 0, mclist = dev->mc_list;
                     mclist && i < dev->mc_count; i++, mclist = mclist->next) {
                        /* The upper 6 bits of the calculated CRC are used to
                           index the contents of the hash table */
                        int bit_nr =
                            bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
                        /* The most significant bit determines the register to
                         * use (H/L) while the other 5 bits determine the bit
                         * within the register. */
                        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
                }
                writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
                writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
        }

        /* Handle multiple unicast addresses (perfect filtering) */
        if (dev->uc.count > GMAC_MAX_UNICAST_ADDRESSES)
                /* Switch to promiscuous mode if more than 16 addrs
                   are required */
                value |= GMAC_FRAME_FILTER_PR;
        else {
                int reg = 1;
                struct netdev_hw_addr *ha;

                list_for_each_entry(ha, &dev->uc.list, list) {
                        gmac_set_umac_addr(ioaddr, ha->addr, reg);
                        reg++;
                }
        }

#ifdef FRAME_FILTER_DEBUG
        /* Enable Receive all mode (to debug filtering_fail errors) */
        value |= GMAC_FRAME_FILTER_RA;
#endif
        writel(value, ioaddr + GMAC_FRAME_FILTER);

        DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
            "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
            readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
}

static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
                           unsigned int fc, unsigned int pause_time)
{
        unsigned int flow = 0;

        DBG(KERN_DEBUG "GMAC Flow-Control:\n");
        if (fc & FLOW_RX) {
                DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
                flow |= GMAC_FLOW_CTRL_RFE;
        }
        if (fc & FLOW_TX) {
                DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
                flow |= GMAC_FLOW_CTRL_TFE;
        }

        if (duplex) {
                DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
                flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
        }

        writel(flow, ioaddr + GMAC_FLOW_CTRL);
}

static void gmac_pmt(unsigned long ioaddr, unsigned long mode)
{
        unsigned int pmt = 0;

        if (mode == WAKE_MAGIC) {
                DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
                pmt |= power_down | magic_pkt_en;
        } else if (mode == WAKE_UCAST) {
                DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
                pmt |= global_unicast;
        }

        writel(pmt, ioaddr + GMAC_PMT);
}

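/*
 * Initialise the RX/TX descriptor rings.  Each RX descriptor owns two
 * buffers of (BUF_SIZE_8KiB - 1) bytes so that jumbo frames can be
 * received; the last descriptor in each ring has its end-of-ring bit set
 * so the DMA wraps back to the base address.
 */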
static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
                              int disable_rx_ic)
{
        int i;
        for (i = 0; i < ring_size; i++) {
                p->des01.erx.own = 1;
                p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
                /* To support jumbo frames */
                p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
                if (i == ring_size - 1)
                        p->des01.erx.end_ring = 1;
                if (disable_rx_ic)
                        p->des01.erx.disable_ic = 1;
                p++;
        }
}

static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
{
        int i;

        for (i = 0; i < ring_size; i++) {
                p->des01.etx.own = 0;
                if (i == ring_size - 1)
                        p->des01.etx.end_ring = 1;
                p++;
        }
}

static int gmac_get_tx_owner(struct dma_desc *p)
{
        return p->des01.etx.own;
}

static int gmac_get_rx_owner(struct dma_desc *p)
{
        return p->des01.erx.own;
}

static void gmac_set_tx_owner(struct dma_desc *p)
{
        p->des01.etx.own = 1;
}

static void gmac_set_rx_owner(struct dma_desc *p)
{
        p->des01.erx.own = 1;
}

static int gmac_get_tx_ls(struct dma_desc *p)
{
        return p->des01.etx.last_segment;
}

static void gmac_release_tx_desc(struct dma_desc *p)
{
        int ter = p->des01.etx.end_ring;

        memset(p, 0, sizeof(struct dma_desc));
        p->des01.etx.end_ring = ter;
}

static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
                                 int csum_flag)
{
        p->des01.etx.first_segment = is_fs;
        if (unlikely(len > BUF_SIZE_4KiB)) {
                p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
                p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
        } else {
                p->des01.etx.buffer1_size = len;
        }
        if (likely(csum_flag))
                p->des01.etx.checksum_insertion = cic_full;
}

static void gmac_clear_tx_ic(struct dma_desc *p)
{
        p->des01.etx.interrupt = 0;
}

static void gmac_close_tx_desc(struct dma_desc *p)
{
        p->des01.etx.last_segment = 1;
        p->des01.etx.interrupt = 1;
}

static int gmac_get_rx_frame_len(struct dma_desc *p)
{
        return p->des01.erx.frame_length;
}

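/*
 * The GMAC-specific helpers above are exposed to the core stmmac driver
 * through three ops tables: MAC core operations, DMA operations and
 * descriptor operations.
 */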
struct stmmac_ops gmac_ops = {
        .core_init = gmac_core_init,
        .dump_regs = gmac_dump_regs,
        .host_irq_status = gmac_irq_status,
        .set_filter = gmac_set_filter,
        .flow_ctrl = gmac_flow_ctrl,
        .pmt = gmac_pmt,
        .set_umac_addr = gmac_set_umac_addr,
        .get_umac_addr = gmac_get_umac_addr,
};

struct stmmac_dma_ops gmac_dma_ops = {
        .init = gmac_dma_init,
        .dump_regs = gmac_dump_dma_regs,
        .dma_mode = gmac_dma_operation_mode,
        .dma_diagnostic_fr = gmac_dma_diagnostic_fr,
};

struct stmmac_desc_ops gmac_desc_ops = {
        .tx_status = gmac_get_tx_frame_status,
        .rx_status = gmac_get_rx_frame_status,
        .get_tx_len = gmac_get_tx_len,
        .init_rx_desc = gmac_init_rx_desc,
        .init_tx_desc = gmac_init_tx_desc,
        .get_tx_owner = gmac_get_tx_owner,
        .get_rx_owner = gmac_get_rx_owner,
        .release_tx_desc = gmac_release_tx_desc,
        .prepare_tx_desc = gmac_prepare_tx_desc,
        .clear_tx_ic = gmac_clear_tx_ic,
        .close_tx_desc = gmac_close_tx_desc,
        .get_tx_ls = gmac_get_tx_ls,
        .set_tx_owner = gmac_set_tx_owner,
        .set_rx_owner = gmac_set_rx_owner,
        .get_rx_frame_len = gmac_get_rx_frame_len,
};

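/*
 * Allocate and fill the mac_device_info descriptor for a GMAC instance:
 * read the HW version register, hook up the three ops tables and record
 * the link/MII register layout used by the core driver.
 */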
struct mac_device_info *gmac_setup(unsigned long ioaddr)
{
        struct mac_device_info *mac;
        u32 uid = readl(ioaddr + GMAC_VERSION);

        pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
                ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));

        mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
        if (!mac)
                return NULL;    /* don't dereference a failed allocation */

        mac->mac = &gmac_ops;
        mac->desc = &gmac_desc_ops;
        mac->dma = &gmac_dma_ops;

        mac->pmt = PMT_SUPPORTED;
        mac->link.port = GMAC_CONTROL_PS;
        mac->link.duplex = GMAC_CONTROL_DM;
        mac->link.speed = GMAC_CONTROL_FES;
        mac->mii.addr = GMAC_MII_ADDR;
        mac->mii.data = GMAC_MII_DATA;

        return mac;
}
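
/*
 * Usage sketch (hypothetical caller, not part of this file): the core
 * stmmac driver is expected to obtain the ops tables via gmac_setup()
 * and then drive the device through them, roughly as follows, where
 * ioaddr, pbl, tx_ring_phys and rx_ring_phys are assumed to come from
 * the platform setup:
 *
 *      struct mac_device_info *mac = gmac_setup(ioaddr);
 *
 *      if (mac) {
 *              mac->mac->core_init(ioaddr);
 *              mac->dma->init(ioaddr, pbl, tx_ring_phys, rx_ring_phys);
 *              mac->dma->dma_mode(ioaddr, SF_DMA_MODE, SF_DMA_MODE);
 *      }
 */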