/*
 * linux/drivers/mmc/au1xmmc.c - AU1XX0 MMC driver
 *
 * Copyright (c) 2005, Advanced Micro Devices, Inc.
 *
 * Developed with help from the 2.4.30 MMC AU1XXX controller including
 * the following copyright notices:
 *     Copyright (c) 2003-2004 Embedded Edge, LLC.
 *     Portions Copyright (C) 2002 Embedix, Inc
 *     Copyright 2002 Hewlett-Packard Company
 *
 * 2.6 version of this driver inspired by:
 *     (drivers/mmc/wbsd.c) Copyright (C) 2004-2005 Pierre Ossman,
 *     All Rights Reserved.
 *     (drivers/mmc/pxa.c) Copyright (C) 2003 Russell King,
 *     All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/* Why is a timer used to detect insert events?
 *
 * From the AU1100 MMC application guide:
 * If the Au1100-based design is intended to support both MultiMediaCards
 * and 1- or 4-data bit SecureDigital cards, then the solution is to
 * connect a weak (560KOhm) pull-up resistor to connector pin 1.
 * In doing so, an MMC card never enters SPI-mode communications,
 * but now the SecureDigital card-detect feature of CD/DAT3 is ineffective
 * (the low to high transition will not occur).
 *
 * So we use the timer to check the status manually.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/host.h>
#include <asm/io.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1100_mmc.h>
#include <asm/scatterlist.h>

#include <au1xxx.h>
#include "au1xmmc.h"

#define DRIVER_NAME "au1xxx-mmc"

/* Set this to enable special debugging macros */

#ifdef DEBUG
#define DBG(fmt, idx, args...) printk("au1xx(%d): DEBUG: " fmt, idx, ##args)
#else
#define DBG(fmt, idx, args...)
#endif

const struct {
        u32 iobase;
        u32 tx_devid, rx_devid;
        u16 bcsrpwr;
        u16 bcsrstatus;
        u16 wpstatus;
} au1xmmc_card_table[] = {
        { SD0_BASE, DSCR_CMD0_SDMS_TX0, DSCR_CMD0_SDMS_RX0,
          BCSR_BOARD_SD0PWR, BCSR_INT_SD0INSERT, BCSR_STATUS_SD0WP },
#ifndef CONFIG_MIPS_DB1200
        { SD1_BASE, DSCR_CMD0_SDMS_TX1, DSCR_CMD0_SDMS_RX1,
          BCSR_BOARD_DS1PWR, BCSR_INT_SD1INSERT, BCSR_STATUS_SD1WP }
#endif
};

#define AU1XMMC_CONTROLLER_COUNT (ARRAY_SIZE(au1xmmc_card_table))

/* This array stores pointers for the hosts (used by the IRQ handler) */
struct au1xmmc_host *au1xmmc_hosts[AU1XMMC_CONTROLLER_COUNT];
static int dma = 1;

#ifdef MODULE
module_param(dma, bool, 0);
MODULE_PARM_DESC(dma, "Use DMA engine for data transfers (0 = disabled)");
#endif

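/* Small register helpers: IRQ_ON()/IRQ_OFF() set and clear interrupt-enable
 * bits in SD_CONFIG, FLUSH_FIFO() flushes the data FIFO through SD_CONFIG2,
 * and SEND_STOP() issues the stop command at the end of a multi-block
 * transfer.
 */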
static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask)
{
        u32 val = au_readl(HOST_CONFIG(host));
        val |= mask;
        au_writel(val, HOST_CONFIG(host));
        au_sync();
}

static inline void FLUSH_FIFO(struct au1xmmc_host *host)
{
        u32 val = au_readl(HOST_CONFIG2(host));

        au_writel(val | SD_CONFIG2_FF, HOST_CONFIG2(host));
        au_sync_delay(1);

        /* SEND_STOP will turn off clock control - this re-enables it */
        val &= ~SD_CONFIG2_DF;

        au_writel(val, HOST_CONFIG2(host));
        au_sync();
}

static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask)
{
        u32 val = au_readl(HOST_CONFIG(host));
        val &= ~mask;
        au_writel(val, HOST_CONFIG(host));
        au_sync();
}

static inline void SEND_STOP(struct au1xmmc_host *host)
{
        /* We know the value of CONFIG2, so avoid a read we don't need */
        u32 mask = SD_CONFIG2_EN;

        WARN_ON(host->status != HOST_S_DATA);
        host->status = HOST_S_STOP;

        au_writel(mask | SD_CONFIG2_DF, HOST_CONFIG2(host));
        au_sync();

        /* Send the stop command */
        au_writel(STOP_CMD, HOST_CMD(host));
}

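/* Slot power, card-detect and write-protect are all routed through the
 * board's BCSR registers rather than the SD controller itself.
 */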
static void au1xmmc_set_power(struct au1xmmc_host *host, int state)
{
        u32 val = au1xmmc_card_table[host->id].bcsrpwr;

        bcsr->board &= ~val;
        if (state)
                bcsr->board |= val;

        au_sync_delay(1);
}

static inline int au1xmmc_card_inserted(struct au1xmmc_host *host)
{
        return (bcsr->sig_status & au1xmmc_card_table[host->id].bcsrstatus)
                ? 1 : 0;
}

static int au1xmmc_card_readonly(struct mmc_host *mmc)
{
        struct au1xmmc_host *host = mmc_priv(mmc);
        return (bcsr->status & au1xmmc_card_table[host->id].wpstatus)
                ? 1 : 0;
}

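/* Clear the per-request state and hand the completed request back to the
 * MMC core.
 */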
static void au1xmmc_finish_request(struct au1xmmc_host *host)
{
        struct mmc_request *mrq = host->mrq;

        host->mrq = NULL;
        host->flags &= HOST_F_ACTIVE;

        host->dma.len = 0;
        host->dma.dir = 0;

        host->pio.index = 0;
        host->pio.offset = 0;
        host->pio.len = 0;

        host->status = HOST_S_IDLE;

        bcsr->disk_leds |= (1 << 8);

        mmc_request_done(host->mmc, mrq);
}

static void au1xmmc_tasklet_finish(unsigned long param)
{
        struct au1xmmc_host *host = (struct au1xmmc_host *) param;
        au1xmmc_finish_request(host);
}

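/* Build the command word (response and transfer type bits) and write it to
 * the controller. If @wait is set, command-response interrupts are masked
 * and the function polls for command completion instead.
 */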
static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
                                struct mmc_command *cmd, unsigned int flags)
{
        u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);

        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE:
                break;
        case MMC_RSP_R1:
                mmccmd |= SD_CMD_RT_1;
                break;
        case MMC_RSP_R1B:
                mmccmd |= SD_CMD_RT_1B;
                break;
        case MMC_RSP_R2:
                mmccmd |= SD_CMD_RT_2;
                break;
        case MMC_RSP_R3:
                mmccmd |= SD_CMD_RT_3;
                break;
        default:
                printk(KERN_INFO "au1xmmc: unhandled response type %02x\n",
                        mmc_resp_type(cmd));
                return MMC_ERR_INVALID;
        }

        if (flags & MMC_DATA_READ) {
                if (flags & MMC_DATA_MULTI)
                        mmccmd |= SD_CMD_CT_4;
                else
                        mmccmd |= SD_CMD_CT_2;
        } else if (flags & MMC_DATA_WRITE) {
                if (flags & MMC_DATA_MULTI)
                        mmccmd |= SD_CMD_CT_3;
                else
                        mmccmd |= SD_CMD_CT_1;
        }

        au_writel(cmd->arg, HOST_CMDARG(host));
        au_sync();

        if (wait)
                IRQ_OFF(host, SD_CONFIG_CR);

        au_writel((mmccmd | SD_CMD_GO), HOST_CMD(host));
        au_sync();

        /* Wait for the command to go on the line */
        while (1) {
                if (!(au_readl(HOST_CMD(host)) & SD_CMD_GO))
                        break;
        }

        /* Wait for the command to come back */
        if (wait) {
                u32 status = au_readl(HOST_STATUS(host));

                while (!(status & SD_STATUS_CR))
                        status = au_readl(HOST_STATUS(host));

                /* Clear the CR status */
                au_writel(SD_STATUS_CR, HOST_STATUS(host));

                IRQ_ON(host, SD_CONFIG_CR);
        }

        return MMC_ERR_NONE;
}

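/* Called once the data phase is over: wait for the data-busy bit to clear
 * on transmit, check the CRC status bits, record how many bytes actually
 * moved and complete the request.
 */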
static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
{
        struct mmc_request *mrq = host->mrq;
        struct mmc_data *data;
        u32 crc;

        WARN_ON(host->status != HOST_S_DATA && host->status != HOST_S_STOP);

        if (host->mrq == NULL)
                return;

        data = mrq->cmd->data;

        if (status == 0)
                status = au_readl(HOST_STATUS(host));

        /* The transaction is really over when the SD_STATUS_DB bit is clear */
        while ((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB))
                status = au_readl(HOST_STATUS(host));

        data->error = MMC_ERR_NONE;
        dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir);

        /* Process any errors */
        crc = (status & (SD_STATUS_WC | SD_STATUS_RC));
        if (host->flags & HOST_F_XMIT)
                crc |= ((status & 0x07) == 0x02) ? 0 : 1;

        if (crc)
                data->error = MMC_ERR_BADCRC;

        /* Clear the CRC bits */
        au_writel(SD_STATUS_WC | SD_STATUS_RC, HOST_STATUS(host));

        data->bytes_xfered = 0;

        if (data->error == MMC_ERR_NONE) {
                if (host->flags & HOST_F_DMA) {
                        u32 chan = DMA_CHANNEL(host);

                        chan_tab_t *c = *((chan_tab_t **) chan);
                        au1x_dma_chan_t *cp = c->chan_ptr;
                        data->bytes_xfered = cp->ddma_bytecnt;
                }
                else
                        data->bytes_xfered =
                                (data->blocks * data->blksz) - host->pio.len;
        }

        au1xmmc_finish_request(host);
}

static void au1xmmc_tasklet_data(unsigned long param)
{
        struct au1xmmc_host *host = (struct au1xmmc_host *) param;

        u32 status = au_readl(HOST_STATUS(host));
        au1xmmc_data_complete(host, status);
}

#define AU1XMMC_MAX_TRANSFER 8

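/* Feed the transmit FIFO by hand: copy up to AU1XMMC_MAX_TRANSFER bytes
 * from the current scatterlist entry while the controller reports room in
 * the FIFO, then finish the data phase once everything has been written.
 */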
static void au1xmmc_send_pio(struct au1xmmc_host *host)
{
        struct mmc_data *data = 0;
        int sg_len, max, count = 0;
        unsigned char *sg_ptr;
        u32 status = 0;
        struct scatterlist *sg;

        data = host->mrq->data;

        if (!(host->flags & HOST_F_XMIT))
                return;

        /* This is the pointer to the data buffer */
        sg = &data->sg[host->pio.index];
        sg_ptr = page_address(sg->page) + sg->offset + host->pio.offset;

        /* This is the space left inside the buffer */
        sg_len = data->sg[host->pio.index].length - host->pio.offset;

        /* Check if we need less than the size of the sg_buffer */
        max = (sg_len > host->pio.len) ? host->pio.len : sg_len;
        if (max > AU1XMMC_MAX_TRANSFER)
                max = AU1XMMC_MAX_TRANSFER;

        for (count = 0; count < max; count++) {
                unsigned char val;

                status = au_readl(HOST_STATUS(host));

                if (!(status & SD_STATUS_TH))
                        break;

                val = *sg_ptr++;

                au_writel((unsigned long) val, HOST_TXPORT(host));
                au_sync();
        }

        host->pio.len -= count;
        host->pio.offset += count;

        if (count == sg_len) {
                host->pio.index++;
                host->pio.offset = 0;
        }

        if (host->pio.len == 0) {
                IRQ_OFF(host, SD_CONFIG_TH);

                if (host->flags & HOST_F_STOP)
                        SEND_STOP(host);

                tasklet_schedule(&host->data_task);
        }
}

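/* Drain the receive FIFO by hand: read bytes into the current scatterlist
 * entry while data is available, bailing out on CRC, overrun or underrun
 * errors, and finish the data phase once the expected length has arrived.
 */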
static void au1xmmc_receive_pio(struct au1xmmc_host *host)
{
        struct mmc_data *data = 0;
        int sg_len = 0, max = 0, count = 0;
        unsigned char *sg_ptr = 0;
        u32 status = 0;
        struct scatterlist *sg;

        data = host->mrq->data;

        if (!(host->flags & HOST_F_RECV))
                return;

        max = host->pio.len;

        if (host->pio.index < host->dma.len) {
                sg = &data->sg[host->pio.index];
                sg_ptr = page_address(sg->page) + sg->offset + host->pio.offset;

                /* This is the space left inside the buffer */
                sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;

                /* Check if we need less than the size of the sg_buffer */
                if (sg_len < max)
                        max = sg_len;
        }

        if (max > AU1XMMC_MAX_TRANSFER)
                max = AU1XMMC_MAX_TRANSFER;

        for (count = 0; count < max; count++) {
                u32 val;
                status = au_readl(HOST_STATUS(host));

                if (!(status & SD_STATUS_NE))
                        break;

                if (status & SD_STATUS_RC) {
                        DBG("RX CRC Error [%d + %d].\n", host->id,
                                        host->pio.len, count);
                        break;
                }

                if (status & SD_STATUS_RO) {
                        DBG("RX Overrun [%d + %d]\n", host->id,
                                        host->pio.len, count);
                        break;
                }
                else if (status & SD_STATUS_RU) {
                        DBG("RX Underrun [%d + %d]\n", host->id,
                                        host->pio.len, count);
                        break;
                }

                val = au_readl(HOST_RXPORT(host));

                if (sg_ptr)
                        *sg_ptr++ = (unsigned char) (val & 0xFF);
        }

        host->pio.len -= count;
        host->pio.offset += count;

        if (sg_len && count == sg_len) {
                host->pio.index++;
                host->pio.offset = 0;
        }

        if (host->pio.len == 0) {
                //IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF);
                IRQ_OFF(host, SD_CONFIG_NE);

                if (host->flags & HOST_F_STOP)
                        SEND_STOP(host);

                tasklet_schedule(&host->data_task);
        }
}

/* static void au1xmmc_cmd_complete
   This is called when a command has been completed - grab the response
   and check for errors. Then start the data transfer if it is indicated.
*/

static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
{
        struct mmc_request *mrq = host->mrq;
        struct mmc_command *cmd;
        int trans;

        if (!host->mrq)
                return;

        cmd = mrq->cmd;
        cmd->error = MMC_ERR_NONE;

        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136) {
                        u32 r[4];
                        int i;

                        r[0] = au_readl(host->iobase + SD_RESP3);
                        r[1] = au_readl(host->iobase + SD_RESP2);
                        r[2] = au_readl(host->iobase + SD_RESP1);
                        r[3] = au_readl(host->iobase + SD_RESP0);

                        /* The CRC is omitted from the response, so really
                         * we only got 120 bits, but the engine expects
                         * 128 bits, so we have to shift things up
                         */
                        for (i = 0; i < 4; i++) {
                                cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8;
                                if (i != 3)
                                        cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24;
                        }
                } else {
                        /* Technically, we should be getting all 48 bits of
                         * the response (SD_RESP1 + SD_RESP2), but because
                         * our response omits the CRC, our data ends up
                         * being shifted 8 bits to the right. In this case,
                         * that means that the OSR data starts at bit 31,
                         * so we can just read RESP0 and return that
                         */
                        cmd->resp[0] = au_readl(host->iobase + SD_RESP0);
                }
        }

        /* Figure out errors */
        if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC))
                cmd->error = MMC_ERR_BADCRC;

        trans = host->flags & (HOST_F_XMIT | HOST_F_RECV);

        if (!trans || cmd->error != MMC_ERR_NONE) {
                IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF);
                tasklet_schedule(&host->finish_task);
                return;
        }

        host->status = HOST_S_DATA;

        if (host->flags & HOST_F_DMA) {
                u32 channel = DMA_CHANNEL(host);

                /* Start the DMA as soon as the buffer gets something in it */
                if (host->flags & HOST_F_RECV) {
                        u32 mask = SD_STATUS_DB | SD_STATUS_NE;

                        while ((status & mask) != mask)
                                status = au_readl(HOST_STATUS(host));
                }

                au1xxx_dbdma_start(channel);
        }
}

static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate)
{
        unsigned int pbus = get_au1x00_speed();
        unsigned int divisor;
        u32 config;

        /* From databook:
           divisor = ((((cpuclock / sbus_divisor) / 2) / mmcclock) / 2) - 1
        */
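        /* Worked example with made-up numbers: a 396 MHz core clock and a
         * SYS_POWERCTRL bus divisor of 2 give pbus = 396/2/2 = 99 MHz, so a
         * 24 MHz target rate yields divisor = ((99/24)/2) - 1 = 1 (integer
         * maths throughout).
         */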

        pbus /= ((au_readl(SYS_POWERCTRL) & 0x3) + 2);
        pbus /= 2;

        divisor = ((pbus / rate) / 2) - 1;

        config = au_readl(HOST_CONFIG(host));

        config &= ~(SD_CONFIG_DIV);
        config |= (divisor & SD_CONFIG_DIV) | SD_CONFIG_DE;

        au_writel(config, HOST_CONFIG(host));
        au_sync();
}

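/* Map the request's scatterlist and either queue it on the DBDMA channel
 * (one descriptor per sg entry, interrupt on the last one) or set up the
 * PIO counters and unmask the relevant FIFO interrupt.
 */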
static int
au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
{
        int datalen = data->blocks * data->blksz;

        if (dma != 0)
                host->flags |= HOST_F_DMA;

        if (data->flags & MMC_DATA_READ)
                host->flags |= HOST_F_RECV;
        else
                host->flags |= HOST_F_XMIT;

        if (host->mrq->stop)
                host->flags |= HOST_F_STOP;

        host->dma.dir = DMA_BIDIRECTIONAL;

        host->dma.len = dma_map_sg(mmc_dev(host->mmc), data->sg,
                                   data->sg_len, host->dma.dir);

        if (host->dma.len == 0)
                return MMC_ERR_TIMEOUT;

        au_writel(data->blksz - 1, HOST_BLKSIZE(host));

        if (host->flags & HOST_F_DMA) {
                int i;
                u32 channel = DMA_CHANNEL(host);

                au1xxx_dbdma_stop(channel);

                for (i = 0; i < host->dma.len; i++) {
                        u32 ret = 0, flags = DDMA_FLAGS_NOIE;
                        struct scatterlist *sg = &data->sg[i];
                        int sg_len = sg->length;

                        int len = (datalen > sg_len) ? sg_len : datalen;

                        if (i == host->dma.len - 1)
                                flags = DDMA_FLAGS_IE;

                        if (host->flags & HOST_F_XMIT) {
                                ret = au1xxx_dbdma_put_source_flags(channel,
                                        (void *) (page_address(sg->page) +
                                                  sg->offset),
                                        len, flags);
                        }
                        else {
                                ret = au1xxx_dbdma_put_dest_flags(channel,
                                        (void *) (page_address(sg->page) +
                                                  sg->offset),
                                        len, flags);
                        }

                        if (!ret)
                                goto dataerr;

                        datalen -= len;
                }
        }
        else {
                host->pio.index = 0;
                host->pio.offset = 0;
                host->pio.len = datalen;

                if (host->flags & HOST_F_XMIT)
                        IRQ_ON(host, SD_CONFIG_TH);
                else
                        IRQ_ON(host, SD_CONFIG_NE);
                        //IRQ_ON(host, SD_CONFIG_RA|SD_CONFIG_RF);
        }

        return MMC_ERR_NONE;

 dataerr:
        dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir);
        return MMC_ERR_TIMEOUT;
}

/* static void au1xmmc_request
   This actually starts a command or data transaction
*/

static void au1xmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct au1xmmc_host *host = mmc_priv(mmc);
        unsigned int flags = 0;
        int ret = MMC_ERR_NONE;

        WARN_ON(irqs_disabled());
        WARN_ON(host->status != HOST_S_IDLE);

        host->mrq = mrq;
        host->status = HOST_S_CMD;

        bcsr->disk_leds &= ~(1 << 8);

        if (mrq->data) {
                FLUSH_FIFO(host);
                flags = mrq->data->flags;
                ret = au1xmmc_prepare_data(host, mrq->data);
        }

        if (ret == MMC_ERR_NONE)
                ret = au1xmmc_send_command(host, 0, mrq->cmd, flags);

        if (ret != MMC_ERR_NONE) {
                mrq->cmd->error = ret;
                au1xmmc_finish_request(host);
        }
}

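/* Bring the controller to a known state: enable the block, clear status,
 * reset block size and timeout, flush the FIFO and program the default
 * interrupt mask.
 */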
static void au1xmmc_reset_controller(struct au1xmmc_host *host)
{
        /* Apply the clock */
        au_writel(SD_ENABLE_CE, HOST_ENABLE(host));
        au_sync_delay(1);

        au_writel(SD_ENABLE_R | SD_ENABLE_CE, HOST_ENABLE(host));
        au_sync_delay(5);

        au_writel(~0, HOST_STATUS(host));
        au_sync();

        au_writel(0, HOST_BLKSIZE(host));
        au_writel(0x001fffff, HOST_TIMEOUT(host));
        au_sync();

        au_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
        au_sync();

        au_writel(SD_CONFIG2_EN | SD_CONFIG2_FF, HOST_CONFIG2(host));
        au_sync_delay(1);

        au_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
        au_sync();

        /* Configure interrupts */
        au_writel(AU1XMMC_INTERRUPTS, HOST_CONFIG(host));
        au_sync();
}


static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct au1xmmc_host *host = mmc_priv(mmc);

        if (ios->power_mode == MMC_POWER_OFF)
                au1xmmc_set_power(host, 0);
        else if (ios->power_mode == MMC_POWER_ON) {
                au1xmmc_set_power(host, 1);
        }

        if (ios->clock && ios->clock != host->clock) {
                au1xmmc_set_clock(host, ios->clock);
                host->clock = ios->clock;
        }
}

static void au1xmmc_dma_callback(int irq, void *dev_id)
{
        struct au1xmmc_host *host = (struct au1xmmc_host *) dev_id;

        /* Avoid spurious interrupts */
        if (!host->mrq)
                return;

        if (host->flags & HOST_F_STOP)
                SEND_STOP(host);

        tasklet_schedule(&host->data_task);
}

#define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT)
#define STATUS_DATA_IN  (SD_STATUS_NE)
#define STATUS_DATA_OUT (SD_STATUS_TH)

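/* The SD interrupt is shared between the controllers, so the handler walks
 * every registered host, works out what the status bits mean for it (command
 * done, timeouts, PIO FIFO service) and acknowledges whatever it handled.
 */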
static irqreturn_t au1xmmc_irq(int irq, void *dev_id)
{
        u32 status;
        int i, ret = 0;

        disable_irq(AU1100_SD_IRQ);

        for (i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) {
                struct au1xmmc_host *host = au1xmmc_hosts[i];
                u32 handled = 1;

                status = au_readl(HOST_STATUS(host));

                if (host->mrq && (status & STATUS_TIMEOUT)) {
                        if (status & SD_STATUS_RAT)
                                host->mrq->cmd->error = MMC_ERR_TIMEOUT;
                        else if (status & SD_STATUS_DT)
                                host->mrq->data->error = MMC_ERR_TIMEOUT;

                        /* In PIO mode, interrupts might still be enabled */
                        IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH);

                        //IRQ_OFF(host, SD_CONFIG_TH|SD_CONFIG_RA|SD_CONFIG_RF);
                        tasklet_schedule(&host->finish_task);
                }
#if 0
                else if (status & SD_STATUS_DD) {
                        /* Sometimes we get a DD before a NE in PIO mode */
                        if (!(host->flags & HOST_F_DMA) &&
                                        (status & SD_STATUS_NE))
                                au1xmmc_receive_pio(host);
                        else {
                                au1xmmc_data_complete(host, status);
                                //tasklet_schedule(&host->data_task);
                        }
                }
#endif
                else if (status & (SD_STATUS_CR)) {
                        if (host->status == HOST_S_CMD)
                                au1xmmc_cmd_complete(host, status);
                }
                else if (!(host->flags & HOST_F_DMA)) {
                        if ((host->flags & HOST_F_XMIT) &&
                                        (status & STATUS_DATA_OUT))
                                au1xmmc_send_pio(host);
                        else if ((host->flags & HOST_F_RECV) &&
                                        (status & STATUS_DATA_IN))
                                au1xmmc_receive_pio(host);
                }
                else if (status & 0x203FBC70) {
                        DBG("Unhandled status %8.8x\n", host->id, status);
                        handled = 0;
                }

                au_writel(status, HOST_STATUS(host));
                au_sync();

                ret |= handled;
        }

        enable_irq(AU1100_SD_IRQ);
        return ret;
}

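/* Timer-driven card-detect poll (see the comment at the top of the file):
 * compare the BCSR insert status against the host's active flag and notify
 * the MMC core when a card has come or gone.
 */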
static void au1xmmc_poll_event(unsigned long arg)
{
        struct au1xmmc_host *host = (struct au1xmmc_host *) arg;

        int card = au1xmmc_card_inserted(host);
        int controller = (host->flags & HOST_F_ACTIVE) ? 1 : 0;

        if (card != controller) {
                host->flags &= ~HOST_F_ACTIVE;
                if (card)
                        host->flags |= HOST_F_ACTIVE;
                mmc_detect_change(host->mmc, 0);
        }

        if (host->mrq != NULL) {
                u32 status = au_readl(HOST_STATUS(host));
                DBG("PENDING - %8.8x\n", host->id, status);
        }

        mod_timer(&host->timer, jiffies + AU1XMMC_DETECT_TIMEOUT);
}

static dbdev_tab_t au1xmmc_mem_dbdev = {
        DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 8, 0x00000000, 0, 0
};

static void au1xmmc_init_dma(struct au1xmmc_host *host)
{
        u32 rxchan, txchan;

        int txid = au1xmmc_card_table[host->id].tx_devid;
        int rxid = au1xmmc_card_table[host->id].rx_devid;

        /* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride
           of 8 bits. And since devices are shared, we need to create
           our own to avoid freaking out other devices
        */
        int memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev);

        txchan = au1xxx_dbdma_chan_alloc(memid, txid,
                        au1xmmc_dma_callback, (void *) host);

        rxchan = au1xxx_dbdma_chan_alloc(rxid, memid,
                        au1xmmc_dma_callback, (void *) host);

        au1xxx_dbdma_set_devwidth(txchan, 8);
        au1xxx_dbdma_set_devwidth(rxchan, 8);

        au1xxx_dbdma_ring_alloc(txchan, AU1XMMC_DESCRIPTOR_COUNT);
        au1xxx_dbdma_ring_alloc(rxchan, AU1XMMC_DESCRIPTOR_COUNT);

        host->tx_chan = txchan;
        host->rx_chan = rxchan;
}

static const struct mmc_host_ops au1xmmc_ops = {
        .request = au1xmmc_request,
        .set_ios = au1xmmc_set_ios,
        .get_ro = au1xmmc_card_readonly,
};

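/* Probe registers a single shared SD interrupt and then sets up an mmc_host
 * for each controller in au1xmmc_card_table: clock and transfer limits, the
 * card-detect timer, the tasklets and (optionally) the DBDMA channels.
 */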
static int __devinit au1xmmc_probe(struct platform_device *pdev)
{
        int i, ret = 0;

        /* The interrupt is shared among all controllers */
        ret = request_irq(AU1100_SD_IRQ, au1xmmc_irq, IRQF_DISABLED, "MMC", 0);

        if (ret) {
                printk(DRIVER_NAME ": ERROR: Couldn't get int %d: %d\n",
                                AU1100_SD_IRQ, ret);
                return -ENXIO;
        }

        disable_irq(AU1100_SD_IRQ);

        for (i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) {
                struct mmc_host *mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev);
                struct au1xmmc_host *host = 0;

                if (!mmc) {
                        printk(DRIVER_NAME ": ERROR: no mem for host %d\n", i);
                        au1xmmc_hosts[i] = 0;
                        continue;
                }

                mmc->ops = &au1xmmc_ops;

                mmc->f_min = 450000;
                mmc->f_max = 24000000;

                mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE;
                mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT;

                mmc->max_blk_size = 2048;
                mmc->max_blk_count = 512;

                mmc->ocr_avail = AU1XMMC_OCR;

                host = mmc_priv(mmc);
                host->mmc = mmc;

                host->id = i;
                host->iobase = au1xmmc_card_table[host->id].iobase;
                host->clock = 0;
                host->power_mode = MMC_POWER_OFF;

                host->flags = au1xmmc_card_inserted(host) ? HOST_F_ACTIVE : 0;
                host->status = HOST_S_IDLE;

                init_timer(&host->timer);

                host->timer.function = au1xmmc_poll_event;
                host->timer.data = (unsigned long) host;
                host->timer.expires = jiffies + AU1XMMC_DETECT_TIMEOUT;

                tasklet_init(&host->data_task, au1xmmc_tasklet_data,
                                (unsigned long) host);

                tasklet_init(&host->finish_task, au1xmmc_tasklet_finish,
                                (unsigned long) host);

                spin_lock_init(&host->lock);

                if (dma != 0)
                        au1xmmc_init_dma(host);

                au1xmmc_reset_controller(host);

                mmc_add_host(mmc);
                au1xmmc_hosts[i] = host;

                add_timer(&host->timer);

                printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X (mode=%s)\n",
                                host->id, host->iobase, dma ? "dma" : "pio");
        }

        enable_irq(AU1100_SD_IRQ);

        return 0;
}

static int __devexit au1xmmc_remove(struct platform_device *pdev)
{
        int i;

        disable_irq(AU1100_SD_IRQ);

        for (i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) {
                struct au1xmmc_host *host = au1xmmc_hosts[i];
                if (!host)
                        continue;

                tasklet_kill(&host->data_task);
                tasklet_kill(&host->finish_task);

                del_timer_sync(&host->timer);
                au1xmmc_set_power(host, 0);

                mmc_remove_host(host->mmc);

                au1xxx_dbdma_chan_free(host->tx_chan);
                au1xxx_dbdma_chan_free(host->rx_chan);

                au_writel(0x0, HOST_ENABLE(host));
                au_sync();
        }

        free_irq(AU1100_SD_IRQ, 0);
        return 0;
}

static struct platform_driver au1xmmc_driver = {
        .probe = au1xmmc_probe,
        .remove = au1xmmc_remove,
        .suspend = NULL,
        .resume = NULL,
        .driver = {
                .name = DRIVER_NAME,
        },
};

static int __init au1xmmc_init(void)
{
        return platform_driver_register(&au1xmmc_driver);
}

static void __exit au1xmmc_exit(void)
{
        platform_driver_unregister(&au1xmmc_driver);
}

module_init(au1xmmc_init);
module_exit(au1xmmc_exit);

#ifdef MODULE
MODULE_AUTHOR("Advanced Micro Devices, Inc");
MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX");
MODULE_LICENSE("GPL");
#endif