/*
 * linux/drivers/mmc/host/au1xmmc.c - AU1XX0 MMC driver
 *
 * Copyright (c) 2005, Advanced Micro Devices, Inc.
 *
 * Developed with help from the 2.4.30 MMC AU1XXX controller including
 * the following copyright notices:
 *     Copyright (c) 2003-2004 Embedded Edge, LLC.
 *     Portions Copyright (C) 2002 Embedix, Inc
 *     Copyright 2002 Hewlett-Packard Company
 *
 * 2.6 version of this driver inspired by:
 *     (drivers/mmc/wbsd.c) Copyright (C) 2004-2005 Pierre Ossman,
 *     All Rights Reserved.
 *     (drivers/mmc/pxa.c) Copyright (C) 2003 Russell King,
 *     All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/* Why is a timer used to detect insert events?
 *
 * From the AU1100 MMC application guide:
 * If the Au1100-based design is intended to support both MultiMediaCards
 * and 1- or 4-data bit SecureDigital cards, then the solution is to
 * connect a weak (560KOhm) pull-up resistor to connector pin 1.
 * In doing so, an MMC card never enters SPI-mode communications,
 * but now the SecureDigital card-detect feature of CD/DAT3 is ineffective
 * (the low to high transition will not occur).
 *
 * So we use the timer to check the status manually.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/leds.h>
#include <linux/mmc/host.h>

#include <asm/io.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1100_mmc.h>

#include <au1xxx.h>
#include "au1xmmc.h"

#define DRIVER_NAME "au1xxx-mmc"

/* Set this to enable special debugging macros */
/* #define DEBUG */

#ifdef DEBUG
#define DBG(fmt, idx, args...) printk("au1xx(%d): DEBUG: " fmt, idx, ##args)
#else
#define DBG(fmt, idx, args...)
#endif

static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask)
{
	u32 val = au_readl(HOST_CONFIG(host));
	val |= mask;
	au_writel(val, HOST_CONFIG(host));
	au_sync();
}

static inline void FLUSH_FIFO(struct au1xmmc_host *host)
{
	u32 val = au_readl(HOST_CONFIG2(host));

	au_writel(val | SD_CONFIG2_FF, HOST_CONFIG2(host));
	au_sync_delay(1);

	/* SEND_STOP will turn off clock control - this re-enables it */
	val &= ~SD_CONFIG2_DF;

	au_writel(val, HOST_CONFIG2(host));
	au_sync();
}

static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask)
{
	u32 val = au_readl(HOST_CONFIG(host));
	val &= ~mask;
	au_writel(val, HOST_CONFIG(host));
	au_sync();
}

static inline void SEND_STOP(struct au1xmmc_host *host)
{
	u32 config2;

	WARN_ON(host->status != HOST_S_DATA);
	host->status = HOST_S_STOP;

	config2 = au_readl(HOST_CONFIG2(host));
	au_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host));
	au_sync();

	/* Send the stop command */
	au_writel(STOP_CMD, HOST_CMD(host));
}

static void au1xmmc_set_power(struct au1xmmc_host *host, int state)
{
	if (host->platdata && host->platdata->set_power)
		host->platdata->set_power(host->mmc, state);
}

static int au1xmmc_card_inserted(struct au1xmmc_host *host)
{
	int ret;

	if (host->platdata && host->platdata->card_inserted)
		ret = host->platdata->card_inserted(host->mmc);
	else
		ret = 1;	/* assume there is a card */

	return ret;
}

static int au1xmmc_card_readonly(struct mmc_host *mmc)
{
	struct au1xmmc_host *host = mmc_priv(mmc);
	int ret;

	if (host->platdata && host->platdata->card_readonly)
		ret = host->platdata->card_readonly(mmc);
	else
		ret = 0;	/* assume card is read-write */

	return ret;
}

static void au1xmmc_finish_request(struct au1xmmc_host *host)
{
	struct mmc_request *mrq = host->mrq;

	host->mrq = NULL;
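	/* Keep only the card-present and DMA-capable flags; any transfer
	 * state (HOST_F_XMIT/HOST_F_RECV/HOST_F_STOP) is cleared here.
	 */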
	host->flags &= HOST_F_ACTIVE | HOST_F_DMA;

	host->dma.len = 0;
	host->dma.dir = 0;

	host->pio.index = 0;
	host->pio.offset = 0;
	host->pio.len = 0;

	host->status = HOST_S_IDLE;

	mmc_request_done(host->mmc, mrq);
}

static void au1xmmc_tasklet_finish(unsigned long param)
{
	struct au1xmmc_host *host = (struct au1xmmc_host *) param;
	au1xmmc_finish_request(host);
}

static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
				struct mmc_command *cmd, struct mmc_data *data)
{
	u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		break;
	case MMC_RSP_R1:
		mmccmd |= SD_CMD_RT_1;
		break;
	case MMC_RSP_R1B:
		mmccmd |= SD_CMD_RT_1B;
		break;
	case MMC_RSP_R2:
		mmccmd |= SD_CMD_RT_2;
		break;
	case MMC_RSP_R3:
		mmccmd |= SD_CMD_RT_3;
		break;
	default:
		printk(KERN_INFO "au1xmmc: unhandled response type %02x\n",
			mmc_resp_type(cmd));
		return -EINVAL;
	}

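	/* Pick the controller transfer type for the command; judging by the
	 * code below, SD_CMD_CT_2/SD_CMD_CT_4 select single/multi-block reads
	 * and SD_CMD_CT_1/SD_CMD_CT_3 single/multi-block writes.
	 */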
	if (data) {
		if (data->flags & MMC_DATA_READ) {
			if (data->blocks > 1)
				mmccmd |= SD_CMD_CT_4;
			else
				mmccmd |= SD_CMD_CT_2;
		} else if (data->flags & MMC_DATA_WRITE) {
			if (data->blocks > 1)
				mmccmd |= SD_CMD_CT_3;
			else
				mmccmd |= SD_CMD_CT_1;
		}
	}

	au_writel(cmd->arg, HOST_CMDARG(host));
	au_sync();

	if (wait)
		IRQ_OFF(host, SD_CONFIG_CR);

	au_writel((mmccmd | SD_CMD_GO), HOST_CMD(host));
	au_sync();

	/* Wait for the command to go on the line */
	while (1) {
		if (!(au_readl(HOST_CMD(host)) & SD_CMD_GO))
			break;
	}

	/* Wait for the command to come back */
	if (wait) {
		u32 status = au_readl(HOST_STATUS(host));

		while (!(status & SD_STATUS_CR))
			status = au_readl(HOST_STATUS(host));

		/* Clear the CR status */
		au_writel(SD_STATUS_CR, HOST_STATUS(host));

		IRQ_ON(host, SD_CONFIG_CR);
	}

	return 0;
}

static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_data *data;
	u32 crc;

	WARN_ON(host->status != HOST_S_DATA && host->status != HOST_S_STOP);

	if (host->mrq == NULL)
		return;

	data = mrq->cmd->data;

	if (status == 0)
		status = au_readl(HOST_STATUS(host));

	/* The transaction is really over when the SD_STATUS_DB bit is clear */
	while ((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB))
		status = au_readl(HOST_STATUS(host));

	data->error = 0;
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir);

	/* Process any errors */
	crc = (status & (SD_STATUS_WC | SD_STATUS_RC));
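	/* For a write, the low three status bits presumably carry the card's
	 * CRC response token; 0x02 ("010", data accepted) is the only value
	 * treated as success below, anything else is flagged as a CRC error.
	 */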
	if (host->flags & HOST_F_XMIT)
		crc |= ((status & 0x07) == 0x02) ? 0 : 1;

	if (crc)
		data->error = -EILSEQ;

	/* Clear the CRC bits */
	au_writel(SD_STATUS_WC | SD_STATUS_RC, HOST_STATUS(host));

	data->bytes_xfered = 0;

	if (!data->error) {
		if (host->flags & HOST_F_DMA) {
#ifdef CONFIG_SOC_AU1200	/* DBDMA */
			u32 chan = DMA_CHANNEL(host);

			chan_tab_t *c = *((chan_tab_t **) chan);
			au1x_dma_chan_t *cp = c->chan_ptr;
			data->bytes_xfered = cp->ddma_bytecnt;
#endif
		} else
			data->bytes_xfered =
				(data->blocks * data->blksz) - host->pio.len;
	}

	au1xmmc_finish_request(host);
}

static void au1xmmc_tasklet_data(unsigned long param)
{
	struct au1xmmc_host *host = (struct au1xmmc_host *) param;

	u32 status = au_readl(HOST_STATUS(host));
	au1xmmc_data_complete(host, status);
}

#define AU1XMMC_MAX_TRANSFER 8

static void au1xmmc_send_pio(struct au1xmmc_host *host)
{
	struct mmc_data *data = NULL;
	int sg_len, max, count = 0;
	unsigned char *sg_ptr;
	u32 status = 0;
	struct scatterlist *sg;

	data = host->mrq->data;

	if (!(host->flags & HOST_F_XMIT))
		return;

	/* This is the pointer to the data buffer */
	sg = &data->sg[host->pio.index];
	sg_ptr = sg_virt(sg) + host->pio.offset;

	/* This is the space left inside the buffer */
	sg_len = data->sg[host->pio.index].length - host->pio.offset;

	/* Check if we need less than the size of the sg_buffer */
	max = (sg_len > host->pio.len) ? host->pio.len : sg_len;
	if (max > AU1XMMC_MAX_TRANSFER)
		max = AU1XMMC_MAX_TRANSFER;

	for (count = 0; count < max; count++) {
		unsigned char val;

		status = au_readl(HOST_STATUS(host));

		if (!(status & SD_STATUS_TH))
			break;

		val = *sg_ptr++;

		au_writel((unsigned long) val, HOST_TXPORT(host));
		au_sync();
	}

	host->pio.len -= count;
	host->pio.offset += count;

	if (count == sg_len) {
		host->pio.index++;
		host->pio.offset = 0;
	}

	if (host->pio.len == 0) {
		IRQ_OFF(host, SD_CONFIG_TH);

		if (host->flags & HOST_F_STOP)
			SEND_STOP(host);

		tasklet_schedule(&host->data_task);
	}
}

static void au1xmmc_receive_pio(struct au1xmmc_host *host)
{
	struct mmc_data *data = NULL;
	int sg_len = 0, max = 0, count = 0;
	unsigned char *sg_ptr = NULL;
	u32 status = 0;
	struct scatterlist *sg;

	data = host->mrq->data;

	if (!(host->flags & HOST_F_RECV))
		return;

	max = host->pio.len;

	if (host->pio.index < host->dma.len) {
		sg = &data->sg[host->pio.index];
		sg_ptr = sg_virt(sg) + host->pio.offset;

		/* This is the space left inside the buffer */
		sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;

		/* Check if we need less than the size of the sg_buffer */
		if (sg_len < max)
			max = sg_len;
	}

	if (max > AU1XMMC_MAX_TRANSFER)
		max = AU1XMMC_MAX_TRANSFER;

	for (count = 0; count < max; count++) {
		u32 val;
		status = au_readl(HOST_STATUS(host));

		if (!(status & SD_STATUS_NE))
			break;

		if (status & SD_STATUS_RC) {
			DBG("RX CRC Error [%d + %d].\n", host->pdev->id,
					host->pio.len, count);
			break;
		}

		if (status & SD_STATUS_RO) {
			DBG("RX Overrun [%d + %d]\n", host->pdev->id,
					host->pio.len, count);
			break;
		} else if (status & SD_STATUS_RU) {
			DBG("RX Underrun [%d + %d]\n", host->pdev->id,
					host->pio.len, count);
			break;
		}

		val = au_readl(HOST_RXPORT(host));

		if (sg_ptr)
			*sg_ptr++ = (unsigned char) (val & 0xFF);
	}

	host->pio.len -= count;
	host->pio.offset += count;

	if (sg_len && count == sg_len) {
		host->pio.index++;
		host->pio.offset = 0;
	}

	if (host->pio.len == 0) {
		/* IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); */
		IRQ_OFF(host, SD_CONFIG_NE);

		if (host->flags & HOST_F_STOP)
			SEND_STOP(host);

		tasklet_schedule(&host->data_task);
	}
}

/* static void au1xmmc_cmd_complete
 * This is called when a command has been completed - grab the response
 * and check for errors.  Then start the data transfer if it is indicated.
 */
static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_command *cmd;
	int trans;

	if (!host->mrq)
		return;

	cmd = mrq->cmd;
	cmd->error = 0;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			u32 r[4];
			int i;

			r[0] = au_readl(host->iobase + SD_RESP3);
			r[1] = au_readl(host->iobase + SD_RESP2);
			r[2] = au_readl(host->iobase + SD_RESP1);
			r[3] = au_readl(host->iobase + SD_RESP0);

			/* The CRC is omitted from the response, so really
			 * we only got 120 bits, but the engine expects
			 * 128 bits, so we have to shift things up.
			 */
			for (i = 0; i < 4; i++) {
				cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8;
				if (i != 3)
					cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24;
			}
		} else {
			/* Technically, we should be getting all 48 bits of
			 * the response (SD_RESP1 + SD_RESP2), but because
			 * our response omits the CRC, our data ends up
			 * being shifted 8 bits to the right. In this case,
			 * that means that the OSR data starts at bit 31,
			 * so we can just read RESP0 and return that.
			 */
			cmd->resp[0] = au_readl(host->iobase + SD_RESP0);
		}
	}

	/* Figure out errors */
	if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC))
		cmd->error = -EILSEQ;

	trans = host->flags & (HOST_F_XMIT | HOST_F_RECV);

	if (!trans || cmd->error) {
		IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF);
		tasklet_schedule(&host->finish_task);
		return;
	}

	host->status = HOST_S_DATA;

	if (host->flags & HOST_F_DMA) {
#ifdef CONFIG_SOC_AU1200	/* DBDMA */
		u32 channel = DMA_CHANNEL(host);

		/* Start the DMA as soon as the buffer gets something in it */
		if (host->flags & HOST_F_RECV) {
			u32 mask = SD_STATUS_DB | SD_STATUS_NE;

			while ((status & mask) != mask)
				status = au_readl(HOST_STATUS(host));
		}

		au1xxx_dbdma_start(channel);
#endif
	}
}

static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate)
{
	unsigned int pbus = get_au1x00_speed();
	unsigned int divisor;
	u32 config;

	/* From databook:
	 * divisor = ((((cpuclock / sbus_divisor) / 2) / mmcclock) / 2) - 1
	 */
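	/* Purely illustrative numbers (not from the databook): assuming a
	 * hypothetical 396 MHz CPU clock and a system-bus divisor of 4,
	 * pbus below works out to 396 / 4 / 2 = 49.5 MHz, so a requested
	 * 24 MHz card clock gives divisor = ((49500000 / 24000000) / 2) - 1 = 0.
	 */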

	pbus /= ((au_readl(SYS_POWERCTRL) & 0x3) + 2);
	pbus /= 2;

	divisor = ((pbus / rate) / 2) - 1;

	config = au_readl(HOST_CONFIG(host));

	config &= ~(SD_CONFIG_DIV);
	config |= (divisor & SD_CONFIG_DIV) | SD_CONFIG_DE;

	au_writel(config, HOST_CONFIG(host));
	au_sync();
}

static int
au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
{
	int datalen = data->blocks * data->blksz;

	if (data->flags & MMC_DATA_READ)
		host->flags |= HOST_F_RECV;
	else
		host->flags |= HOST_F_XMIT;

	if (host->mrq->stop)
		host->flags |= HOST_F_STOP;

	host->dma.dir = DMA_BIDIRECTIONAL;

	host->dma.len = dma_map_sg(mmc_dev(host->mmc), data->sg,
				   data->sg_len, host->dma.dir);

	if (host->dma.len == 0)
		return -ETIMEDOUT;

	au_writel(data->blksz - 1, HOST_BLKSIZE(host));

	if (host->flags & HOST_F_DMA) {
#ifdef CONFIG_SOC_AU1200	/* DBDMA */
		int i;
		u32 channel = DMA_CHANNEL(host);

		au1xxx_dbdma_stop(channel);

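		/* Queue one DBDMA descriptor per scatterlist entry; only the
		 * last one uses DDMA_FLAGS_IE, so the completion interrupt
		 * fires once, when the whole request has been transferred.
		 */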
		for (i = 0; i < host->dma.len; i++) {
			u32 ret = 0, flags = DDMA_FLAGS_NOIE;
			struct scatterlist *sg = &data->sg[i];
			int sg_len = sg->length;

			int len = (datalen > sg_len) ? sg_len : datalen;

			if (i == host->dma.len - 1)
				flags = DDMA_FLAGS_IE;

			if (host->flags & HOST_F_XMIT) {
				ret = au1xxx_dbdma_put_source_flags(channel,
					(void *) sg_virt(sg), len, flags);
			} else {
				ret = au1xxx_dbdma_put_dest_flags(channel,
					(void *) sg_virt(sg), len, flags);
			}

			if (!ret)
				goto dataerr;

			datalen -= len;
		}
#endif
	} else {
		host->pio.index = 0;
		host->pio.offset = 0;
		host->pio.len = datalen;

		if (host->flags & HOST_F_XMIT)
			IRQ_ON(host, SD_CONFIG_TH);
		else
			IRQ_ON(host, SD_CONFIG_NE);
			/* IRQ_ON(host, SD_CONFIG_RA | SD_CONFIG_RF); */
	}

	return 0;

dataerr:
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
		     host->dma.dir);
	return -ETIMEDOUT;
}

/* static void au1xmmc_request
 * This actually starts a command or data transaction.
 */
static void au1xmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct au1xmmc_host *host = mmc_priv(mmc);
	unsigned int flags = 0;
	int ret = 0;

	WARN_ON(irqs_disabled());
	WARN_ON(host->status != HOST_S_IDLE);

	host->mrq = mrq;
	host->status = HOST_S_CMD;

	if (mrq->data) {
		FLUSH_FIFO(host);
		flags = mrq->data->flags;
		ret = au1xmmc_prepare_data(host, mrq->data);
	}

	if (!ret)
		ret = au1xmmc_send_command(host, 0, mrq->cmd, mrq->data);

	if (ret) {
		mrq->cmd->error = ret;
		au1xmmc_finish_request(host);
	}
}

static void au1xmmc_reset_controller(struct au1xmmc_host *host)
{
	/* Apply the clock */
	au_writel(SD_ENABLE_CE, HOST_ENABLE(host));
	au_sync_delay(1);

	au_writel(SD_ENABLE_R | SD_ENABLE_CE, HOST_ENABLE(host));
	au_sync_delay(5);

	au_writel(~0, HOST_STATUS(host));
	au_sync();

	au_writel(0, HOST_BLKSIZE(host));
	au_writel(0x001fffff, HOST_TIMEOUT(host));
	au_sync();

	au_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
	au_sync();

	au_writel(SD_CONFIG2_EN | SD_CONFIG2_FF, HOST_CONFIG2(host));
	au_sync_delay(1);

	au_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
	au_sync();

	/* Configure interrupts */
	au_writel(AU1XMMC_INTERRUPTS, HOST_CONFIG(host));
	au_sync();
}

static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct au1xmmc_host *host = mmc_priv(mmc);
	u32 config2;

	if (ios->power_mode == MMC_POWER_OFF)
		au1xmmc_set_power(host, 0);
	else if (ios->power_mode == MMC_POWER_ON)
		au1xmmc_set_power(host, 1);

	if (ios->clock && ios->clock != host->clock) {
		au1xmmc_set_clock(host, ios->clock);
		host->clock = ios->clock;
	}

	config2 = au_readl(HOST_CONFIG2(host));
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		config2 |= SD_CONFIG2_WB;
		break;
	case MMC_BUS_WIDTH_1:
		config2 &= ~SD_CONFIG2_WB;
		break;
	}
	au_writel(config2, HOST_CONFIG2(host));
	au_sync();
}

#define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT)
#define STATUS_DATA_IN  (SD_STATUS_NE)
#define STATUS_DATA_OUT (SD_STATUS_TH)

static irqreturn_t au1xmmc_irq(int irq, void *dev_id)
{
	struct au1xmmc_host *host = dev_id;
	u32 status;

	status = au_readl(HOST_STATUS(host));

	if (!(status & SD_STATUS_I))
		return IRQ_NONE;	/* not ours */

	if (host->mrq && (status & STATUS_TIMEOUT)) {
		if (status & SD_STATUS_RAT)
			host->mrq->cmd->error = -ETIMEDOUT;
		else if (status & SD_STATUS_DT)
			host->mrq->data->error = -ETIMEDOUT;

		/* In PIO mode, interrupts might still be enabled */
		IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH);

		/* IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); */
		tasklet_schedule(&host->finish_task);
	}
#if 0
	else if (status & SD_STATUS_DD) {
		/* Sometimes we get a DD before a NE in PIO mode */
		if (!(host->flags & HOST_F_DMA) && (status & SD_STATUS_NE))
			au1xmmc_receive_pio(host);
		else {
			au1xmmc_data_complete(host, status);
			/* tasklet_schedule(&host->data_task); */
		}
	}
#endif
	else if (status & SD_STATUS_CR) {
		if (host->status == HOST_S_CMD)
			au1xmmc_cmd_complete(host, status);

	} else if (!(host->flags & HOST_F_DMA)) {
		if ((host->flags & HOST_F_XMIT) && (status & STATUS_DATA_OUT))
			au1xmmc_send_pio(host);
		else if ((host->flags & HOST_F_RECV) && (status & STATUS_DATA_IN))
			au1xmmc_receive_pio(host);

	} else if (status & 0x203F3C70) {
		DBG("Unhandled status %8.8x\n", host->pdev->id, status);
	}

	au_writel(status, HOST_STATUS(host));
	au_sync();

	return IRQ_HANDLED;
}

#ifdef CONFIG_SOC_AU1200
/* 8bit memory DMA device */
static dbdev_tab_t au1xmmc_mem_dbdev = {
	.dev_id		= DSCR_CMD0_ALWAYS,
	.dev_flags	= DEV_FLAGS_ANYUSE,
	.dev_tsize	= 0,
	.dev_devwidth	= 8,
	.dev_physaddr	= 0x00000000,
	.dev_intlevel	= 0,
	.dev_intpolarity = 0,
};
static int memid;

static void au1xmmc_dbdma_callback(int irq, void *dev_id)
{
	struct au1xmmc_host *host = (struct au1xmmc_host *)dev_id;

	/* Avoid spurious interrupts */
	if (!host->mrq)
		return;

	if (host->flags & HOST_F_STOP)
		SEND_STOP(host);

	tasklet_schedule(&host->data_task);
}

static int au1xmmc_dbdma_init(struct au1xmmc_host *host)
{
	struct resource *res;
	int txid, rxid;

	res = platform_get_resource(host->pdev, IORESOURCE_DMA, 0);
	if (!res)
		return -ENODEV;
	txid = res->start;

	res = platform_get_resource(host->pdev, IORESOURCE_DMA, 1);
	if (!res)
		return -ENODEV;
	rxid = res->start;

	if (!memid)
		return -ENODEV;

	host->tx_chan = au1xxx_dbdma_chan_alloc(memid, txid,
			au1xmmc_dbdma_callback, (void *)host);
	if (!host->tx_chan) {
		dev_err(&host->pdev->dev, "cannot allocate TX DMA\n");
		return -ENODEV;
	}

	host->rx_chan = au1xxx_dbdma_chan_alloc(rxid, memid,
			au1xmmc_dbdma_callback, (void *)host);
	if (!host->rx_chan) {
		dev_err(&host->pdev->dev, "cannot allocate RX DMA\n");
		au1xxx_dbdma_chan_free(host->tx_chan);
		return -ENODEV;
	}

	au1xxx_dbdma_set_devwidth(host->tx_chan, 8);
	au1xxx_dbdma_set_devwidth(host->rx_chan, 8);

	au1xxx_dbdma_ring_alloc(host->tx_chan, AU1XMMC_DESCRIPTOR_COUNT);
	au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT);

	/* DBDMA is good to go */
	host->flags |= HOST_F_DMA;

	return 0;
}

static void au1xmmc_dbdma_shutdown(struct au1xmmc_host *host)
{
	if (host->flags & HOST_F_DMA) {
		host->flags &= ~HOST_F_DMA;
		au1xxx_dbdma_chan_free(host->tx_chan);
		au1xxx_dbdma_chan_free(host->rx_chan);
	}
}
#endif

static const struct mmc_host_ops au1xmmc_ops = {
	.request	= au1xmmc_request,
	.set_ios	= au1xmmc_set_ios,
	.get_ro		= au1xmmc_card_readonly,
};

static void au1xmmc_poll_event(unsigned long arg)
{
	struct au1xmmc_host *host = (struct au1xmmc_host *)arg;
	int card = au1xmmc_card_inserted(host);
	int controller = (host->flags & HOST_F_ACTIVE) ? 1 : 0;

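	/* If the socket state no longer matches what the controller thinks
	 * (HOST_F_ACTIVE), update the flag and ask the MMC core to rescan.
	 */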
	if (card != controller) {
		host->flags &= ~HOST_F_ACTIVE;
		if (card)
			host->flags |= HOST_F_ACTIVE;
		mmc_detect_change(host->mmc, 0);
	}

#ifdef DEBUG
	if (host->mrq != NULL) {
		u32 status = au_readl(HOST_STATUS(host));
		DBG("PENDING - %8.8x\n", host->pdev->id, status);
	}
#endif
	mod_timer(&host->timer, jiffies + AU1XMMC_DETECT_TIMEOUT);
}

static void au1xmmc_init_cd_poll_timer(struct au1xmmc_host *host)
{
	init_timer(&host->timer);
	host->timer.function = au1xmmc_poll_event;
	host->timer.data = (unsigned long)host;
	host->timer.expires = jiffies + AU1XMMC_DETECT_TIMEOUT;
}

static int __devinit au1xmmc_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct au1xmmc_host *host;
	struct resource *r;
	int ret;

	mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "no memory for mmc_host\n");
		ret = -ENOMEM;
		goto out0;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->platdata = pdev->dev.platform_data;
	host->pdev = pdev;

	ret = -ENODEV;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "no mmio defined\n");
		goto out1;
	}

	host->ioarea = request_mem_region(r->start, r->end - r->start + 1,
					  pdev->name);
	if (!host->ioarea) {
		dev_err(&pdev->dev, "mmio already in use\n");
		goto out1;
	}

	host->iobase = (unsigned long)ioremap(r->start, 0x3c);
	if (!host->iobase) {
		dev_err(&pdev->dev, "cannot remap mmio\n");
		goto out2;
	}

	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!r) {
		dev_err(&pdev->dev, "no IRQ defined\n");
		goto out3;
	}

	host->irq = r->start;
	/* IRQ is shared among both SD controllers */
	ret = request_irq(host->irq, au1xmmc_irq, IRQF_SHARED,
			  DRIVER_NAME, host);
	if (ret) {
		dev_err(&pdev->dev, "cannot grab IRQ\n");
		goto out3;
	}

	mmc->ops = &au1xmmc_ops;

	mmc->f_min = 450000;
	mmc->f_max = 24000000;

	mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE;
	mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT;

	mmc->max_blk_size = 2048;
	mmc->max_blk_count = 512;

	mmc->ocr_avail = AU1XMMC_OCR;
	mmc->caps = MMC_CAP_4_BIT_DATA;

	host->status = HOST_S_IDLE;

	/* board-specific card-detect setup, if any */
	if (host->platdata && host->platdata->cd_setup) {
		ret = host->platdata->cd_setup(mmc, 1);
		if (ret) {
			dev_err(&pdev->dev, "board CD setup failed\n");
			goto out4;
		}
	} else {
		/* poll the board-specific is-card-in-socket method */
		au1xmmc_init_cd_poll_timer(host);
	}

	tasklet_init(&host->data_task, au1xmmc_tasklet_data,
		     (unsigned long)host);

	tasklet_init(&host->finish_task, au1xmmc_tasklet_finish,
		     (unsigned long)host);

#ifdef CONFIG_SOC_AU1200
	ret = au1xmmc_dbdma_init(host);
	if (ret)
		printk(KERN_INFO DRIVER_NAME ": DBDMA init failed; using PIO\n");
#endif

#ifdef CONFIG_LEDS_CLASS
	if (host->platdata && host->platdata->led) {
		struct led_classdev *led = host->platdata->led;
		led->name = mmc_hostname(mmc);
		led->brightness = LED_OFF;
		led->default_trigger = mmc_hostname(mmc);
		ret = led_classdev_register(mmc_dev(mmc), led);
		if (ret)
			goto out5;
	}
#endif

	au1xmmc_reset_controller(host);

	ret = mmc_add_host(mmc);
	if (ret) {
		dev_err(&pdev->dev, "cannot add mmc host\n");
		goto out6;
	}

	platform_set_drvdata(pdev, mmc);

	/* start the card-detect poll timer if necessary */
	if (!(host->platdata && host->platdata->cd_setup))
		add_timer(&host->timer);

	printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X"
		" (mode=%s)\n", pdev->id, host->iobase,
		host->flags & HOST_F_DMA ? "dma" : "pio");

	return 0;	/* all ok */

out6:
#ifdef CONFIG_LEDS_CLASS
	if (host->platdata && host->platdata->led)
		led_classdev_unregister(host->platdata->led);
out5:
#endif
	au_writel(0, HOST_ENABLE(host));
	au_writel(0, HOST_CONFIG(host));
	au_writel(0, HOST_CONFIG2(host));
	au_sync();

#ifdef CONFIG_SOC_AU1200
	au1xmmc_dbdma_shutdown(host);
#endif

	tasklet_kill(&host->data_task);
	tasklet_kill(&host->finish_task);

	if (host->platdata && host->platdata->cd_setup)
		host->platdata->cd_setup(mmc, 0);
out4:
	free_irq(host->irq, host);
out3:
	iounmap((void *)host->iobase);
out2:
	release_resource(host->ioarea);
	kfree(host->ioarea);
out1:
	mmc_free_host(mmc);
out0:
	return ret;
}

static int __devexit au1xmmc_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct au1xmmc_host *host;

	if (mmc) {
		host = mmc_priv(mmc);

		mmc_remove_host(mmc);

#ifdef CONFIG_LEDS_CLASS
		if (host->platdata && host->platdata->led)
			led_classdev_unregister(host->platdata->led);
#endif

		if (host->platdata && host->platdata->cd_setup)
			host->platdata->cd_setup(mmc, 0);
		else
			del_timer_sync(&host->timer);

		au_writel(0, HOST_ENABLE(host));
		au_writel(0, HOST_CONFIG(host));
		au_writel(0, HOST_CONFIG2(host));
		au_sync();

		tasklet_kill(&host->data_task);
		tasklet_kill(&host->finish_task);

#ifdef CONFIG_SOC_AU1200
		au1xmmc_dbdma_shutdown(host);
#endif
		au1xmmc_set_power(host, 0);

		free_irq(host->irq, host);
		iounmap((void *)host->iobase);
		release_resource(host->ioarea);
		kfree(host->ioarea);

		mmc_free_host(mmc);
	}
	return 0;
}

static struct platform_driver au1xmmc_driver = {
	.probe		= au1xmmc_probe,
	.remove		= au1xmmc_remove,
	.suspend	= NULL,
	.resume		= NULL,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init au1xmmc_init(void)
{
#ifdef CONFIG_SOC_AU1200
	/* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride
	 * of 8 bits.  And since devices are shared, we need to create
	 * our own to avoid freaking out other devices.
	 */
	memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev);
	if (!memid)
		printk(KERN_ERR "au1xmmc: cannot add memory dbdma dev\n");
#endif
	return platform_driver_register(&au1xmmc_driver);
}

static void __exit au1xmmc_exit(void)
{
#ifdef CONFIG_SOC_AU1200
	if (memid)
		au1xxx_ddma_del_device(memid);
#endif
	platform_driver_unregister(&au1xmmc_driver);
}

module_init(au1xmmc_init);
module_exit(au1xmmc_exit);

MODULE_AUTHOR("Advanced Micro Devices, Inc");
MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:au1xxx-mmc");