]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/mmc/at91_mci.c
AT91 MMC 2 : Use platform resources
[net-next-2.6.git] / drivers / mmc / at91_mci.c
CommitLineData
65dbf343
AV
1/*
2 * linux/drivers/mmc/at91_mci.c - ATMEL AT91RM9200 MCI Driver
3 *
4 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
5 *
6 * Copyright (C) 2006 Malcolm Noyes
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13/*
14 This is the AT91RM9200 MCI driver that has been tested with both MMC cards
15 and SD-cards. Boards that support write protect are now supported.
16 The CCAT91SBC001 board does not support SD cards.
17
18 The three entry points are at91_mci_request, at91_mci_set_ios
19 and at91_mci_get_ro.
20
21 SET IOS
22 This configures the device to put it into the correct mode and clock speed
23 required.
24
25 MCI REQUEST
26 MCI request processes the commands sent in the mmc_request structure. This
27 can consist of a processing command and a stop command in the case of
28 multiple block transfers.
29
30 There are three main types of request, commands, reads and writes.
31
32 Commands are straightforward. The command is submitted to the controller and
33 the request function returns. When the controller generates an interrupt to indicate
34 the command is finished, the response to the command are read and the mmc_request_done
35 function called to end the request.
36
37 Reads and writes work in a similar manner to normal commands but involve the PDC (DMA)
38 controller to manage the transfers.
39
40 A read is done from the controller directly to the scatterlist passed in from the request.
41 Due to a bug in the controller, when a read is completed, all the words are byte
42 swapped in the scatterlist buffers.
43
44 The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY
45
46 A write is slightly different in that the bytes to write are read from the scatterlist
47 into a dma memory buffer (this is in case the source buffer should be read only). The
48 entire write buffer is then done from this single dma memory buffer.
49
50 The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY
51
52 GET RO
53 Gets the status of the write protect pin, if available.
54*/
55
65dbf343
AV
56#include <linux/module.h>
57#include <linux/moduleparam.h>
58#include <linux/init.h>
59#include <linux/ioport.h>
60#include <linux/platform_device.h>
61#include <linux/interrupt.h>
62#include <linux/blkdev.h>
63#include <linux/delay.h>
64#include <linux/err.h>
65#include <linux/dma-mapping.h>
66#include <linux/clk.h>
67
68#include <linux/mmc/host.h>
69#include <linux/mmc/protocol.h>
70
71#include <asm/io.h>
72#include <asm/irq.h>
73#include <asm/mach/mmc.h>
74#include <asm/arch/board.h>
75#include <asm/arch/gpio.h>
55d8baee
AV
76#include <asm/arch/at91_mci.h>
77#include <asm/arch/at91_pdc.h>
65dbf343
AV
78
79#define DRIVER_NAME "at91_mci"
80
81#undef SUPPORT_4WIRE
82
65dbf343
AV
83static struct clk *mci_clk;
84
85#define FL_SENT_COMMAND (1 << 0)
86#define FL_SENT_STOP (1 << 1)
87
88
89
e0b19b83
AV
/* MMIO accessors: all MCI/PDC registers live at host->baseaddr + offset */
#define at91_mci_read(host, reg)	__raw_readl((host)->baseaddr + (reg))
#define at91_mci_write(host, reg, val)	__raw_writel((val), (host)->baseaddr + (reg))

/*
 * Low level type for this driver
 */
struct at91mci_host
{
	struct mmc_host *mmc;		/* back-pointer to the MMC core host */
	struct mmc_command *cmd;	/* command currently in flight */
	struct mmc_request *request;	/* request currently being processed */

	void __iomem *baseaddr;		/* ioremapped controller registers */
	int irq;			/* MCI controller interrupt line */

	struct at91_mmc_data *board;	/* board wiring: detect/wp/vcc pins, wire4 */
	int present;			/* card present; -1 when there is no detect pin */

	/*
	 * Flag indicating when the command has been sent. This is used to
	 * work out whether or not to send the stop
	 */
	unsigned int flags;
	/* flag for current bus settings */
	u32 bus_mode;

	/* DMA buffer used for transmitting */
	unsigned int* buffer;
	dma_addr_t physical_address;	/* DMA handle of 'buffer' */
	unsigned int total_length;	/* size of 'buffer' in bytes */

	/* Latest in the scatterlist that has been enabled for transfer, but not freed */
	int in_use_index;

	/* Latest in the scatterlist that has been enabled for transfer */
	int transfer_index;
};
128
129/*
130 * Copy from sg to a dma block - used for transfers
131 */
132static inline void at91mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
133{
134 unsigned int len, i, size;
135 unsigned *dmabuf = host->buffer;
136
137 size = host->total_length;
138 len = data->sg_len;
139
140 /*
141 * Just loop through all entries. Size might not
142 * be the entire list though so make sure that
143 * we do not transfer too much.
144 */
145 for (i = 0; i < len; i++) {
146 struct scatterlist *sg;
147 int amount;
148 int index;
149 unsigned int *sgbuffer;
150
151 sg = &data->sg[i];
152
153 sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
154 amount = min(size, sg->length);
155 size -= amount;
156 amount /= 4;
157
158 for (index = 0; index < amount; index++)
159 *dmabuf++ = swab32(sgbuffer[index]);
160
161 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
162
163 if (size == 0)
164 break;
165 }
166
167 /*
168 * Check that we didn't get a request to transfer
169 * more data than can fit into the SG list.
170 */
171 BUG_ON(size != 0);
172}
173
/*
 * Prepare a dma read
 *
 * Primes the PDC with up to two scatterlist entries: the "current"
 * transfer (RPR/RCR) and the chained "next" transfer (RNPR/RNCR).
 * Called to start a read, and again from at91mci_post_dma_read() to
 * keep the PDC fed while earlier entries complete.
 */
static void at91mci_pre_dma_read(struct at91mci_host *host)
{
	int i;
	struct scatterlist *sg;
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("pre dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	/* i == 0 is the current PDC buffer, i == 1 the chained next buffer */
	for (i = 0; i < 2; i++) {
		/* nothing left to transfer */
		if (host->transfer_index >= data->sg_len) {
			pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
			break;
		}

		/* Check to see if this needs filling */
		if (i == 0) {
			/* non-zero counter means a transfer is still running there */
			if (at91_mci_read(host, AT91_PDC_RCR) != 0) {
				pr_debug("Transfer active in current\n");
				continue;
			}
		}
		else {
			if (at91_mci_read(host, AT91_PDC_RNCR) != 0) {
				pr_debug("Transfer active in next\n");
				continue;
			}
		}

		/* Setup the next transfer */
		pr_debug("Using transfer index %d\n", host->transfer_index);

		sg = &data->sg[host->transfer_index++];
		pr_debug("sg = %p\n", sg);

		sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE);

		pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);

		if (i == 0) {
			at91_mci_write(host, AT91_PDC_RPR, sg->dma_address);
			/* PDC counters are in 32-bit words, not bytes */
			at91_mci_write(host, AT91_PDC_RCR, sg->length / 4);
		}
		else {
			at91_mci_write(host, AT91_PDC_RNPR, sg->dma_address);
			at91_mci_write(host, AT91_PDC_RNCR, sg->length / 4);
		}
	}

	pr_debug("pre dma read done\n");
}
241
/*
 * Handle after a dma read
 *
 * For each scatterlist entry the PDC has filled: unmap it, byte-swap
 * the words in place (the controller delivers them swapped - see the
 * note at the top of the file), then either prime the next PDC
 * transfer or, once the list is exhausted, wait for RXBUFF.
 */
static void at91mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	/* finish every entry that has been handed to the PDC so far */
	while (host->in_use_index < host->transfer_index) {
		unsigned int *buffer;
		int index;
		int len;

		struct scatterlist *sg;

		pr_debug("finishing index %d\n", host->in_use_index);

		sg = &data->sg[host->in_use_index++];

		pr_debug("Unmapping page %08X\n", sg->dma_address);

		dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);

		/* Swap the contents of the buffer */
		buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
		pr_debug("buffer = %p, length = %d\n", buffer, sg->length);

		data->bytes_xfered += sg->length;

		len = sg->length / 4;	/* word count */

		for (index = 0; index < len; index++) {
			buffer[index] = swab32(buffer[index]);
		}
		kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		/* DMA wrote behind the cache's back - make the data visible */
		flush_dcache_page(sg->page);
	}

	/* Is there another transfer to trigger? */
	if (host->transfer_index < data->sg_len)
		at91mci_pre_dma_read(host);
	else {
		/* all queued: interrupt on last-buffer-full, stop the PDC */
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
		at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);
	}

	pr_debug("post dma read done\n");
}
304
305/*
306 * Handle transmitted data
307 */
308static void at91_mci_handle_transmitted(struct at91mci_host *host)
309{
310 struct mmc_command *cmd;
311 struct mmc_data *data;
312
b44fb7a0 313 pr_debug("Handling the transmit\n");
65dbf343
AV
314
315 /* Disable the transfer */
e0b19b83 316 at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);
65dbf343
AV
317
318 /* Now wait for cmd ready */
e0b19b83
AV
319 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);
320 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
65dbf343
AV
321
322 cmd = host->cmd;
323 if (!cmd) return;
324
325 data = cmd->data;
326 if (!data) return;
327
328 data->bytes_xfered = host->total_length;
329}
330
/*
 * Enable the controller
 */
static void at91_mci_enable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
	/* mask all interrupt sources until one is explicitly wanted */
	at91_mci_write(host, AT91_MCI_IDR, 0xFFFFFFFF);
	/* generous data timeout: DTOCYC cycles with the 1M multiplier */
	at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
	/* NOTE(review): 0x834A is a magic mode-register value inherited from
	 * the original driver - confirm the field packing against the
	 * AT91RM9200 datasheet before changing it */
	at91_mci_write(host, AT91_MCI_MR, 0x834A);
	/* clear the SD card register (1-bit bus; slot selection presumably
	 * defaults to A - verify on multi-slot boards) */
	at91_mci_write(host, AT91_MCI_SDCR, 0x0);
}
342
/*
 * Disable the controller
 */
static void at91_mci_disable(struct at91mci_host *host)
{
	/* disable the interface and reset the controller in one write */
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
}
350
/*
 * Send a command
 * return the interrupts to enable
 *
 * Builds the CMDR value from the mmc_command, programs block length and
 * PDC mode, sets up the PDC for a read or write when the command carries
 * data, then writes ARGR/CMDR.  The returned mask is the completion
 * interrupt the caller must wait for: CMDRDY for plain commands, ENDRX
 * for reads, TXBUFE for writes.
 */
static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int cmdr, mr;
	unsigned int block_length;
	struct mmc_data *data = cmd->data;

	unsigned int blocks;
	unsigned int ier = 0;

	host->cmd = cmd;

	/* Not sure if this is needed */
#if 0
	if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
		pr_debug("Clearing timeout\n");
		at91_mci_write(host, AT91_MCI_ARGR, 0);
		at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
		while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
			/* spin */
			pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
		}
	}
#endif
	cmdr = cmd->opcode;

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		cmdr |= AT91_MCI_RSPTYP_NONE;
	else {
		/* if a response is expected then allow maximum response latency */
		cmdr |= AT91_MCI_MAXLAT;
		/* set 136 bit response for R2, 48 bit response otherwise */
		if (mmc_resp_type(cmd) == MMC_RSP_R2)
			cmdr |= AT91_MCI_RSPTYP_136;
		else
			cmdr |= AT91_MCI_RSPTYP_48;
	}

	if (data) {
		block_length = data->blksz;
		blocks = data->blocks;

		/* always set data start - also set direction flag for read */
		if (data->flags & MMC_DATA_READ)
			cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
		else if (data->flags & MMC_DATA_WRITE)
			cmdr |= AT91_MCI_TRCMD_START;

		if (data->flags & MMC_DATA_STREAM)
			cmdr |= AT91_MCI_TRTYP_STREAM;
		if (data->flags & MMC_DATA_MULTI)
			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
	}
	else {
		block_length = 0;
		blocks = 0;
	}

	if (cmd->opcode == MMC_STOP_TRANSMISSION)
		cmdr |= AT91_MCI_TRCMD_STOP;

	/* open-drain is selected by the core during card initialisation */
	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= AT91_MCI_OPDCMD;

	/*
	 * Set the arguments and send the command
	 */
	pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08lX)\n",
		cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));

	if (!data) {
		/* no data phase: quiesce the PDC and clear all its pointers */
		at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_TXTDIS | AT91_PDC_RXTDIS);
		at91_mci_write(host, AT91_PDC_RPR, 0);
		at91_mci_write(host, AT91_PDC_RCR, 0);
		at91_mci_write(host, AT91_PDC_RNPR, 0);
		at91_mci_write(host, AT91_PDC_RNCR, 0);
		at91_mci_write(host, AT91_PDC_TPR, 0);
		at91_mci_write(host, AT91_PDC_TCR, 0);
		at91_mci_write(host, AT91_PDC_TNPR, 0);
		at91_mci_write(host, AT91_PDC_TNCR, 0);

		at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
		at91_mci_write(host, AT91_MCI_CMDR, cmdr);
		return AT91_MCI_CMDRDY;
	}

	/* block length occupies the top half of MR; also select PDC mode */
	mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;	/* zero block length and PDC mode */
	at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE);

	/*
	 * Disable the PDC controller
	 */
	at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);

	if (cmdr & AT91_MCI_TRCMD_START) {
		data->bytes_xfered = 0;
		host->transfer_index = 0;
		host->in_use_index = 0;
		if (cmdr & AT91_MCI_TRDIR) {
			/*
			 * Handle a read
			 */
			host->buffer = NULL;
			host->total_length = 0;

			at91mci_pre_dma_read(host);
			ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
		}
		else {
			/*
			 * Handle a write: copy everything into one coherent
			 * bounce buffer (the source sg may be read-only)
			 */
			host->total_length = block_length * blocks;
			host->buffer = dma_alloc_coherent(NULL,
				host->total_length,
				&host->physical_address, GFP_KERNEL);

			at91mci_sg_to_dma(host, data);

			pr_debug("Transmitting %d bytes\n", host->total_length);

			at91_mci_write(host, AT91_PDC_TPR, host->physical_address);
			at91_mci_write(host, AT91_PDC_TCR, host->total_length / 4);
			ier = AT91_MCI_TXBUFE;
		}
	}

	/*
	 * Send the command and then enable the PDC - not the other way round as
	 * the data sheet says
	 */

	at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
	at91_mci_write(host, AT91_MCI_CMDR, cmdr);

	if (cmdr & AT91_MCI_TRCMD_START) {
		if (cmdr & AT91_MCI_TRDIR)
			at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTEN);
		else
			at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_TXTEN);
	}
	return ier;
}
497
498/*
499 * Wait for a command to complete
500 */
501static void at91mci_process_command(struct at91mci_host *host, struct mmc_command *cmd)
502{
503 unsigned int ier;
504
505 ier = at91_mci_send_command(host, cmd);
506
b44fb7a0 507 pr_debug("setting ier to %08X\n", ier);
65dbf343
AV
508
509 /* Stop on errors or the required value */
e0b19b83 510 at91_mci_write(host, AT91_MCI_IER, 0xffff0000 | ier);
65dbf343
AV
511}
512
513/*
514 * Process the next step in the request
515 */
516static void at91mci_process_next(struct at91mci_host *host)
517{
518 if (!(host->flags & FL_SENT_COMMAND)) {
519 host->flags |= FL_SENT_COMMAND;
520 at91mci_process_command(host, host->request->cmd);
521 }
522 else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
523 host->flags |= FL_SENT_STOP;
524 at91mci_process_command(host, host->request->stop);
525 }
526 else
527 mmc_request_done(host->mmc, host->request);
528}
529
/*
 * Handle a command that has been completed
 *
 * Reads back the response registers, releases the transmit bounce
 * buffer if one was used, translates controller status bits into MMC
 * core error codes, then advances the request state machine.
 */
static void at91mci_completed_command(struct at91mci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	unsigned int status;

	/* no more interrupts wanted for this command */
	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);

	cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
	cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
	cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
	cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));

	/* free the write bounce buffer, if this command allocated one */
	if (host->buffer) {
		dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
		host->buffer = NULL;
	}

	status = at91_mci_read(host, AT91_MCI_SR);

	pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
		status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);

	if (status & (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE |
			AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE |
			AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)) {
		if ((status & AT91_MCI_RCRCE) &&
			((cmd->opcode == MMC_SEND_OP_COND) || (cmd->opcode == SD_APP_OP_COND))) {
			/* CRC errors on these OCR-poll commands are ignored,
			 * presumably because their responses carry no valid
			 * CRC - confirm against the MMC/SD spec */
			cmd->error = MMC_ERR_NONE;
		}
		else {
			/* map the first matching status bit to an error code */
			if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE))
				cmd->error = MMC_ERR_TIMEOUT;
			else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE))
				cmd->error = MMC_ERR_BADCRC;
			else if (status & (AT91_MCI_OVRE | AT91_MCI_UNRE))
				cmd->error = MMC_ERR_FIFO;
			else
				cmd->error = MMC_ERR_FAILED;

			pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n",
				cmd->error, cmd->opcode, cmd->retries);
		}
	}
	else
		cmd->error = MMC_ERR_NONE;

	at91mci_process_next(host);
}
581
582/*
583 * Handle an MMC request
584 */
585static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
586{
587 struct at91mci_host *host = mmc_priv(mmc);
588 host->request = mrq;
589 host->flags = 0;
590
591 at91mci_process_next(host);
592}
593
/*
 * Set the IOS
 *
 * Programs clock rate, bus width and (when the board has a vcc pin)
 * card power from the settings supplied by the MMC core.
 */
static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int clkdiv;
	struct at91mci_host *host = mmc_priv(mmc);
	unsigned long at91_master_clock = clk_get_rate(mci_clk);

	/* remembered for open-drain vs push-pull command setup */
	host->bus_mode = ios->bus_mode;

	if (ios->clock == 0) {
		/* Disable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
		clkdiv = 0;
	}
	else {
		/* Enable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);

		/* MCI clock = master / (2 * (clkdiv + 1)); round the divider
		 * up when it doesn't divide evenly, so the card clock never
		 * exceeds the requested rate */
		if ((at91_master_clock % (ios->clock * 2)) == 0)
			clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
		else
			clkdiv = (at91_master_clock / ios->clock) / 2;

		pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
			at91_master_clock / (2 * (clkdiv + 1)));
	}
	if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
		pr_debug("MMC: Setting controller bus width to 4\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
	}
	else {
		pr_debug("MMC: Setting controller bus width to 1\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
	}

	/* Set the clock divider */
	at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);

	/* maybe switch power to the card */
	if (host->board->vcc_pin) {
		switch (ios->power_mode) {
			case MMC_POWER_OFF:
				at91_set_gpio_output(host->board->vcc_pin, 0);
				break;
			case MMC_POWER_UP:
			case MMC_POWER_ON:
				at91_set_gpio_output(host->board->vcc_pin, 1);
				break;
		}
	}
}
647
/*
 * Handle an interrupt
 *
 * Error conditions all live in the upper half-word of SR (0xffff0000)
 * and complete the command immediately; otherwise the individual
 * progress bits (TXBUFE, RXBUFF, ENDRX, NOTBUSY, CMDRDY, ...) drive the
 * next stage of the transfer.
 */
static irqreturn_t at91_mci_irq(int irq, void *devid)
{
	struct at91mci_host *host = devid;
	int completed = 0;

	unsigned int int_status;

	int_status = at91_mci_read(host, AT91_MCI_SR);
	pr_debug("MCI irq: status = %08X, %08lX, %08lX\n", int_status, at91_mci_read(host, AT91_MCI_IMR),
		int_status & at91_mci_read(host, AT91_MCI_IMR));

	/* any enabled error bit finishes the command */
	if ((int_status & at91_mci_read(host, AT91_MCI_IMR)) & 0xffff0000)
		completed = 1;

	/* only react to sources we actually enabled */
	int_status &= at91_mci_read(host, AT91_MCI_IMR);

	if (int_status & AT91_MCI_UNRE)
		pr_debug("MMC: Underrun error\n");
	if (int_status & AT91_MCI_OVRE)
		pr_debug("MMC: Overrun error\n");
	if (int_status & AT91_MCI_DTOE)
		pr_debug("MMC: Data timeout\n");
	if (int_status & AT91_MCI_DCRCE)
		pr_debug("MMC: CRC error in data\n");
	if (int_status & AT91_MCI_RTOE)
		pr_debug("MMC: Response timeout\n");
	if (int_status & AT91_MCI_RENDE)
		pr_debug("MMC: Response end bit error\n");
	if (int_status & AT91_MCI_RCRCE)
		pr_debug("MMC: Response CRC error\n");
	if (int_status & AT91_MCI_RDIRE)
		pr_debug("MMC: Response direction error\n");
	if (int_status & AT91_MCI_RINDE)
		pr_debug("MMC: Response index error\n");

	/* Only continue processing if no errors */
	if (!completed) {
		if (int_status & AT91_MCI_TXBUFE) {
			pr_debug("TX buffer empty\n");
			at91_mci_handle_transmitted(host);
		}

		if (int_status & AT91_MCI_RXBUFF) {
			pr_debug("RX buffer full\n");
			/* last read buffer done: now wait for command ready */
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY);
		}

		if (int_status & AT91_MCI_ENDTX) {
			pr_debug("Transmit has ended\n");
		}

		if (int_status & AT91_MCI_ENDRX) {
			pr_debug("Receive has ended\n");
			at91mci_post_dma_read(host);
		}

		if (int_status & AT91_MCI_NOTBUSY) {
			pr_debug("Card is ready\n");
			/* write completed on the card: wait for command ready */
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY);
		}

		if (int_status & AT91_MCI_DTIP) {
			pr_debug("Data transfer in progress\n");
		}

		if (int_status & AT91_MCI_BLKE) {
			pr_debug("Block transfer has ended\n");
		}

		if (int_status & AT91_MCI_TXRDY) {
			pr_debug("Ready to transmit\n");
		}

		if (int_status & AT91_MCI_RXRDY) {
			pr_debug("Ready to receive\n");
		}

		if (int_status & AT91_MCI_CMDRDY) {
			pr_debug("Command ready\n");
			completed = 1;
		}
	}
	/* disable the sources just handled */
	at91_mci_write(host, AT91_MCI_IDR, int_status);

	if (completed) {
		pr_debug("Completed command\n");
		at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
		at91mci_completed_command(host);
	}

	return IRQ_HANDLED;
}
743
7d12e780 744static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
65dbf343
AV
745{
746 struct at91mci_host *host = _host;
747 int present = !at91_get_gpio_value(irq);
748
749 /*
750 * we expect this irq on both insert and remove,
751 * and use a short delay to debounce.
752 */
753 if (present != host->present) {
754 host->present = present;
b44fb7a0 755 pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
65dbf343
AV
756 present ? "insert" : "remove");
757 if (!present) {
b44fb7a0 758 pr_debug("****** Resetting SD-card bus width ******\n");
e0b19b83 759 at91_mci_write(host, AT91_MCI_SDCR, 0);
65dbf343
AV
760 }
761 mmc_detect_change(host->mmc, msecs_to_jiffies(100));
762 }
763 return IRQ_HANDLED;
764}
765
766int at91_mci_get_ro(struct mmc_host *mmc)
767{
768 int read_only = 0;
769 struct at91mci_host *host = mmc_priv(mmc);
770
771 if (host->board->wp_pin) {
772 read_only = at91_get_gpio_value(host->board->wp_pin);
773 printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc),
774 (read_only ? "read-only" : "read-write") );
775 }
776 else {
777 printk(KERN_WARNING "%s: host does not support reading read-only "
778 "switch. Assuming write-enable.\n", mmc_hostname(mmc));
779 }
780 return read_only;
781}
782
/* Operations handed to the MMC core */
static const struct mmc_host_ops at91_mci_ops = {
	.request	= at91_mci_request,
	.set_ios	= at91_mci_set_ios,
	.get_ro		= at91_mci_get_ro,
};
788
789/*
790 * Probe for the device
791 */
792static int at91_mci_probe(struct platform_device *pdev)
793{
794 struct mmc_host *mmc;
795 struct at91mci_host *host;
17ea0595 796 struct resource *res;
65dbf343
AV
797 int ret;
798
b44fb7a0 799 pr_debug("Probe MCI devices\n");
65dbf343 800
17ea0595
AV
801 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
802 if (!res)
803 return -ENXIO;
804
805 if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
806 return -EBUSY;
807
65dbf343
AV
808 mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
809 if (!mmc) {
b44fb7a0 810 pr_debug("Failed to allocate mmc host\n");
17ea0595 811 release_mem_region(res->start, res->end - res->start + 1);
65dbf343
AV
812 return -ENOMEM;
813 }
814
815 mmc->ops = &at91_mci_ops;
816 mmc->f_min = 375000;
817 mmc->f_max = 25000000;
818 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
42431acb 819 mmc->caps = MMC_CAP_BYTEBLOCK;
65dbf343
AV
820
821 host = mmc_priv(mmc);
822 host->mmc = mmc;
823 host->buffer = NULL;
824 host->bus_mode = 0;
825 host->board = pdev->dev.platform_data;
826 if (host->board->wire4) {
827#ifdef SUPPORT_4WIRE
828 mmc->caps |= MMC_CAP_4_BIT_DATA;
829#else
830 printk("MMC: 4 wire bus mode not supported by this driver - using 1 wire\n");
831#endif
832 }
833
834 /*
835 * Get Clock
836 */
837 mci_clk = clk_get(&pdev->dev, "mci_clk");
b44fb7a0 838 if (IS_ERR(mci_clk)) {
65dbf343 839 printk(KERN_ERR "AT91 MMC: no clock defined.\n");
b44fb7a0 840 mmc_free_host(mmc);
17ea0595 841 release_mem_region(res->start, res->end - res->start + 1);
65dbf343
AV
842 return -ENODEV;
843 }
65dbf343 844
17ea0595
AV
845 /*
846 * Map I/O region
847 */
848 host->baseaddr = ioremap(res->start, res->end - res->start + 1);
849 if (!host->baseaddr) {
850 clk_put(mci_clk);
851 mmc_free_host(mmc);
852 release_mem_region(res->start, res->end - res->start + 1);
853 return -ENOMEM;
854 }
e0b19b83
AV
855
856 /*
857 * Reset hardware
858 */
17ea0595 859 clk_enable(mci_clk); /* Enable the peripheral clock */
e0b19b83
AV
860 at91_mci_disable(host);
861 at91_mci_enable(host);
862
65dbf343
AV
863 /*
864 * Allocate the MCI interrupt
865 */
17ea0595
AV
866 host->irq = platform_get_irq(pdev, 0);
867 ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, DRIVER_NAME, host);
65dbf343 868 if (ret) {
b44fb7a0
AV
869 printk(KERN_ERR "Failed to request MCI interrupt\n");
870 clk_disable(mci_clk);
871 clk_put(mci_clk);
872 mmc_free_host(mmc);
17ea0595
AV
873 iounmap(host->baseaddr);
874 release_mem_region(res->start, res->end - res->start + 1);
65dbf343
AV
875 return ret;
876 }
877
878 platform_set_drvdata(pdev, mmc);
879
880 /*
881 * Add host to MMC layer
882 */
883 if (host->board->det_pin)
884 host->present = !at91_get_gpio_value(host->board->det_pin);
885 else
886 host->present = -1;
887
888 mmc_add_host(mmc);
889
890 /*
891 * monitor card insertion/removal if we can
892 */
893 if (host->board->det_pin) {
894 ret = request_irq(host->board->det_pin, at91_mmc_det_irq,
b44fb7a0 895 0, DRIVER_NAME, host);
65dbf343 896 if (ret)
b44fb7a0 897 printk(KERN_ERR "couldn't allocate MMC detect irq\n");
65dbf343
AV
898 }
899
b44fb7a0 900 pr_debug(KERN_INFO "Added MCI driver\n");
65dbf343
AV
901
902 return 0;
903}
904
/*
 * Remove a device
 *
 * Tears everything down in the reverse order of probe: detect irq,
 * controller, MMC registration, MCI irq, clock, mapping, memory region.
 */
static int at91_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host;
	struct resource *res;

	if (!mmc)
		return -1;

	host = mmc_priv(mmc);

	/* present == -1 means no detect pin, so no detect irq was requested.
	 * NOTE(review): if request_irq() for the detect pin failed in probe
	 * this still calls free_irq on it - confirm whether that path
	 * matters in practice */
	if (host->present != -1) {
		free_irq(host->board->det_pin, host);
		cancel_delayed_work(&host->mmc->detect);
	}

	at91_mci_disable(host);
	mmc_remove_host(mmc);
	free_irq(host->irq, host);

	clk_disable(mci_clk);			/* Disable the peripheral clock */
	clk_put(mci_clk);

	iounmap(host->baseaddr);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	mmc_free_host(mmc);
	platform_set_drvdata(pdev, NULL);
	pr_debug("MCI Removed\n");

	return 0;
}
941
#ifdef CONFIG_PM
/* Forward a suspend request to the MMC core, if a host was registered */
static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	if (!mmc)
		return 0;

	return mmc_suspend_host(mmc, state);
}

/* Forward a resume request to the MMC core, if a host was registered */
static int at91_mci_resume(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	if (!mmc)
		return 0;

	return mmc_resume_host(mmc);
}
#else
#define at91_mci_suspend	NULL
#define at91_mci_resume		NULL
#endif
968
/* Platform driver glue; "at91_mci" matches the platform device name */
static struct platform_driver at91_mci_driver = {
	.probe		= at91_mci_probe,
	.remove		= at91_mci_remove,
	.suspend	= at91_mci_suspend,
	.resume		= at91_mci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

/* Module entry point: register the platform driver */
static int __init at91_mci_init(void)
{
	return platform_driver_register(&at91_mci_driver);
}

/* Module exit point: unregister the platform driver */
static void __exit at91_mci_exit(void)
{
	platform_driver_unregister(&at91_mci_driver);
}

module_init(at91_mci_init);
module_exit(at91_mci_exit);

MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
MODULE_AUTHOR("Nick Randell");
MODULE_LICENSE("GPL");