/*
 * linux/drivers/mmc/host/at91_mci.c - ATMEL AT91 MCI Driver
 *
 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
 *
 * Copyright (C) 2006 Malcolm Noyes
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
   This is the AT91 MCI driver that has been tested with both MMC cards
   and SD-cards.  Boards that support write protect are now supported.
   The CCAT91SBC001 board does not support SD cards.

   The three entry points are at91_mci_request, at91_mci_set_ios
   and at91_mci_get_ro.

   SET IOS
     This configures the device to put it into the correct mode and clock speed
     required.

   MCI REQUEST
     MCI request processes the commands sent in the mmc_request structure. This
     can consist of a processing command and a stop command in the case of
     multiple block transfers.

     There are three main types of request: commands, reads and writes.

     Commands are straightforward. The command is submitted to the controller and
     the request function returns. When the controller generates an interrupt to indicate
     the command is finished, the response to the command is read and mmc_request_done()
     is called to end the request.

     Reads and writes work in a similar manner to normal commands but involve the PDC (DMA)
     controller to manage the transfers.

     A read is done from the controller directly to the scatterlist passed in from the request.
     Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte
     swapped in the scatterlist buffers.  AT91SAM926x are not affected by this bug.

     The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY

     A write is slightly different in that the bytes to write are read from the scatterlist
     into a dma memory buffer (this is in case the source buffer should be read only). The
     entire write is then done from this single dma memory buffer.

     The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY

   GET RO
     Gets the status of the write protect pin, if available.
*/

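/*
 * Informational example (not part of the driver): the board support code
 * normally describes the MCI slot with a struct at91_mmc_data (the same
 * structure this driver reads through pdev->dev.platform_data) and registers
 * it with the usual at91_add_device_mmc() helper.  A hedged sketch, with
 * made-up pin numbers, might look like:
 *
 *	static struct at91_mmc_data __initdata ek_mmc_data = {
 *		.slot_b		= 0,		// use slot A
 *		.wire4		= 1,		// 4-bit data bus where the CPU supports it
 *		.det_pin	= AT91_PIN_PC9,	// card detect (hypothetical pin)
 *		.wp_pin		= AT91_PIN_PC4,	// write protect (hypothetical pin)
 *		.vcc_pin	= 0,		// no card power switch on this board
 *	};
 *
 *	// from the board's init_machine code:
 *	at91_add_device_mmc(0, &ek_mmc_data);
 */
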
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/atmel_pdc.h>

#include <linux/mmc/host.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/gpio.h>

#include <mach/board.h>
#include <mach/cpu.h>
#include <mach/at91_mci.h>

#define DRIVER_NAME "at91_mci"

#define FL_SENT_COMMAND	(1 << 0)
#define FL_SENT_STOP	(1 << 1)

#define AT91_MCI_ERRORS	(AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE	\
		| AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE		\
		| AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)

#define at91_mci_read(host, reg)	__raw_readl((host)->baseaddr + (reg))
#define at91_mci_write(host, reg, val)	__raw_writel((val), (host)->baseaddr + (reg))

#define MCI_BLKSIZE		512
#define MCI_MAXBLKSIZE		4095
#define MCI_BLKATONCE		256
#define MCI_BUFSIZE		(MCI_BLKSIZE * MCI_BLKATONCE)

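/*
 * Note (informational): writes are bounced through a single coherent DMA
 * buffer of MCI_BUFSIZE bytes, i.e. 512 * 256 = 131072 bytes (128 KiB), so
 * one request carries at most MCI_BLKATONCE blocks of MCI_BLKSIZE bytes.
 */
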
/*
 * Low level type for this driver
 */
struct at91mci_host
{
	struct mmc_host *mmc;
	struct mmc_command *cmd;
	struct mmc_request *request;

	void __iomem *baseaddr;
	int irq;

	struct at91_mmc_data *board;
	int present;

	struct clk *mci_clk;

	/*
	 * Flag indicating when the command has been sent. This is used to
	 * work out whether or not to send the stop
	 */
	unsigned int flags;
	/* flag for current bus settings */
	u32 bus_mode;

	/* DMA buffer used for transmitting */
	unsigned int *buffer;
	dma_addr_t physical_address;
	unsigned int total_length;

	/* Latest in the scatterlist that has been enabled for transfer, but not freed */
	int in_use_index;

	/* Latest in the scatterlist that has been enabled for transfer */
	int transfer_index;

	/* Timer for timeouts */
	struct timer_list timer;
};

/*
 * Reset the controller and restore most of the state
 */
static void at91_reset_host(struct at91mci_host *host)
{
	unsigned long flags;
	u32 mr;
	u32 sdcr;
	u32 dtor;
	u32 imr;

	local_irq_save(flags);
	imr = at91_mci_read(host, AT91_MCI_IMR);

	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);

	/* save current state */
	mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;
	sdcr = at91_mci_read(host, AT91_MCI_SDCR);
	dtor = at91_mci_read(host, AT91_MCI_DTOR);

	/* reset the controller */
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);

	/* restore state */
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
	at91_mci_write(host, AT91_MCI_MR, mr);
	at91_mci_write(host, AT91_MCI_SDCR, sdcr);
	at91_mci_write(host, AT91_MCI_DTOR, dtor);
	at91_mci_write(host, AT91_MCI_IER, imr);

	/* make sure sdio interrupts will fire */
	at91_mci_read(host, AT91_MCI_SR);

	local_irq_restore(flags);
}

static void at91_timeout_timer(unsigned long data)
{
	struct at91mci_host *host;

	host = (struct at91mci_host *)data;

	if (host->request) {
		dev_err(host->mmc->parent, "Timeout waiting end of packet\n");

		if (host->cmd && host->cmd->data) {
			host->cmd->data->error = -ETIMEDOUT;
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->request->cmd->error = -ETIMEDOUT;
		}

		at91_reset_host(host);
		mmc_request_done(host->mmc, host->request);
	}
}

/*
 * Copy from sg to a dma block - used for transfers
 */
static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
{
	unsigned int len, i, size;
	unsigned *dmabuf = host->buffer;

	size = data->blksz * data->blocks;
	len = data->sg_len;

	/* AT91SAM926[0/3] Data Write Operation and number of bytes erratum */
	if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
		if (host->total_length == 12)
			memset(dmabuf, 0, 12);

	/*
	 * Just loop through all entries. Size might not
	 * be the entire list though so make sure that
	 * we do not transfer too much.
	 */
	for (i = 0; i < len; i++) {
		struct scatterlist *sg;
		int amount;
		unsigned int *sgbuffer;

		sg = &data->sg[i];

		sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
		amount = min(size, sg->length);
		size -= amount;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			int index;

			for (index = 0; index < (amount / 4); index++)
				*dmabuf++ = swab32(sgbuffer[index]);
		} else {
			char *tmpv = (char *)dmabuf;
			memcpy(tmpv, sgbuffer, amount);
			tmpv += amount;
			dmabuf = (unsigned *)tmpv;
		}

		kunmap_atomic(((void *)sgbuffer) - sg->offset, KM_BIO_SRC_IRQ);

		if (size == 0)
			break;
	}

	/*
	 * Check that we didn't get a request to transfer
	 * more data than can fit into the SG list.
	 */
	BUG_ON(size != 0);
}

/*
 * Prepare a dma read
 */
static void at91_mci_pre_dma_read(struct at91mci_host *host)
{
	int i;
	struct scatterlist *sg;
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("pre dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	for (i = 0; i < 2; i++) {
		/* nothing left to transfer */
		if (host->transfer_index >= data->sg_len) {
			pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
			break;
		}

		/* Check to see if this needs filling */
		if (i == 0) {
			if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
				pr_debug("Transfer active in current\n");
				continue;
			}
		}
		else {
			if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
				pr_debug("Transfer active in next\n");
				continue;
			}
		}

		/* Setup the next transfer */
		pr_debug("Using transfer index %d\n", host->transfer_index);

		sg = &data->sg[host->transfer_index++];
		pr_debug("sg = %p\n", sg);

		sg->dma_address = dma_map_page(NULL, sg_page(sg), sg->offset, sg->length, DMA_FROM_DEVICE);

		pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);

		if (i == 0) {
			at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
		}
		else {
			at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RNCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
		}
	}

	pr_debug("pre dma read done\n");
}

/*
 * Handle after a dma read
 */
static void at91_mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	while (host->in_use_index < host->transfer_index) {
		struct scatterlist *sg;

		pr_debug("finishing index %d\n", host->in_use_index);

		sg = &data->sg[host->in_use_index++];

		pr_debug("Unmapping page %08X\n", sg->dma_address);

		dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			unsigned int *buffer;
			int index;

			/* Swap the contents of the buffer */
			buffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
			pr_debug("buffer = %p, length = %d\n", buffer, sg->length);

			for (index = 0; index < (sg->length / 4); index++)
				buffer[index] = swab32(buffer[index]);

			kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		}

		flush_dcache_page(sg_page(sg));

		data->bytes_xfered += sg->length;
	}

	/* Is there another transfer to trigger? */
	if (host->transfer_index < data->sg_len)
		at91_mci_pre_dma_read(host);
	else {
		at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX);
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
	}

	pr_debug("post dma read done\n");
}

/*
 * Handle transmitted data
 */
static void at91_mci_handle_transmitted(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("Handling the transmit\n");

	/* Disable the transfer */
	at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

	/* Now wait for cmd ready */
	at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);

	cmd = host->cmd;
	if (!cmd) return;

	data = cmd->data;
	if (!data) return;

	if (cmd->data->blocks > 1) {
		pr_debug("multiple write : wait for BLKE...\n");
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
	} else
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
}

/*
 * Update bytes transferred count during a write operation
 */
static void at91_mci_update_bytes_xfered(struct at91mci_host *host)
{
	struct mmc_data *data;

	/* always deal with the effective request (and not the current cmd) */

	if (host->request->cmd && host->request->cmd->error != 0)
		return;

	if (host->request->data) {
		data = host->request->data;
		if (data->flags & MMC_DATA_WRITE) {
			/* card is in IDLE mode now */
			pr_debug("-> bytes_xfered %d, total_length = %d\n",
				data->bytes_xfered, host->total_length);
			data->bytes_xfered = data->blksz * data->blocks;
		}
	}
}


/* Handle after command sent ready */
static int at91_mci_handle_cmdrdy(struct at91mci_host *host)
{
	if (!host->cmd)
		return 1;
	else if (!host->cmd->data) {
		if (host->flags & FL_SENT_STOP) {
			/* After a multi block write, we must wait for NOTBUSY */
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
		} else return 1;
	} else if (host->cmd->data->flags & MMC_DATA_WRITE) {
		/* After sending the multi-block-write command, start the DMA transfer */
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE | AT91_MCI_BLKE);
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
	}

	/* command not completed, have to wait */
	return 0;
}


/*
 * Enable the controller
 */
static void at91_mci_enable(struct at91mci_host *host)
{
	unsigned int mr;

	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
	at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
	mr = AT91_MCI_PDCMODE | 0x34a;

	if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
		mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF;

	at91_mci_write(host, AT91_MCI_MR, mr);

	/* use Slot A or B (only one at a time) */
	at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
}

/*
 * Disable the controller
 */
static void at91_mci_disable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
}

/*
 * Send a command
 */
static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int cmdr, mr;
	unsigned int block_length;
	struct mmc_data *data = cmd->data;

	unsigned int blocks;
	unsigned int ier = 0;

	host->cmd = cmd;

	/* Needed for leaving busy state before CMD1 */
	if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
		pr_debug("Clearing timeout\n");
		at91_mci_write(host, AT91_MCI_ARGR, 0);
		at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
		while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
			/* spin */
			pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
		}
	}

	cmdr = cmd->opcode;

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		cmdr |= AT91_MCI_RSPTYP_NONE;
	else {
		/* if a response is expected then allow maximum response latency */
		cmdr |= AT91_MCI_MAXLAT;
		/* set 136 bit response for R2, 48 bit response otherwise */
		if (mmc_resp_type(cmd) == MMC_RSP_R2)
			cmdr |= AT91_MCI_RSPTYP_136;
		else
			cmdr |= AT91_MCI_RSPTYP_48;
	}

	if (data) {

		if (cpu_is_at91rm9200() || cpu_is_at91sam9261()) {
			if (data->blksz & 0x3) {
				pr_debug("Unsupported block size\n");
				cmd->error = -EINVAL;
				mmc_request_done(host->mmc, host->request);
				return;
			}
			if (data->flags & MMC_DATA_STREAM) {
				pr_debug("Stream commands not supported\n");
				cmd->error = -EINVAL;
				mmc_request_done(host->mmc, host->request);
				return;
			}
		}

		block_length = data->blksz;
		blocks = data->blocks;

		/* always set data start - also set direction flag for read */
		if (data->flags & MMC_DATA_READ)
			cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
		else if (data->flags & MMC_DATA_WRITE)
			cmdr |= AT91_MCI_TRCMD_START;

		if (data->flags & MMC_DATA_STREAM)
			cmdr |= AT91_MCI_TRTYP_STREAM;
		if (data->blocks > 1)
			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
	}
	else {
		block_length = 0;
		blocks = 0;
	}

	if (host->flags & FL_SENT_STOP)
		cmdr |= AT91_MCI_TRCMD_STOP;

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= AT91_MCI_OPDCMD;

	/*
	 * Set the arguments and send the command
	 */
	pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
		cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));

	if (!data) {
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
		at91_mci_write(host, ATMEL_PDC_RPR, 0);
		at91_mci_write(host, ATMEL_PDC_RCR, 0);
		at91_mci_write(host, ATMEL_PDC_RNPR, 0);
		at91_mci_write(host, ATMEL_PDC_RNCR, 0);
		at91_mci_write(host, ATMEL_PDC_TPR, 0);
		at91_mci_write(host, ATMEL_PDC_TCR, 0);
		at91_mci_write(host, ATMEL_PDC_TNPR, 0);
		at91_mci_write(host, ATMEL_PDC_TNCR, 0);
		ier = AT91_MCI_CMDRDY;
	} else {
		/* zero block length and PDC mode */
		mr = at91_mci_read(host, AT91_MCI_MR) & 0x5fff;
		mr |= (data->blksz & 0x3) ? AT91_MCI_PDCFBYTE : 0;
		mr |= (block_length << 16);
		mr |= AT91_MCI_PDCMODE;
		at91_mci_write(host, AT91_MCI_MR, mr);

		if (!(cpu_is_at91rm9200() || cpu_is_at91sam9261()))
			at91_mci_write(host, AT91_MCI_BLKR,
				AT91_MCI_BLKR_BCNT(blocks) |
				AT91_MCI_BLKR_BLKLEN(block_length));

		/*
		 * Disable the PDC controller
		 */
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

		if (cmdr & AT91_MCI_TRCMD_START) {
			data->bytes_xfered = 0;
			host->transfer_index = 0;
			host->in_use_index = 0;
			if (cmdr & AT91_MCI_TRDIR) {
				/*
				 * Handle a read
				 */
				host->total_length = 0;

				at91_mci_pre_dma_read(host);
				ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
			}
			else {
				/*
				 * Handle a write
				 */
				host->total_length = block_length * blocks;
				/*
				 * AT91SAM926[0/3] Data Write Operation and
				 * number of bytes erratum
				 */
				if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
					if (host->total_length < 12)
						host->total_length = 12;

				at91_mci_sg_to_dma(host, data);

				pr_debug("Transmitting %d bytes\n", host->total_length);

				at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
				at91_mci_write(host, ATMEL_PDC_TCR, (data->blksz & 0x3) ?
						host->total_length : host->total_length / 4);

				ier = AT91_MCI_CMDRDY;
			}
		}
	}

	/*
	 * Send the command and then enable the PDC - not the other way round as
	 * the data sheet says
	 */

	at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
	at91_mci_write(host, AT91_MCI_CMDR, cmdr);

	if (cmdr & AT91_MCI_TRCMD_START) {
		if (cmdr & AT91_MCI_TRDIR)
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
	}

	/* Enable selected interrupts */
	at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
}

/*
 * Process the next step in the request
 */
static void at91_mci_process_next(struct at91mci_host *host)
{
	if (!(host->flags & FL_SENT_COMMAND)) {
		host->flags |= FL_SENT_COMMAND;
		at91_mci_send_command(host, host->request->cmd);
	}
	else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
		host->flags |= FL_SENT_STOP;
		at91_mci_send_command(host, host->request->stop);
	} else {
		del_timer(&host->timer);
		/* the at91rm9200 mci controller hangs after some transfers,
		 * and the workaround is to reset it after each transfer.
		 */
		if (cpu_is_at91rm9200())
			at91_reset_host(host);
		mmc_request_done(host->mmc, host->request);
	}
}

/*
 * Handle a command that has been completed
 */
static void at91_mci_completed_command(struct at91mci_host *host, unsigned int status)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = cmd->data;

	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));

	cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
	cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
	cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
	cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));

	pr_debug("Status = %08X/%08x [%08X %08X %08X %08X]\n",
		 status, at91_mci_read(host, AT91_MCI_SR),
		 cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);

	if (status & AT91_MCI_ERRORS) {
		if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
			cmd->error = 0;
		}
		else {
			if (status & (AT91_MCI_DTOE | AT91_MCI_DCRCE)) {
				if (data) {
					if (status & AT91_MCI_DTOE)
						data->error = -ETIMEDOUT;
					else if (status & AT91_MCI_DCRCE)
						data->error = -EILSEQ;
				}
			} else {
				if (status & AT91_MCI_RTOE)
					cmd->error = -ETIMEDOUT;
				else if (status & AT91_MCI_RCRCE)
					cmd->error = -EILSEQ;
				else
					cmd->error = -EIO;
			}

			pr_debug("Error detected and set to %d/%d (cmd = %d, retries = %d)\n",
				cmd->error, data ? data->error : 0,
				cmd->opcode, cmd->retries);
		}
	}
	else
		cmd->error = 0;

	at91_mci_process_next(host);
}

/*
 * Handle an MMC request
 */
static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct at91mci_host *host = mmc_priv(mmc);
	host->request = mrq;
	host->flags = 0;

	/* more than 1s timeout needed with slow SD cards */
	mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000));

	at91_mci_process_next(host);
}

/*
 * Set the IOS
 */
static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int clkdiv;
	struct at91mci_host *host = mmc_priv(mmc);
	unsigned long at91_master_clock = clk_get_rate(host->mci_clk);

	host->bus_mode = ios->bus_mode;

	if (ios->clock == 0) {
		/* Disable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
		clkdiv = 0;
	}
	else {
		/* Enable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);

		/* card clock is MCK / (2 * (CLKDIV + 1)); pick CLKDIV so we stay at or below ios->clock */
		if ((at91_master_clock % (ios->clock * 2)) == 0)
			clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
		else
			clkdiv = (at91_master_clock / ios->clock) / 2;

		pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
			at91_master_clock / (2 * (clkdiv + 1)));
	}
	if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
		pr_debug("MMC: Setting controller bus width to 4\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
	}
	else {
		pr_debug("MMC: Setting controller bus width to 1\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
	}

	/* Set the clock divider */
	at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);

	/* maybe switch power to the card */
	if (host->board->vcc_pin) {
		switch (ios->power_mode) {
			case MMC_POWER_OFF:
				gpio_set_value(host->board->vcc_pin, 0);
				break;
			case MMC_POWER_UP:
				gpio_set_value(host->board->vcc_pin, 1);
				break;
			case MMC_POWER_ON:
				break;
			default:
				WARN_ON(1);
		}
	}
}

/*
 * Handle an interrupt
 */
static irqreturn_t at91_mci_irq(int irq, void *devid)
{
	struct at91mci_host *host = devid;
	int completed = 0;
	unsigned int int_status, int_mask;

	int_status = at91_mci_read(host, AT91_MCI_SR);
	int_mask = at91_mci_read(host, AT91_MCI_IMR);

	pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
		int_status & int_mask);

	int_status = int_status & int_mask;

	if (int_status & AT91_MCI_ERRORS) {
		completed = 1;

		if (int_status & AT91_MCI_UNRE)
			pr_debug("MMC: Underrun error\n");
		if (int_status & AT91_MCI_OVRE)
			pr_debug("MMC: Overrun error\n");
		if (int_status & AT91_MCI_DTOE)
			pr_debug("MMC: Data timeout\n");
		if (int_status & AT91_MCI_DCRCE)
			pr_debug("MMC: CRC error in data\n");
		if (int_status & AT91_MCI_RTOE)
			pr_debug("MMC: Response timeout\n");
		if (int_status & AT91_MCI_RENDE)
			pr_debug("MMC: Response end bit error\n");
		if (int_status & AT91_MCI_RCRCE)
			pr_debug("MMC: Response CRC error\n");
		if (int_status & AT91_MCI_RDIRE)
			pr_debug("MMC: Response direction error\n");
		if (int_status & AT91_MCI_RINDE)
			pr_debug("MMC: Response index error\n");
	} else {
		/* Only continue processing if no errors */

		if (int_status & AT91_MCI_TXBUFE) {
			pr_debug("TX buffer empty\n");
			at91_mci_handle_transmitted(host);
		}

		if (int_status & AT91_MCI_ENDRX) {
			pr_debug("ENDRX\n");
			at91_mci_post_dma_read(host);
		}

		if (int_status & AT91_MCI_RXBUFF) {
			pr_debug("RX buffer full\n");
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
			at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_RXBUFF | AT91_MCI_ENDRX);
			completed = 1;
		}

		if (int_status & AT91_MCI_ENDTX)
			pr_debug("Transmit has ended\n");

		if (int_status & AT91_MCI_NOTBUSY) {
			pr_debug("Card is ready\n");
			at91_mci_update_bytes_xfered(host);
			completed = 1;
		}

		if (int_status & AT91_MCI_DTIP)
			pr_debug("Data transfer in progress\n");

		if (int_status & AT91_MCI_BLKE) {
			pr_debug("Block transfer has ended\n");
			if (host->request->data && host->request->data->blocks > 1) {
				/* multi block write : complete multi write
				 * command and send stop */
				completed = 1;
			} else {
				at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
			}
		}

		if (int_status & AT91_MCI_SDIOIRQA)
			mmc_signal_sdio_irq(host->mmc);

		if (int_status & AT91_MCI_SDIOIRQB)
			mmc_signal_sdio_irq(host->mmc);

		if (int_status & AT91_MCI_TXRDY)
			pr_debug("Ready to transmit\n");

		if (int_status & AT91_MCI_RXRDY)
			pr_debug("Ready to receive\n");

		if (int_status & AT91_MCI_CMDRDY) {
			pr_debug("Command ready\n");
			completed = at91_mci_handle_cmdrdy(host);
		}
	}

	if (completed) {
		pr_debug("Completed command\n");
		at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
		at91_mci_completed_command(host, int_status);
	} else
		at91_mci_write(host, AT91_MCI_IDR, int_status & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));

	return IRQ_HANDLED;
}

static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
{
	struct at91mci_host *host = _host;
	int present = !gpio_get_value(irq_to_gpio(irq));

	/*
	 * we expect this irq on both insert and remove,
	 * and use a short delay to debounce.
	 */
	if (present != host->present) {
		host->present = present;
		pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
			present ? "insert" : "remove");
		if (!present) {
			pr_debug("****** Resetting SD-card bus width ******\n");
			at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
		}
		/* 0.5s needed because of early card detect switch firing */
		mmc_detect_change(host->mmc, msecs_to_jiffies(500));
	}
	return IRQ_HANDLED;
}

static int at91_mci_get_ro(struct mmc_host *mmc)
{
	struct at91mci_host *host = mmc_priv(mmc);

	if (host->board->wp_pin)
		return !!gpio_get_value(host->board->wp_pin);
	/*
	 * Board doesn't support read only detection; let the mmc core
	 * decide what to do.
	 */
	return -ENOSYS;
}

static void at91_mci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct at91mci_host *host = mmc_priv(mmc);

	pr_debug("%s: sdio_irq %c : %s\n", mmc_hostname(host->mmc),
		host->board->slot_b ? 'B' : 'A', enable ? "enable" : "disable");
	at91_mci_write(host, enable ? AT91_MCI_IER : AT91_MCI_IDR,
		host->board->slot_b ? AT91_MCI_SDIOIRQB : AT91_MCI_SDIOIRQA);

}

static const struct mmc_host_ops at91_mci_ops = {
	.request	= at91_mci_request,
	.set_ios	= at91_mci_set_ios,
	.get_ro		= at91_mci_get_ro,
	.enable_sdio_irq = at91_mci_enable_sdio_irq,
};

/*
 * Probe for the device
 */
static int __init at91_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct at91mci_host *host;
	struct resource *res;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		dev_dbg(&pdev->dev, "couldn't allocate mmc host\n");
		goto fail6;
	}

	mmc->ops = &at91_mci_ops;
	mmc->f_min = 375000;
	mmc->f_max = 25000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_SDIO_IRQ;

	mmc->max_blk_size  = MCI_MAXBLKSIZE;
	mmc->max_blk_count = MCI_BLKATONCE;
	mmc->max_req_size  = MCI_BUFSIZE;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->bus_mode = 0;
	host->board = pdev->dev.platform_data;
	if (host->board->wire4) {
		if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
			mmc->caps |= MMC_CAP_4_BIT_DATA;
		else
			dev_warn(&pdev->dev, "4 wire bus mode not supported"
				" - using 1 wire\n");
	}

	host->buffer = dma_alloc_coherent(&pdev->dev, MCI_BUFSIZE,
					&host->physical_address, GFP_KERNEL);
	if (!host->buffer) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "Can't allocate transmit buffer\n");
		goto fail5;
	}

	/*
	 * Reserve GPIOs ... board init code makes sure these pins are set
	 * up as GPIOs with the right direction (input, except for vcc)
	 */
	if (host->board->det_pin) {
		ret = gpio_request(host->board->det_pin, "mmc_detect");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim card detect pin\n");
			goto fail4b;
		}
	}
	if (host->board->wp_pin) {
		ret = gpio_request(host->board->wp_pin, "mmc_wp");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n");
			goto fail4;
		}
	}
	if (host->board->vcc_pin) {
		ret = gpio_request(host->board->vcc_pin, "mmc_vcc");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n");
			goto fail3;
		}
	}

	/*
	 * Get Clock
	 */
	host->mci_clk = clk_get(&pdev->dev, "mci_clk");
	if (IS_ERR(host->mci_clk)) {
		ret = -ENODEV;
		dev_dbg(&pdev->dev, "no mci_clk?\n");
		goto fail2;
	}

	/*
	 * Map I/O region
	 */
	host->baseaddr = ioremap(res->start, res->end - res->start + 1);
	if (!host->baseaddr) {
		ret = -ENOMEM;
		goto fail1;
	}

	/*
	 * Reset hardware
	 */
	clk_enable(host->mci_clk);		/* Enable the peripheral clock */
	at91_mci_disable(host);
	at91_mci_enable(host);

	/*
	 * Allocate the MCI interrupt
	 */
	host->irq = platform_get_irq(pdev, 0);
	ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED,
			mmc_hostname(mmc), host);
	if (ret) {
		dev_dbg(&pdev->dev, "request MCI interrupt failed\n");
		goto fail0;
	}

	setup_timer(&host->timer, at91_timeout_timer, (unsigned long)host);

	platform_set_drvdata(pdev, mmc);

	/*
	 * Add host to MMC layer
	 */
	if (host->board->det_pin) {
		host->present = !gpio_get_value(host->board->det_pin);
	}
	else
		host->present = -1;

	mmc_add_host(mmc);

	/*
	 * monitor card insertion/removal if we can
	 */
	if (host->board->det_pin) {
		ret = request_irq(gpio_to_irq(host->board->det_pin),
				at91_mmc_det_irq, 0, mmc_hostname(mmc), host);
		if (ret)
			dev_warn(&pdev->dev, "request MMC detect irq failed\n");
		else
			device_init_wakeup(&pdev->dev, 1);
	}

	pr_debug("Added MCI driver\n");

	return 0;

fail0:
	clk_disable(host->mci_clk);
	iounmap(host->baseaddr);
fail1:
	clk_put(host->mci_clk);
fail2:
	if (host->board->vcc_pin)
		gpio_free(host->board->vcc_pin);
fail3:
	if (host->board->wp_pin)
		gpio_free(host->board->wp_pin);
fail4:
	if (host->board->det_pin)
		gpio_free(host->board->det_pin);
fail4b:
	if (host->buffer)
		dma_free_coherent(&pdev->dev, MCI_BUFSIZE,
				host->buffer, host->physical_address);
fail5:
	mmc_free_host(mmc);
fail6:
	release_mem_region(res->start, res->end - res->start + 1);
	dev_err(&pdev->dev, "probe failed, err %d\n", ret);
	return ret;
}

/*
 * Remove a device
 */
static int __exit at91_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host;
	struct resource *res;

	if (!mmc)
		return -1;

	host = mmc_priv(mmc);

	if (host->buffer)
		dma_free_coherent(&pdev->dev, MCI_BUFSIZE,
				host->buffer, host->physical_address);

	if (host->board->det_pin) {
		if (device_can_wakeup(&pdev->dev))
			free_irq(gpio_to_irq(host->board->det_pin), host);
		device_init_wakeup(&pdev->dev, 0);
		gpio_free(host->board->det_pin);
	}

	at91_mci_disable(host);
	del_timer_sync(&host->timer);
	mmc_remove_host(mmc);
	free_irq(host->irq, host);

	clk_disable(host->mci_clk);		/* Disable the peripheral clock */
	clk_put(host->mci_clk);

	if (host->board->vcc_pin)
		gpio_free(host->board->vcc_pin);
	if (host->board->wp_pin)
		gpio_free(host->board->wp_pin);

	iounmap(host->baseaddr);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	mmc_free_host(mmc);
	platform_set_drvdata(pdev, NULL);
	pr_debug("MCI Removed\n");

	return 0;
}

#ifdef CONFIG_PM
static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host = mmc_priv(mmc);
	int ret = 0;

	if (host->board->det_pin && device_may_wakeup(&pdev->dev))
		enable_irq_wake(host->board->det_pin);

	if (mmc)
		ret = mmc_suspend_host(mmc, state);

	return ret;
}

static int at91_mci_resume(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host = mmc_priv(mmc);
	int ret = 0;

	if (host->board->det_pin && device_may_wakeup(&pdev->dev))
		disable_irq_wake(host->board->det_pin);

	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}
#else
#define at91_mci_suspend	NULL
#define at91_mci_resume		NULL
#endif

static struct platform_driver at91_mci_driver = {
	.remove		= __exit_p(at91_mci_remove),
	.suspend	= at91_mci_suspend,
	.resume		= at91_mci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init at91_mci_init(void)
{
	return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
}

static void __exit at91_mci_exit(void)
{
	platform_driver_unregister(&at91_mci_driver);
}

module_init(at91_mci_init);
module_exit(at91_mci_exit);

MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
MODULE_AUTHOR("Nick Randell");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at91_mci");