/*
 * linux/drivers/mmc/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mmc/host.h>
#include <linux/mmc/protocol.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>

#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/sizes.h>
#include <asm/mach/mmc.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

#define DBG(host,fmt,args...)	\
	pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args)

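/*
 * Default maximum card clock frequency (Hz). Can be overridden at module
 * load time via the "fmax" parameter (see module_param() below).
 */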
static unsigned int fmax = 515633;

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	host->mrq = NULL;
	host->cmd = NULL;

	if (mrq->data)
		mrq->data->bytes_xfered = host->data_xfered;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}

static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	writel(0, host->base + MMCIMASK1);
	host->data = NULL;
}

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	DBG(host, "blksz %04x blks %04x flags %08x\n",
	    data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz;
	host->data_xfered = 0;

	mmci_init_sg(host, data);

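	/*
	 * Convert the nanosecond part of the card's data timeout into card
	 * clock (cclk) cycles, then add the clock-cycle part to get the
	 * value for the data timer register.
	 */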
	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

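	/*
	 * The block size must be a power of two; the DPSM takes log2(blksz)
	 * in the block size field of the data control register.
	 */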
	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
	if (data->flags & MMC_DATA_READ) {
		datactrl |= MCI_DPSM_DIRECTION;
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than a FIFOSIZE of bytes to transfer,
		 * trigger a PIO interrupt as soon as any data is available.
		 */
		if (host->size < MCI_FIFOSIZE)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	writel(irqmask, base + MMCIMASK1);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	DBG(host, "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

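	/*
	 * The command path state machine may still be enabled from a
	 * previous command; disable it and give it a moment to settle
	 * before loading the new command.
	 */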
	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	if (status & MCI_DATABLOCKEND) {
		host->data_xfered += data->blksz;
	}
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		if (status & MCI_DATACRCFAIL)
			data->error = MMC_ERR_BADCRC;
		else if (status & MCI_DATATIMEOUT)
			data->error = MMC_ERR_TIMEOUT;
		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
			data->error = MMC_ERR_FIFO;
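		/*
		 * Force the data-end handling below so the transfer is torn
		 * down and completion is reported to the core.
		 */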
		status |= MCI_DATAEND;

		/*
		 * We hit an error condition. Ensure that any data
		 * partially written to a page is properly coherent.
		 */
		if (host->sg_len && data->flags & MMC_DATA_READ)
			flush_dcache_page(host->sg_ptr->page);
	}
	if (status & MCI_DATAEND) {
		mmci_stop_data(host);

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	cmd->resp[0] = readl(base + MMCIRESPONSE0);
	cmd->resp[1] = readl(base + MMCIRESPONSE1);
	cmd->resp[2] = readl(base + MMCIRESPONSE2);
	cmd->resp[3] = readl(base + MMCIRESPONSE3);

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = MMC_ERR_TIMEOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = MMC_ERR_BADCRC;
	}

	if (!cmd->data || cmd->error != MMC_ERR_NONE) {
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;

	do {
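		/*
		 * MMCIFIFOCNT counts the 32-bit words still to be
		 * transferred; subtracting that from the outstanding
		 * transfer size gives the bytes currently in the FIFO.
		 */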
		int count = host->size - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

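		/*
		 * If the FIFO is completely empty we can fill it with a
		 * whole FIFO's worth of data; otherwise only top up the
		 * free half.
		 */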
		maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
		count = min(remain, maxcnt);

		writesl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	void __iomem *base = host->base;
	u32 status;

	status = readl(base + MMCISTATUS);

	DBG(host, "irq1 %08x\n", status);

	do {
		unsigned long flags;
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		/*
		 * Map the current scatter buffer.
		 */
		buffer = mmci_kmap_atomic(host, &flags) + host->sg_off;
		remain = host->sg_ptr->length - host->sg_off;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		/*
		 * Unmap the buffer.
		 */
		mmci_kunmap_atomic(host, buffer, &flags);

		host->sg_off += len;
		host->size -= len;
		remain -= len;

		if (remain)
			break;

		/*
		 * If we were reading, and we have completed this
		 * page, ensure that the data cache is coherent.
		 */
		if (status & MCI_RXACTIVE)
			flush_dcache_page(host->sg_ptr->page);

		if (!mmci_next_sg(host))
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	/*
	 * If we're nearing the end of the read, switch to
	 * "any data available" mode.
	 */
	if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		writel(0, base + MMCIMASK1);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

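		/*
		 * Read the raw status, keep only the sources we have
		 * enabled, and acknowledge them before handling.
		 */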
		status = readl(host->base + MMCISTATUS);
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		DBG(host, "irq0 %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);

	WARN_ON(host->mrq != NULL);

	spin_lock_irq(&host->lock);

	host->mrq = mrq;

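	/*
	 * For reads, set up the data path before issuing the command so
	 * the DPSM is armed by the time the card starts returning data.
	 */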
	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irq(&host->lock);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 clk = 0, pwr = 0;

	if (ios->clock) {
		if (ios->clock >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			host->cclk = host->mclk;
		} else {
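			/*
			 * The bus clock is mclk / (2 * (divider + 1)). The
			 * divider field is only 8 bits wide, so clamp it to
			 * 255 (giving mclk / 512 as the slowest bus clock).
			 */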
			clk = host->mclk / (2 * ios->clock) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}
		clk |= MCI_CLK_ENABLE;
	}

	if (host->plat->translate_vdd)
		pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		break;
	case MMC_POWER_UP:
		pwr |= MCI_PWR_UP;
		break;
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
		pwr |= MCI_ROD;

	writel(clk, host->base + MMCICLOCK);

	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

static struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.set_ios	= mmci_set_ios,
};

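/*
 * Poll the platform's card detect status once a second and let the MMC
 * core know when a card has been inserted or removed.
 */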
static void mmci_check_status(unsigned long data)
{
	struct mmci_host *host = (struct mmci_host *)data;
	unsigned int status;

	status = host->plat->status(mmc_dev(host->mmc));
	if (status ^ host->oldstat)
		mmc_detect_change(host->mmc, 0);

	host->oldstat = status;
	mod_timer(&host->timer, jiffies + HZ);
}

static int mmci_probe(struct amba_device *dev, void *id)
{
	struct mmc_platform_data *plat = dev->dev.platform_data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->clk = clk_get(&dev->dev, "MCLK");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->mclk = clk_get_rate(host->clk);
	host->mmc = mmc;
	host->base = ioremap(dev->res.start, SZ_4K);
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
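	/*
	 * With an 8-bit clock divider the slowest bus clock we can produce
	 * is mclk / 512 (rounded up); the fastest is mclk, capped at fmax.
	 */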
	mmc->f_min = (host->mclk + 511) / 512;
	mmc->f_max = min(host->mclk, fmax);
	mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = MMC_CAP_MULTIWRITE;

	/*
	 * We can do SGIO
	 */
	mmc->max_hw_segs = 16;
	mmc->max_phys_segs = NR_SG;

	/*
	 * Since we only have a 16-bit data length register, we must
	 * ensure that we don't exceed 2^16-1 bytes in a single request.
	 * Choose 64 (512-byte) sectors as the limit.
	 */
	mmc->max_sectors = 64;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_sectors << 9;

	spin_lock_init(&host->lock);

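	/*
	 * Mask and clear any pending interrupt status before the IRQ
	 * handlers are installed.
	 */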
	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
	if (ret)
		goto irq0_free;

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
		mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
		(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);

	init_timer(&host->timer);
	host->timer.data = (unsigned long)host;
	host->timer.function = mmci_check_status;
	host->timer.expires = jiffies + HZ;
	add_timer(&host->timer);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		del_timer_sync(&host->timer);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		free_irq(dev->irq[0], host);
		free_irq(dev->irq[1], host);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_PM
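/*
 * Mask all controller interrupts while the host is suspended; mmci_resume
 * restores the standard interrupt enables before waking the core.
 */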
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc, state);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0x000fffff,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};

static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");