/* (gitweb blame navigation residue removed) */
d129bceb 1/*
70f10482 2 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
d129bceb 3 *
b69c9058 4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
d129bceb
PO
5 *
6 * This program is free software; you can redistribute it and/or modify
643f720c
PO
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
84c46a53
PO
10 *
11 * Thanks to the following companies for their support:
12 *
13 * - JMicron (hardware and technical support)
d129bceb
PO
14 */
15
d129bceb
PO
16#include <linux/delay.h>
17#include <linux/highmem.h>
b8c86fc5 18#include <linux/io.h>
d129bceb 19#include <linux/dma-mapping.h>
11763609 20#include <linux/scatterlist.h>
d129bceb 21
2f730fec
PO
22#include <linux/leds.h>
23
d129bceb 24#include <linux/mmc/host.h>
d129bceb 25
d129bceb
PO
26#include "sdhci.h"
27
28#define DRIVER_NAME "sdhci"
d129bceb 29
d129bceb 30#define DBG(f, x...) \
c6563178 31 pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
d129bceb 32
f9134319
PO
33#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
34 defined(CONFIG_MMC_SDHCI_MODULE))
35#define SDHCI_USE_LEDS_CLASS
36#endif
37
/* Module parameter: force-override controller quirks (0 = use defaults).
 * Statics live in .bss and are zeroed by the loader; an explicit "= 0"
 * initialiser is redundant and flagged by checkpatch.
 */
static unsigned int debug_quirks;
67435274 39
d129bceb
PO
40static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
41static void sdhci_finish_data(struct sdhci_host *);
42
43static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
44static void sdhci_finish_command(struct sdhci_host *);
45
/*
 * Dump every controller register to the kernel log.
 * Debug aid only; called from error paths (timeouts, failed resets).
 */
static void sdhci_dumpregs(struct sdhci_host *host)
{
	printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n");

	printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
		sdhci_readl(host, SDHCI_DMA_ADDRESS),
		sdhci_readw(host, SDHCI_HOST_VERSION));
	printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		sdhci_readw(host, SDHCI_BLOCK_SIZE),
		sdhci_readw(host, SDHCI_BLOCK_COUNT));
	printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
		sdhci_readl(host, SDHCI_ARGUMENT),
		sdhci_readw(host, SDHCI_TRANSFER_MODE));
	printk(KERN_DEBUG DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
		sdhci_readl(host, SDHCI_PRESENT_STATE),
		sdhci_readb(host, SDHCI_HOST_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
		sdhci_readb(host, SDHCI_POWER_CONTROL),
		sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
		sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
		sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		sdhci_readl(host, SDHCI_INT_STATUS));
	printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
		sdhci_readl(host, SDHCI_INT_ENABLE),
		sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
		sdhci_readw(host, SDHCI_ACMD12_ERR),
		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Max curr: 0x%08x\n",
		sdhci_readl(host, SDHCI_CAPABILITIES),
		sdhci_readl(host, SDHCI_MAX_CURRENT));

	printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
}
83
84/*****************************************************************************\
85 * *
86 * Low level functions *
87 * *
88\*****************************************************************************/
89
7260cf5e
AV
90static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
91{
92 u32 ier;
93
94 ier = sdhci_readl(host, SDHCI_INT_ENABLE);
95 ier &= ~clear;
96 ier |= set;
97 sdhci_writel(host, ier, SDHCI_INT_ENABLE);
98 sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
99}
100
101static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs)
102{
103 sdhci_clear_set_irqs(host, 0, irqs);
104}
105
106static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs)
107{
108 sdhci_clear_set_irqs(host, irqs, 0);
109}
110
111static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
112{
113 u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT;
114
115 if (enable)
116 sdhci_unmask_irqs(host, irqs);
117 else
118 sdhci_mask_irqs(host, irqs);
119}
120
121static void sdhci_enable_card_detection(struct sdhci_host *host)
122{
123 sdhci_set_card_detection(host, true);
124}
125
126static void sdhci_disable_card_detection(struct sdhci_host *host)
127{
128 sdhci_set_card_detection(host, false);
129}
130
d129bceb
PO
/*
 * Issue a software reset (@mask selects CMD/DATA/ALL lines) and busy-wait
 * for the controller to clear the reset bit.  On timeout the registers are
 * dumped and the function returns without further recovery.
 */
static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	/* Some controllers hang a reset when no card is inserted. */
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
			SDHCI_CARD_PRESENT))
			return;
	}

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	/* A full reset also kills the clock setup; force reprogramming. */
	if (mask & SDHCI_RESET_ALL)
		host->clock = 0;

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}
}
161
/*
 * Bring the controller to a known state: full software reset, then enable
 * exactly the interrupts this driver services.  Card insert/remove IRQs are
 * deliberately NOT part of this set; they are managed separately via
 * sdhci_enable_card_detection() (see sdhci_reinit()).
 */
static void sdhci_init(struct sdhci_host *host)
{
	sdhci_reset(host, SDHCI_RESET_ALL);

	sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
		SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
		SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
		SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
		SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE |
		SDHCI_INT_ADMA_ERROR);
}
d129bceb 174
7260cf5e
AV
/*
 * Full controller re-initialisation followed by re-arming the card
 * insert/remove interrupts (which sdhci_init() leaves disabled).
 */
static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host);
	sdhci_enable_card_detection(host);
}
180
181static void sdhci_activate_led(struct sdhci_host *host)
182{
183 u8 ctrl;
184
4e4141a5 185 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
d129bceb 186 ctrl |= SDHCI_CTRL_LED;
4e4141a5 187 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
d129bceb
PO
188}
189
190static void sdhci_deactivate_led(struct sdhci_host *host)
191{
192 u8 ctrl;
193
4e4141a5 194 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
d129bceb 195 ctrl &= ~SDHCI_CTRL_LED;
4e4141a5 196 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
d129bceb
PO
197}
198
#ifdef SDHCI_USE_LEDS_CLASS
/*
 * LED-class trigger callback: map brightness to LED on/off.
 * Takes host->lock because the LED bit lives in SDHCI_HOST_CONTROL,
 * which the request path also read-modify-writes.
 */
static void sdhci_led_control(struct led_classdev *led,
	enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (brightness == LED_OFF)
		sdhci_deactivate_led(host);
	else
		sdhci_activate_led(host);

	spin_unlock_irqrestore(&host->lock, flags);
}
#endif
216
d129bceb
PO
217/*****************************************************************************\
218 * *
219 * Core functions *
220 * *
221\*****************************************************************************/
222
/*
 * PIO read of one block: drain the 32-bit data port into the request's
 * scatterlist via the sg_miter iterator.  The 4-byte FIFO word is unpacked
 * LSB-first into the buffer; @chunk tracks how many bytes of the current
 * word remain.  Runs with local IRQs off because SG_MITER_ATOMIC mappings
 * must not be preempted.
 */
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		if (!sg_miter_next(&host->sg_miter))
			BUG();

		/* Copy at most what fits in the current sg segment. */
		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
d129bceb 267
a406f5a3
PO
/*
 * PIO write of one block: mirror of sdhci_read_block_pio().  Bytes from the
 * scatterlist are packed LSB-first into a 32-bit word which is flushed to
 * the data port when full — or early, when both the segment (@len) and the
 * block (@blksize) are exhausted, to handle trailing partial words.
 */
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		if (!sg_miter_next(&host->sg_miter))
			BUG();

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			/* Flush a complete word, or the final partial one. */
			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
313
/*
 * Pump as many blocks as the controller currently has buffer space/data
 * for, in the direction of the active request.  host->blocks counts the
 * blocks still outstanding for the whole transfer.
 */
static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	BUG_ON(!host->data);

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}
350
2134a922
PO
/*
 * Atomically map an sg entry's page and return a pointer at its offset.
 * Disables local IRQs first (saved into *flags) because the atomic kmap
 * must not be interrupted; paired with sdhci_kunmap_atomic().
 */
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
}
356
/* Undo sdhci_kmap_atomic(): drop the mapping, then restore IRQ state. */
static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
	local_irq_restore(*flags);
}
362
/*
 * Build and DMA-map the ADMA2 descriptor table for @data.
 * Each descriptor is 8 bytes, written byte-wise little-endian:
 * bytes 4-7 = address, 2-3 = length, 0-1 = attributes
 * (0x21 = "tran, valid", 0x03 = "nop, end, valid" terminator).
 * Unaligned sg starts are patched via a bounce ("align") buffer, up to
 * 3 bytes per entry.  Returns 0 on success, -EINVAL on any mapping
 * failure (with everything mapped so far unwound via goto cleanup).
 */
static int sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	u8 *desc;
	u8 *align;
	dma_addr_t addr;
	dma_addr_t align_addr;
	int len, offset;

	struct scatterlist *sg;
	int i;
	char *buffer;
	unsigned long flags;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	/*
	 * The ADMA descriptor table is mapped further down as we
	 * need to fill it with data first.
	 */

	host->align_addr = dma_map_single(mmc_dev(host->mmc),
		host->align_buffer, 128 * 4, direction);
	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
		goto fail;
	BUG_ON(host->align_addr & 0x3);

	host->sg_count = dma_map_sg(mmc_dev(host->mmc),
		data->sg, data->sg_len, direction);
	if (host->sg_count == 0)
		goto unmap_align;

	desc = host->adma_desc;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA
		 * addresses must be 32-bit aligned. If they
		 * aren't, then we use a bounce buffer for
		 * the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (4 - (addr & 0x3)) & 0x3;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				/* Copy the leading bytes into the bounce area. */
				buffer = sdhci_kmap_atomic(sg, &flags);
				WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* Descriptor pointing at the bounce buffer slot. */
			desc[7] = (align_addr >> 24) & 0xff;
			desc[6] = (align_addr >> 16) & 0xff;
			desc[5] = (align_addr >> 8) & 0xff;
			desc[4] = (align_addr >> 0) & 0xff;

			BUG_ON(offset > 65536);

			desc[3] = (offset >> 8) & 0xff;
			desc[2] = (offset >> 0) & 0xff;

			desc[1] = 0x00;
			desc[0] = 0x21; /* tran, valid */

			align += 4;
			align_addr += 4;

			desc += 8;

			addr += offset;
			len -= offset;
		}

		/* Main descriptor for the (now aligned) remainder. */
		desc[7] = (addr >> 24) & 0xff;
		desc[6] = (addr >> 16) & 0xff;
		desc[5] = (addr >> 8) & 0xff;
		desc[4] = (addr >> 0) & 0xff;

		BUG_ON(len > 65536);

		desc[3] = (len >> 8) & 0xff;
		desc[2] = (len >> 0) & 0xff;

		desc[1] = 0x00;
		desc[0] = 0x21; /* tran, valid */

		desc += 8;

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
	}

	/*
	 * Add a terminating entry.
	 */
	desc[7] = 0;
	desc[6] = 0;
	desc[5] = 0;
	desc[4] = 0;

	desc[3] = 0;
	desc[2] = 0;

	desc[1] = 0x00;
	desc[0] = 0x03; /* nop, end, valid */

	/*
	 * Resync align buffer as we might have changed it.
	 */
	if (data->flags & MMC_DATA_WRITE) {
		dma_sync_single_for_device(mmc_dev(host->mmc),
			host->align_addr, 128 * 4, direction);
	}

	host->adma_addr = dma_map_single(mmc_dev(host->mmc),
		host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
	if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
		goto unmap_entries;
	BUG_ON(host->adma_addr & 0x3);

	return 0;

unmap_entries:
	dma_unmap_sg(mmc_dev(host->mmc), data->sg,
		data->sg_len, direction);
unmap_align:
	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		128 * 4, direction);
fail:
	return -EINVAL;
}
513
/*
 * Tear down the ADMA mappings created by sdhci_adma_table_pre().  For
 * reads, any bytes that landed in the bounce ("align") buffer because of
 * unaligned sg starts are copied back into the real buffers first.
 */
static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	struct scatterlist *sg;
	int i, size;
	u8 *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
		(128 * 2 + 1) * 4, DMA_TO_DEVICE);

	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		128 * 4, direction);

	if (data->flags & MMC_DATA_READ) {
		dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
			data->sg_len, direction);

		align = host->align_buffer;

		for_each_sg(data->sg, sg, host->sg_count, i) {
			if (sg_dma_address(sg) & 0x3) {
				/* Same bounce-slot geometry as table_pre. */
				size = 4 - (sg_dma_address(sg) & 0x3);

				buffer = sdhci_kmap_atomic(sg, &flags);
				WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
				memcpy(buffer, align, size);
				sdhci_kunmap_atomic(buffer, &flags);

				align += 4;
			}
		}
	}

	dma_unmap_sg(mmc_dev(host->mmc), data->sg,
		data->sg_len, direction);
}
559
/*
 * Translate the request's timeout (ns + clocks) into the 4-bit exponent
 * the SDHCI timeout register expects (timeout = 2^(13+count) clocks).
 * Returns at most 0xE; 0xE is also used when the request asks for more
 * than the hardware can represent, or when the controller's reported
 * timeout clock is known-broken.
 */
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
{
	u8 count;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL))
		return 0xE;

	/* timeout in us */
	target_timeout = data->timeout_ns / 1000 +
		data->timeout_clks / host->clock;

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 * =>
	 * (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		printk(KERN_WARNING "%s: Too large timeout requested!\n",
			mmc_hostname(host->mmc));
		count = 0xE;
	}

	return count;
}
605
/*
 * Program the controller for the data phase of a request: timeout,
 * DMA vs PIO selection (downgrading to PIO when controller quirks make
 * the scatterlist unusable), ADMA table / SDMA address setup, and
 * finally block size and count.  Must run before the command is issued.
 */
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
{
	u8 count;
	u8 ctrl;
	int ret;

	WARN_ON(host->data);

	if (data == NULL)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;

	count = sdhci_calc_timeout(host, data);
	sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);

	/* Start optimistic: use DMA if the host supports it at all. */
	if (host->flags & SDHCI_USE_DMA)
		host->flags |= SDHCI_REQ_USE_DMA;

	/*
	 * FIXME: This doesn't account for merging when mapping the
	 * scatterlist.
	 */

	/* First pass: quirk-afflicted controllers need 32-bit sg lengths. */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & 0x3) {
					DBG("Reverting to PIO because of "
						"transfer size (%d)\n",
						sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	/*
	 * The assumption here being that alignment is the same after
	 * translation to device address space.
	 */

	/* Second pass: quirk-afflicted controllers need 32-bit sg offsets. */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * As we use 3 byte chunks to work around
			 * alignment problems, we need to check this
			 * quirk.
			 */
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->offset & 0x3) {
					DBG("Reverting to PIO because of "
						"bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	/* Map the transfer: ADMA descriptor table or single SDMA address. */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA) {
			ret = sdhci_adma_table_pre(host, data);
			if (ret) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				sdhci_writel(host, host->adma_addr,
					SDHCI_ADMA_ADDRESS);
			}
		} else {
			int sg_cnt;

			sg_cnt = dma_map_sg(mmc_dev(host->mmc),
					data->sg, data->sg_len,
					(data->flags & MMC_DATA_READ) ?
						DMA_FROM_DEVICE :
						DMA_TO_DEVICE);
			if (sg_cnt == 0) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				WARN_ON(sg_cnt != 1);
				sdhci_writel(host, sg_dma_address(data->sg),
					SDHCI_DMA_ADDRESS);
			}
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA))
			ctrl |= SDHCI_CTRL_ADMA32;
		else
			ctrl |= SDHCI_CTRL_SDMA;
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	/* PIO fallback: set up the sg iterator used by the IRQ path. */
	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		sg_miter_start(&host->sg_miter,
			data->sg, data->sg_len, SG_MITER_ATOMIC);
		host->blocks = data->blocks;
	}

	/* We do not handle DMA boundaries, so set it to max (512 KiB) */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
758
759static void sdhci_set_transfer_mode(struct sdhci_host *host,
760 struct mmc_data *data)
761{
762 u16 mode;
763
c7fa9963
PO
764 if (data == NULL)
765 return;
766
e538fbe8
PO
767 WARN_ON(!host->data);
768
c7fa9963
PO
769 mode = SDHCI_TRNS_BLK_CNT_EN;
770 if (data->blocks > 1)
771 mode |= SDHCI_TRNS_MULTI;
772 if (data->flags & MMC_DATA_READ)
773 mode |= SDHCI_TRNS_READ;
c9fddbc4 774 if (host->flags & SDHCI_REQ_USE_DMA)
c7fa9963
PO
775 mode |= SDHCI_TRNS_DMA;
776
4e4141a5 777 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
d129bceb
PO
778}
779
/*
 * Complete the data phase: unmap DMA resources, fix up bytes_xfered,
 * and either chain into the stop command or schedule request completion.
 */
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA)
			sdhci_adma_table_post(host, data);
		else {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				data->sg_len, (data->flags & MMC_DATA_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE);
		}
	}

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	if (data->stop) {
		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}
825
/*
 * Issue @cmd to the controller: wait for the CMD (and, when relevant,
 * DAT) inhibit bits to clear, arm the software watchdog timer, program
 * the data phase/argument/transfer mode, then write the command register
 * (which starts execution).  Errors complete the request via the finish
 * tasklet rather than returning a code.
 */
static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inihibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Controller never released "
				"inhibit bit(s).\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	/* Software timeout: the finish tasklet fires if nothing completes. */
	mod_timer(&host->timer, jiffies + 10 * HZ);

	host->cmd = cmd;

	sdhci_prepare_data(host, cmd->data);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd->data);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		printk(KERN_ERR "%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;
	if (cmd->data)
		flags |= SDHCI_CMD_DATA;

	/* Writing the command register kicks off the transaction. */
	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
895
/*
 * Command-complete handler: read back the response registers (re-aligning
 * the 136-bit response, whose CRC byte the controller strips), finish any
 * data phase that completed before the command did, and schedule request
 * completion for data-less commands.
 */
static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0;i < 4;i++) {
				host->cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	host->cmd->error = 0;

	/* Data phase may have finished before the command response arrived. */
	if (host->data && host->data_early)
		sdhci_finish_data(host);

	if (!host->cmd->data)
		tasklet_schedule(&host->finish_tasklet);

	host->cmd = NULL;
}
928
/*
 * Program the SD clock: gate it off, pick the largest power-of-two
 * divider (1..256) that stays at or below @clock, wait for the internal
 * clock to stabilise, then enable the card clock.  @clock == 0 just
 * leaves the clock gated.
 */
static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int div;
	u16 clk;
	unsigned long timeout;

	if (clock == host->clock)
		return;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		goto out;

	for (div = 1;div < 256;div *= 2) {
		if ((host->max_clk / div) <= clock)
			break;
	}
	/* Register encodes divider/2 (div=1 -> 0). */
	div >>= 1;

	clk = div << SDHCI_DIVIDER_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 10 ms */
	timeout = 10;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Internal clock never "
				"stabilised.\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

out:
	host->clock = clock;
}
973
146ad66e
PO
/*
 * Set bus power.  @power is an MMC_VDD_* bit index (ios->vdd), or
 * (unsigned short)-1 to switch power off entirely.  Quirks control
 * whether the register is cleared first and whether voltage and the
 * power-on bit may be written in one go.
 */
static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
	u8 pwr;

	if (host->power == power)
		return;

	if (power == (unsigned short)-1) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		goto out;
	}

	/*
	 * Spec says that we should clear the power reg before setting
	 * a new value. Some controllers don't seem to like this though.
	 */
	if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

	pwr = SDHCI_POWER_ON;

	switch (1 << power) {
	case MMC_VDD_165_195:
		pwr |= SDHCI_POWER_180;
		break;
	case MMC_VDD_29_30:
	case MMC_VDD_30_31:
		pwr |= SDHCI_POWER_300;
		break;
	case MMC_VDD_32_33:
	case MMC_VDD_33_34:
		pwr |= SDHCI_POWER_330;
		break;
	default:
		BUG();
	}

	/*
	 * At least the Marvell CaFe chip gets confused if we set the voltage
	 * and set turn on power at the same time, so set the voltage first.
	 */
	if ((host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER))
		sdhci_writeb(host, pwr & ~SDHCI_POWER_ON, SDHCI_POWER_CONTROL);

	sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

out:
	host->power = power;
}
1023
d129bceb
PO
1024/*****************************************************************************\
1025 * *
1026 * MMC callbacks *
1027 * *
1028\*****************************************************************************/
1029
/*
 * mmc_host_ops.request: entry point for a new MMC request.  Fails fast
 * with -ENOMEDIUM when no card is present or the device is flagged dead;
 * otherwise kicks off the first command.  Completion is asynchronous via
 * the finish tasklet.
 */
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->mrq != NULL);

/* Without the LED class, drive the activity LED directly per request. */
#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_activate_led(host);
#endif

	host->mrq = mrq;

	if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)
		|| (host->flags & SDHCI_DEVICE_DEAD)) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else
		sdhci_send_command(host, mrq->cmd);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
1057
/*
 * mmc_host_ops.set_ios: apply bus settings (clock, power, bus width,
 * high-speed timing) requested by the MMC core.  Power-off triggers a
 * full controller re-init to flush any stuck state.
 */
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host;
	unsigned long flags;
	u8 ctrl;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		goto out;

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	sdhci_set_clock(host, ios->clock);

	if (ios->power_mode == MMC_POWER_OFF)
		sdhci_set_power(host, -1);
	else
		sdhci_set_power(host, ios->vdd);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		ctrl |= SDHCI_CTRL_4BITBUS;
	else
		ctrl &= ~SDHCI_CTRL_4BITBUS;

	if (ios->timing == MMC_TIMING_SD_HS)
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

out:
	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
1113
1114static int sdhci_get_ro(struct mmc_host *mmc)
1115{
1116 struct sdhci_host *host;
1117 unsigned long flags;
1118 int present;
1119
1120 host = mmc_priv(mmc);
1121
1122 spin_lock_irqsave(&host->lock, flags);
1123
1e72859e
PO
1124 if (host->flags & SDHCI_DEVICE_DEAD)
1125 present = 0;
1126 else
4e4141a5 1127 present = sdhci_readl(host, SDHCI_PRESENT_STATE);
d129bceb
PO
1128
1129 spin_unlock_irqrestore(&host->lock, flags);
1130
1131 return !(present & SDHCI_WRITE_PROTECT);
1132}
1133
f75979b7
PO
1134static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1135{
1136 struct sdhci_host *host;
1137 unsigned long flags;
f75979b7
PO
1138
1139 host = mmc_priv(mmc);
1140
1141 spin_lock_irqsave(&host->lock, flags);
1142
1e72859e
PO
1143 if (host->flags & SDHCI_DEVICE_DEAD)
1144 goto out;
1145
f75979b7 1146 if (enable)
7260cf5e
AV
1147 sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
1148 else
1149 sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
1e72859e 1150out:
f75979b7
PO
1151 mmiowb();
1152
1153 spin_unlock_irqrestore(&host->lock, flags);
1154}
1155
/* Host controller operations handed to the MMC core at mmc_add_host(). */
static const struct mmc_host_ops sdhci_ops = {
	.request = sdhci_request,
	.set_ios = sdhci_set_ios,
	.get_ro = sdhci_get_ro,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
};
1162
1163/*****************************************************************************\
1164 * *
1165 * Tasklets *
1166 * *
1167\*****************************************************************************/
1168
/*
 * sdhci_tasklet_card - deferred handler for card insert/remove interrupts.
 *
 * If the card vanished mid-request, abort the in-flight request with
 * -ENOMEDIUM and reset the controller, then let the MMC core rescan.
 */
static void sdhci_tasklet_card(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
		if (host->mrq) {
			printk(KERN_ERR "%s: Card removed during transfer!\n",
				mmc_hostname(host->mmc));
			printk(KERN_ERR "%s: Resetting controller.\n",
				mmc_hostname(host->mmc));

			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);

			host->mrq->cmd->error = -ENOMEDIUM;
			/* Completion is finished by the finish tasklet. */
			tasklet_schedule(&host->finish_tasklet);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	/* Debounce: tell the core to re-detect after 200ms. */
	mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}
1197
/*
 * sdhci_tasklet_finish - complete the current MMC request.
 *
 * Cancels the software timeout timer, resets the controller's command
 * and data state machines when the request ended in error (or the
 * quirk demands it), clears the per-request state, and reports
 * completion to the MMC core outside the lock.
 */
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	del_timer(&host->timer);

	mrq = host->mrq;

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
		(mrq->cmd->error ||
		 (mrq->data && (mrq->data->error ||
		  (mrq->data->stop && mrq->data->stop->error))) ||
		   (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
			unsigned int clock;

			/* This is to force an update */
			clock = host->clock;
			host->clock = 0;
			sdhci_set_clock(host, clock);
		}

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_reset(host, SDHCI_RESET_CMD);
		sdhci_reset(host, SDHCI_RESET_DATA);
	}

	/* Request is done; drop all references to it. */
	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_deactivate_led(host);
#endif

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	/* May re-enter the driver via ->request; must not hold the lock. */
	mmc_request_done(host->mmc, mrq);
}
1251
/*
 * sdhci_timeout_timer - software watchdog for a stuck request.
 *
 * Fires when the hardware never raised the expected completion
 * interrupt. Marks the active data/command/request with -ETIMEDOUT and
 * pushes completion through the normal finish paths.
 */
static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		printk(KERN_ERR "%s: Timeout waiting for hardware "
			"interrupt.\n", mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			/* Data phase was active: finish it synchronously. */
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else {
			/* Blame whichever command is outstanding. */
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;

			tasklet_schedule(&host->finish_tasklet);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
1282
1283/*****************************************************************************\
1284 * *
1285 * Interrupt handling *
1286 * *
1287\*****************************************************************************/
1288
/*
 * sdhci_cmd_irq - handle command-related interrupt status bits.
 *
 * Called from sdhci_irq() with the host lock held; @intmask contains
 * only the SDHCI_INT_CMD_MASK bits (already acknowledged in hardware).
 * Maps error bits to errno on the current command, and completes the
 * command on SDHCI_INT_RESPONSE.
 */
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
	BUG_ON(intmask == 0);

	/* Spurious interrupt: no command is in flight. */
	if (!host->cmd) {
		printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
			"though no command operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & SDHCI_INT_TIMEOUT)
		host->cmd->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
			SDHCI_INT_INDEX))
		host->cmd->error = -EILSEQ;

	if (host->cmd->error) {
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/*
	 * The host can send and interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * Unfortunately this is overloaded on the "data complete"
	 * interrupt, so we need to take some care when handling
	 * it.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (host->cmd->flags & MMC_RSP_BUSY) {
		if (host->cmd->data)
			DBG("Cannot wait for busy signal when also "
				"doing a data transfer");
		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
			/* Wait for the data-end IRQ (see sdhci_data_irq). */
			return;

		/* The controller does not support the end-of-busy IRQ,
		 * fall through and take the SDHCI_INT_RESPONSE */
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}
1337
/*
 * sdhci_data_irq - handle data-related interrupt status bits.
 *
 * Called from sdhci_irq() with the host lock held; @intmask contains
 * only the SDHCI_INT_DATA_MASK bits (already acknowledged in hardware).
 * Also doubles as the end-of-busy notification for busy-signalling
 * commands (see the comment in sdhci_cmd_irq()).
 */
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	BUG_ON(intmask == 0);

	if (!host->data) {
		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_END) {
				sdhci_finish_command(host);
				return;
			}
		}

		/* Otherwise the interrupt is spurious. */
		printk(KERN_ERR "%s: Got data interrupt 0x%08x even "
			"though no data operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR)
		host->data->error = -EIO;

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		/* PIO transfers: move data while the FIFO has room/data. */
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 */
		if (intmask & SDHCI_INT_DMA_END)
			sdhci_writel(host, sdhci_readl(host, SDHCI_DMA_ADDRESS),
				SDHCI_DMA_ADDRESS);

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}
1399
/*
 * sdhci_irq - top-level (shared) interrupt handler.
 *
 * Reads and acknowledges the interrupt status register, then dispatches
 * to the card-detect tasklet, the command and data sub-handlers, and
 * the SDIO card-interrupt path. Each status group is acknowledged by
 * writing its bits back to SDHCI_INT_STATUS before handling.
 */
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result;
	struct sdhci_host* host = dev_id;
	u32 intmask;
	int cardint = 0;

	spin_lock(&host->lock);

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);

	/* 0xffffffff means the device is gone; 0 means not our interrupt. */
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	DBG("*** %s got interrupt: 0x%08x\n",
		mmc_hostname(host->mmc), intmask);

	if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
			SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
		tasklet_schedule(&host->card_tasklet);
	}

	intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);

	if (intmask & SDHCI_INT_CMD_MASK) {
		sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
			SDHCI_INT_STATUS);
		sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
	}

	if (intmask & SDHCI_INT_DATA_MASK) {
		sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
			SDHCI_INT_STATUS);
		sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
	}

	intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);

	/* The summary error bit is implied by the individual error bits. */
	intmask &= ~SDHCI_INT_ERROR;

	if (intmask & SDHCI_INT_BUS_POWER) {
		printk(KERN_ERR "%s: Card is consuming too much power!\n",
			mmc_hostname(host->mmc));
		sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
	}

	intmask &= ~SDHCI_INT_BUS_POWER;

	if (intmask & SDHCI_INT_CARD_INT)
		cardint = 1;

	intmask &= ~SDHCI_INT_CARD_INT;

	/* Anything left over is unexpected; ack it so it can't storm. */
	if (intmask) {
		printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
			mmc_hostname(host->mmc), intmask);
		sdhci_dumpregs(host);

		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
	}

	result = IRQ_HANDLED;

	mmiowb();
out:
	spin_unlock(&host->lock);

	/*
	 * We have to delay this as it calls back into the driver.
	 */
	if (cardint)
		mmc_signal_sdio_irq(host->mmc);

	return result;
}
1478
1479/*****************************************************************************\
1480 * *
1481 * Suspend/resume *
1482 * *
1483\*****************************************************************************/
1484
1485#ifdef CONFIG_PM
1486
b8c86fc5 1487int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
d129bceb 1488{
b8c86fc5 1489 int ret;
a715dfc7 1490
7260cf5e
AV
1491 sdhci_disable_card_detection(host);
1492
b8c86fc5
PO
1493 ret = mmc_suspend_host(host->mmc, state);
1494 if (ret)
1495 return ret;
a715dfc7 1496
b8c86fc5 1497 free_irq(host->irq, host);
d129bceb
PO
1498
1499 return 0;
1500}
1501
b8c86fc5 1502EXPORT_SYMBOL_GPL(sdhci_suspend_host);
d129bceb 1503
b8c86fc5
PO
/*
 * sdhci_resume_host - resume-time hook for SDHCI glue drivers.
 *
 * Re-enables DMA if in use, re-requests the IRQ that suspend released,
 * reinitializes the controller, resumes the MMC core side, and finally
 * turns card-detection IRQs back on. Returns 0 or a negative errno.
 */
int sdhci_resume_host(struct sdhci_host *host)
{
	int ret;

	if (host->flags & SDHCI_USE_DMA) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	/* IRQ was freed in sdhci_suspend_host(); get it back first. */
	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
			  mmc_hostname(host->mmc), host);
	if (ret)
		return ret;

	sdhci_init(host);
	mmiowb();

	ret = mmc_resume_host(host->mmc);
	if (ret)
		return ret;

	sdhci_enable_card_detection(host);

	return 0;
}

EXPORT_SYMBOL_GPL(sdhci_resume_host);
d129bceb
PO
1531
1532#endif /* CONFIG_PM */
1533
1534/*****************************************************************************\
1535 * *
b8c86fc5 1536 * Device allocation/registration *
d129bceb
PO
1537 * *
1538\*****************************************************************************/
1539
b8c86fc5
PO
1540struct sdhci_host *sdhci_alloc_host(struct device *dev,
1541 size_t priv_size)
d129bceb 1542{
d129bceb
PO
1543 struct mmc_host *mmc;
1544 struct sdhci_host *host;
1545
b8c86fc5 1546 WARN_ON(dev == NULL);
d129bceb 1547
b8c86fc5 1548 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
d129bceb 1549 if (!mmc)
b8c86fc5 1550 return ERR_PTR(-ENOMEM);
d129bceb
PO
1551
1552 host = mmc_priv(mmc);
1553 host->mmc = mmc;
1554
b8c86fc5
PO
1555 return host;
1556}
8a4da143 1557
b8c86fc5 1558EXPORT_SYMBOL_GPL(sdhci_alloc_host);
d129bceb 1559
b8c86fc5
PO
/*
 * sdhci_add_host - probe, configure and register an SDHCI controller.
 *
 * Reads the version and capabilities registers, selects the transfer
 * mode (PIO, DMA or ADMA, honoring the broken-DMA/ADMA quirks),
 * derives clock limits and supported voltages, fills in the mmc_host
 * limits, wires up tasklets, timer and the shared IRQ, and registers
 * the host with the MMC core. Returns 0 or a negative errno; on
 * failure everything acquired here is torn down again.
 */
int sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	unsigned int caps;
	int ret;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	/* Module parameter overrides the glue driver's quirks wholesale. */
	if (debug_quirks)
		host->quirks = debug_quirks;

	sdhci_reset(host, SDHCI_RESET_ALL);

	host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (host->version & SDHCI_SPEC_VER_MASK)
				>> SDHCI_SPEC_VER_SHIFT;
	if (host->version > SDHCI_SPEC_200) {
		printk(KERN_ERR "%s: Unknown controller version (%d). "
			"You may experience problems.\n", mmc_hostname(mmc),
			host->version);
	}

	caps = sdhci_readl(host, SDHCI_CAPABILITIES);

	/* Decide whether plain DMA is usable at all. */
	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_DMA;
	else if (!(caps & SDHCI_CAN_DO_DMA))
		DBG("Controller doesn't have DMA capability\n");
	else
		host->flags |= SDHCI_USE_DMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
		(host->flags & SDHCI_USE_DMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_DMA;
	}

	/* ADMA requires spec 2.00 and the ADMA2 capability bit. */
	if (host->flags & SDHCI_USE_DMA) {
		if ((host->version >= SDHCI_SPEC_200) &&
				(caps & SDHCI_CAN_DO_ADMA2))
			host->flags |= SDHCI_USE_ADMA;
	}

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
		(host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	/* Let the glue driver set up DMA; fall back to PIO on failure. */
	if (host->flags & SDHCI_USE_DMA) {
		if (host->ops->enable_dma) {
			if (host->ops->enable_dma(host)) {
				printk(KERN_WARNING "%s: No suitable DMA "
					"available. Falling back to PIO.\n",
					mmc_hostname(mmc));
				host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA);
			}
		}
	}

	if (host->flags & SDHCI_USE_ADMA) {
		/*
		 * We need to allocate descriptors for all sg entries
		 * (128) and potentially one alignment transfer for
		 * each of those entries.
		 */
		host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
		host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
		if (!host->adma_desc || !host->align_buffer) {
			kfree(host->adma_desc);
			kfree(host->align_buffer);
			printk(KERN_WARNING "%s: Unable to allocate ADMA "
				"buffers. Falling back to standard DMA.\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		}
	}

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & SDHCI_USE_DMA)) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
	}

	/* Base clock, in MHz per the capabilities register. */
	host->max_clk =
		(caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
	if (host->max_clk == 0) {
		printk(KERN_ERR "%s: Hardware doesn't specify base clock "
			"frequency.\n", mmc_hostname(mmc));
		return -ENODEV;
	}
	host->max_clk *= 1000000;

	host->timeout_clk =
		(caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
	if (host->timeout_clk == 0) {
		printk(KERN_ERR "%s: Hardware doesn't specify timeout clock "
			"frequency.\n", mmc_hostname(mmc));
		return -ENODEV;
	}
	/* Unit bit: timeout clock is in MHz rather than kHz. */
	if (caps & SDHCI_TIMEOUT_CLK_UNIT)
		host->timeout_clk *= 1000;

	/*
	 * Set host parameters.
	 */
	mmc->ops = &sdhci_ops;
	mmc->f_min = host->max_clk / 256;
	mmc->f_max = host->max_clk;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;

	if (caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	mmc->ocr_avail = 0;
	if (caps & SDHCI_CAN_VDD_330)
		mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
	if (caps & SDHCI_CAN_VDD_300)
		mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
	if (caps & SDHCI_CAN_VDD_180)
		mmc->ocr_avail |= MMC_VDD_165_195;

	if (mmc->ocr_avail == 0) {
		printk(KERN_ERR "%s: Hardware doesn't report any "
			"support voltages.\n", mmc_hostname(mmc));
		return -ENODEV;
	}

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_hw_segs = 128;
	else if (host->flags & SDHCI_USE_DMA)
		mmc->max_hw_segs = 1;
	else /* PIO */
		mmc->max_hw_segs = 128;
	mmc->max_phys_segs = 128;

	/*
	 * Maximum number of sectors in one transfer. Limited by DMA boundary
	 * size (512KiB).
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_seg_size = 65536;
	else
		mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
	if (mmc->max_blk_size >= 3) {
		printk(KERN_WARNING "%s: Invalid maximum block size, "
			"assuming 512 bytes\n", mmc_hostname(mmc));
		mmc->max_blk_size = 512;
	} else
		mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = 65535;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->card_tasklet,
		sdhci_tasklet_card, (unsigned long)host);
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);

	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
		mmc_hostname(mmc), host);
	if (ret)
		goto untasklet;

	sdhci_init(host);

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

#ifdef SDHCI_USE_LEDS_CLASS
	snprintf(host->led_name, sizeof(host->led_name),
		"%s::", mmc_hostname(mmc));
	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	ret = led_classdev_register(mmc_dev(mmc), &host->led);
	if (ret)
		goto reset;
#endif

	mmiowb();

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA)?"A":"",
		(host->flags & SDHCI_USE_DMA)?"DMA":"PIO");

	sdhci_enable_card_detection(host);

	return 0;

	/* Error unwind: release in reverse order of acquisition. */
#ifdef SDHCI_USE_LEDS_CLASS
reset:
	sdhci_reset(host, SDHCI_RESET_ALL);
	free_irq(host->irq, host);
#endif
untasklet:
	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);

	return ret;
}

EXPORT_SYMBOL_GPL(sdhci_add_host);
d129bceb 1803
/*
 * sdhci_remove_host - unregister and tear down an SDHCI controller.
 * @dead: non-zero when the hardware is already gone (e.g. surprise
 *        removal); register accesses are then skipped.
 *
 * Aborts any in-flight request, unregisters from the MMC core, frees
 * the IRQ, kills timer and tasklets, and releases the ADMA buffers.
 */
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		/* Makes the request/ios paths bail out early from now on. */
		host->flags |= SDHCI_DEVICE_DEAD;

		if (host->mrq) {
			printk(KERN_ERR "%s: Controller removed during "
				" transfer!\n", mmc_hostname(host->mmc));

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(host->mmc);

#ifdef SDHCI_USE_LEDS_CLASS
	led_classdev_unregister(&host->led);
#endif

	/* Only touch the hardware if it is still there. */
	if (!dead)
		sdhci_reset(host, SDHCI_RESET_ALL);

	free_irq(host->irq, host);

	del_timer_sync(&host->timer);

	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);

	kfree(host->adma_desc);
	kfree(host->align_buffer);

	host->adma_desc = NULL;
	host->align_buffer = NULL;
}

EXPORT_SYMBOL_GPL(sdhci_remove_host);
d129bceb 1850
/*
 * sdhci_free_host - release a host allocated with sdhci_alloc_host().
 * The sdhci_host is embedded in the mmc_host, so freeing that frees both.
 */
void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}

EXPORT_SYMBOL_GPL(sdhci_free_host);
d129bceb
PO
1857
1858/*****************************************************************************\
1859 * *
1860 * Driver init/exit *
1861 * *
1862\*****************************************************************************/
1863
/* Module init: the core library only announces itself; glue drivers
 * register actual controllers via sdhci_add_host(). */
static int __init sdhci_drv_init(void)
{
	printk(KERN_INFO DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}
1872
/* Module exit: nothing to undo; hosts are removed by their glue drivers. */
static void __exit sdhci_drv_exit(void)
{
}
1876
1877module_init(sdhci_drv_init);
1878module_exit(sdhci_drv_exit);
1879
df673b22 1880module_param(debug_quirks, uint, 0444);
67435274 1881
d129bceb 1882MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
b8c86fc5 1883MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
d129bceb 1884MODULE_LICENSE("GPL");
67435274 1885
df673b22 1886MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");