/*
 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include <linux/leds.h>

#include <linux/mmc/host.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)

#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
	defined(CONFIG_MMC_SDHCI_MODULE))
#define SDHCI_USE_LEDS_CLASS
#endif

static unsigned int debug_quirks = 0;

static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
static void sdhci_finish_command(struct sdhci_host *);

46static void sdhci_dumpregs(struct sdhci_host *host)
47{
48 printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n");
49
50 printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
51 sdhci_readl(host, SDHCI_DMA_ADDRESS),
52 sdhci_readw(host, SDHCI_HOST_VERSION));
d129bceb 53 printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
54 sdhci_readw(host, SDHCI_BLOCK_SIZE),
55 sdhci_readw(host, SDHCI_BLOCK_COUNT));
d129bceb 56 printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
57 sdhci_readl(host, SDHCI_ARGUMENT),
58 sdhci_readw(host, SDHCI_TRANSFER_MODE));
d129bceb 59 printk(KERN_DEBUG DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
60 sdhci_readl(host, SDHCI_PRESENT_STATE),
61 sdhci_readb(host, SDHCI_HOST_CONTROL));
d129bceb 62 printk(KERN_DEBUG DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
63 sdhci_readb(host, SDHCI_POWER_CONTROL),
64 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
d129bceb 65 printk(KERN_DEBUG DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
66 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
67 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
d129bceb 68 printk(KERN_DEBUG DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
69 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
70 sdhci_readl(host, SDHCI_INT_STATUS));
d129bceb 71 printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
72 sdhci_readl(host, SDHCI_INT_ENABLE),
73 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
d129bceb 74 printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
75 sdhci_readw(host, SDHCI_ACMD12_ERR),
76 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
d129bceb 77 printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Max curr: 0x%08x\n",
78 sdhci_readl(host, SDHCI_CAPABILITIES),
79 sdhci_readl(host, SDHCI_MAX_CURRENT));
d129bceb 80
81 if (host->flags & SDHCI_USE_ADMA)
82 printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
83 readl(host->ioaddr + SDHCI_ADMA_ERROR),
84 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
85
86 printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
87}
88
89/*****************************************************************************\
90 * *
91 * Low level functions *
92 * *
93\*****************************************************************************/
94
7260cf5e
AV
95static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
96{
97 u32 ier;
98
99 ier = sdhci_readl(host, SDHCI_INT_ENABLE);
100 ier &= ~clear;
101 ier |= set;
102 sdhci_writel(host, ier, SDHCI_INT_ENABLE);
103 sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
104}
105
106static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs)
107{
108 sdhci_clear_set_irqs(host, 0, irqs);
109}
110
111static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs)
112{
113 sdhci_clear_set_irqs(host, irqs, 0);
114}
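/*
 * Illustrative only: the helpers above keep SDHCI_INT_ENABLE (which gates
 * status latching) and SDHCI_SIGNAL_ENABLE (which gates IRQ generation) in
 * sync. A minimal usage sketch, assuming a valid struct sdhci_host *host:
 *
 *	sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);	// allow SDIO card IRQs
 *	sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);	// quiesce them again
 */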
115
116static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
117{
118 u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT;
119
68d1fb7e
AV
120 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
121 return;
122
7260cf5e
AV
123 if (enable)
124 sdhci_unmask_irqs(host, irqs);
125 else
126 sdhci_mask_irqs(host, irqs);
127}
128
129static void sdhci_enable_card_detection(struct sdhci_host *host)
130{
131 sdhci_set_card_detection(host, true);
132}
133
134static void sdhci_disable_card_detection(struct sdhci_host *host)
135{
136 sdhci_set_card_detection(host, false);
137}
138
d129bceb
PO
139static void sdhci_reset(struct sdhci_host *host, u8 mask)
140{
e16514d8 141 unsigned long timeout;
063a9dbb 142 u32 uninitialized_var(ier);
e16514d8 143
b8c86fc5 144 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
4e4141a5 145 if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
146 SDHCI_CARD_PRESENT))
147 return;
148 }
149
063a9dbb
AV
150 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
151 ier = sdhci_readl(host, SDHCI_INT_ENABLE);
152
4e4141a5 153 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
d129bceb 154
e16514d8 155 if (mask & SDHCI_RESET_ALL)
156 host->clock = 0;
157
e16514d8
PO
158 /* Wait max 100 ms */
159 timeout = 100;
160
161 /* hw clears the bit when it's done */
4e4141a5 162 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
e16514d8 163 if (timeout == 0) {
acf1da45 164 printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
165 mmc_hostname(host->mmc), (int)mask);
166 sdhci_dumpregs(host);
167 return;
168 }
169 timeout--;
170 mdelay(1);
d129bceb 171 }
063a9dbb
AV
172
173 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
174 sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
d129bceb
PO
175}
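/*
 * Illustrative only: a sketch of how the reset masks above are used in the
 * rest of this file. SDHCI_RESET_ALL also clears host->clock, so the clock
 * must be reprogrammed afterwards; the narrower resets are what the error
 * paths use:
 *
 *	sdhci_reset(host, SDHCI_RESET_ALL);			// full init
 *	sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);	// after errors
 */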
176
177static void sdhci_init(struct sdhci_host *host)
178{
d129bceb
PO
179 sdhci_reset(host, SDHCI_RESET_ALL);
180
7260cf5e
AV
181 sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
182 SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
183 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
184 SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
6aa943ab 185 SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE);
7260cf5e 186}
d129bceb 187
7260cf5e
AV
188static void sdhci_reinit(struct sdhci_host *host)
189{
190 sdhci_init(host);
191 sdhci_enable_card_detection(host);
d129bceb
PO
192}
193
194static void sdhci_activate_led(struct sdhci_host *host)
195{
196 u8 ctrl;
197
4e4141a5 198 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
d129bceb 199 ctrl |= SDHCI_CTRL_LED;
4e4141a5 200 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
d129bceb
PO
201}
202
203static void sdhci_deactivate_led(struct sdhci_host *host)
204{
205 u8 ctrl;
206
4e4141a5 207 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
d129bceb 208 ctrl &= ~SDHCI_CTRL_LED;
4e4141a5 209 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
d129bceb
PO
210}
211
f9134319 212#ifdef SDHCI_USE_LEDS_CLASS
2f730fec
PO
213static void sdhci_led_control(struct led_classdev *led,
214 enum led_brightness brightness)
215{
216 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
217 unsigned long flags;
218
219 spin_lock_irqsave(&host->lock, flags);
220
221 if (brightness == LED_OFF)
222 sdhci_deactivate_led(host);
223 else
224 sdhci_activate_led(host);
225
226 spin_unlock_irqrestore(&host->lock, flags);
227}
228#endif
229
d129bceb
PO
230/*****************************************************************************\
231 * *
232 * Core functions *
233 * *
234\*****************************************************************************/
235
a406f5a3 236static void sdhci_read_block_pio(struct sdhci_host *host)
d129bceb 237{
7659150c
PO
238 unsigned long flags;
239 size_t blksize, len, chunk;
7244b85b 240 u32 uninitialized_var(scratch);
7659150c 241 u8 *buf;
d129bceb 242
a406f5a3 243 DBG("PIO reading\n");
d129bceb 244
a406f5a3 245 blksize = host->data->blksz;
7659150c 246 chunk = 0;
d129bceb 247
7659150c 248 local_irq_save(flags);
d129bceb 249
a406f5a3 250 while (blksize) {
7659150c
PO
251 if (!sg_miter_next(&host->sg_miter))
252 BUG();
d129bceb 253
7659150c 254 len = min(host->sg_miter.length, blksize);
d129bceb 255
7659150c
PO
256 blksize -= len;
257 host->sg_miter.consumed = len;
14d836e7 258
7659150c 259 buf = host->sg_miter.addr;
d129bceb 260
7659150c
PO
261 while (len) {
262 if (chunk == 0) {
4e4141a5 263 scratch = sdhci_readl(host, SDHCI_BUFFER);
7659150c 264 chunk = 4;
a406f5a3 265 }
7659150c
PO
266
267 *buf = scratch & 0xFF;
268
269 buf++;
270 scratch >>= 8;
271 chunk--;
272 len--;
d129bceb 273 }
a406f5a3 274 }
7659150c
PO
275
276 sg_miter_stop(&host->sg_miter);
277
278 local_irq_restore(flags);
a406f5a3 279}
d129bceb 280
a406f5a3
PO
281static void sdhci_write_block_pio(struct sdhci_host *host)
282{
7659150c
PO
283 unsigned long flags;
284 size_t blksize, len, chunk;
285 u32 scratch;
286 u8 *buf;
d129bceb 287
a406f5a3
PO
288 DBG("PIO writing\n");
289
290 blksize = host->data->blksz;
7659150c
PO
291 chunk = 0;
292 scratch = 0;
d129bceb 293
7659150c 294 local_irq_save(flags);
d129bceb 295
a406f5a3 296 while (blksize) {
7659150c
PO
297 if (!sg_miter_next(&host->sg_miter))
298 BUG();
a406f5a3 299
7659150c
PO
300 len = min(host->sg_miter.length, blksize);
301
302 blksize -= len;
303 host->sg_miter.consumed = len;
304
305 buf = host->sg_miter.addr;
d129bceb 306
7659150c
PO
307 while (len) {
308 scratch |= (u32)*buf << (chunk * 8);
309
310 buf++;
311 chunk++;
312 len--;
313
314 if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
4e4141a5 315 sdhci_writel(host, scratch, SDHCI_BUFFER);
7659150c
PO
316 chunk = 0;
317 scratch = 0;
d129bceb 318 }
d129bceb
PO
319 }
320 }
7659150c
PO
321
322 sg_miter_stop(&host->sg_miter);
323
324 local_irq_restore(flags);
a406f5a3
PO
325}
326
327static void sdhci_transfer_pio(struct sdhci_host *host)
328{
329 u32 mask;
330
331 BUG_ON(!host->data);
332
7659150c 333 if (host->blocks == 0)
a406f5a3
PO
334 return;
335
336 if (host->data->flags & MMC_DATA_READ)
337 mask = SDHCI_DATA_AVAILABLE;
338 else
339 mask = SDHCI_SPACE_AVAILABLE;
340
4a3cba32
PO
341 /*
342 * Some controllers (JMicron JMB38x) mess up the buffer bits
343 * for transfers < 4 bytes. As long as it is just one block,
344 * we can ignore the bits.
345 */
346 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
347 (host->data->blocks == 1))
348 mask = ~0;
349
4e4141a5 350 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
3e3bf207
AV
351 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
352 udelay(100);
353
a406f5a3
PO
354 if (host->data->flags & MMC_DATA_READ)
355 sdhci_read_block_pio(host);
356 else
357 sdhci_write_block_pio(host);
d129bceb 358
7659150c
PO
359 host->blocks--;
360 if (host->blocks == 0)
a406f5a3 361 break;
a406f5a3 362 }
d129bceb 363
a406f5a3 364 DBG("PIO transfer complete.\n");
d129bceb
PO
365}
366
2134a922
PO
367static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
368{
369 local_irq_save(*flags);
370 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
371}
372
373static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
374{
375 kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
376 local_irq_restore(*flags);
377}
378
118cd17d
BD
379static void sdhci_set_adma_desc(u8 *desc, u32 addr, int len, unsigned cmd)
380{
9e506f35
BD
381 __le32 *dataddr = (__le32 __force *)(desc + 4);
382 __le16 *cmdlen = (__le16 __force *)desc;
118cd17d 383
9e506f35
BD
384 /* SDHCI specification says ADMA descriptors should be 4 byte
385 * aligned, so using 16 or 32bit operations should be safe. */
118cd17d 386
9e506f35
BD
387 cmdlen[0] = cpu_to_le16(cmd);
388 cmdlen[1] = cpu_to_le16(len);
389
390 dataddr[0] = cpu_to_le32(addr);
118cd17d
BD
391}
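/*
 * Illustrative only: each ADMA2 descriptor built above is 8 bytes,
 * little-endian, laid out as
 *
 *	bytes 0-1: attribute/command (bit 0 = valid, bit 1 = end,
 *	           bits 5:4 = action, where 0x2 means "tran")
 *	bytes 2-3: length
 *	bytes 4-7: 32-bit data address
 *
 * so the values used later in this file decode as 0x21 = transfer data,
 * valid, and 0x03 = nop, end of table, valid. For example, a hypothetical
 * 512-byte transfer at DMA address 0x1000 would be emitted as
 *
 *	sdhci_set_adma_desc(desc, 0x1000, 512, 0x21);
 */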
392
8f1934ce 393static int sdhci_adma_table_pre(struct sdhci_host *host,
2134a922
PO
394 struct mmc_data *data)
395{
396 int direction;
397
398 u8 *desc;
399 u8 *align;
400 dma_addr_t addr;
401 dma_addr_t align_addr;
402 int len, offset;
403
404 struct scatterlist *sg;
405 int i;
406 char *buffer;
407 unsigned long flags;
408
409 /*
410 * The spec does not specify endianness of descriptor table.
411 * We currently guess that it is LE.
412 */
413
414 if (data->flags & MMC_DATA_READ)
415 direction = DMA_FROM_DEVICE;
416 else
417 direction = DMA_TO_DEVICE;
418
419 /*
420 * The ADMA descriptor table is mapped further down as we
421 * need to fill it with data first.
422 */
423
424 host->align_addr = dma_map_single(mmc_dev(host->mmc),
425 host->align_buffer, 128 * 4, direction);
8d8bb39b 426 if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
8f1934ce 427 goto fail;
2134a922
PO
428 BUG_ON(host->align_addr & 0x3);
429
430 host->sg_count = dma_map_sg(mmc_dev(host->mmc),
431 data->sg, data->sg_len, direction);
8f1934ce
PO
432 if (host->sg_count == 0)
433 goto unmap_align;
2134a922
PO
434
435 desc = host->adma_desc;
436 align = host->align_buffer;
437
438 align_addr = host->align_addr;
439
440 for_each_sg(data->sg, sg, host->sg_count, i) {
441 addr = sg_dma_address(sg);
442 len = sg_dma_len(sg);
443
444 /*
445 * The SDHCI specification states that ADMA
446 * addresses must be 32-bit aligned. If they
447 * aren't, then we use a bounce buffer for
448 * the (up to three) bytes that screw up the
449 * alignment.
450 */
451 offset = (4 - (addr & 0x3)) & 0x3;
452 if (offset) {
453 if (data->flags & MMC_DATA_WRITE) {
454 buffer = sdhci_kmap_atomic(sg, &flags);
6cefd05f 455 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
456 memcpy(align, buffer, offset);
457 sdhci_kunmap_atomic(buffer, &flags);
458 }
459
118cd17d
BD
460 /* tran, valid */
461 sdhci_set_adma_desc(desc, align_addr, offset, 0x21);
2134a922
PO
462
463 BUG_ON(offset > 65536);
464
2134a922
PO
465 align += 4;
466 align_addr += 4;
467
468 desc += 8;
469
470 addr += offset;
471 len -= offset;
472 }
473
2134a922
PO
474 BUG_ON(len > 65536);
475
118cd17d
BD
476 /* tran, valid */
477 sdhci_set_adma_desc(desc, addr, len, 0x21);
2134a922
PO
478 desc += 8;
479
480 /*
481 * If this triggers then we have a calculation bug
482 * somewhere. :/
483 */
484 WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
485 }
486
487 /*
488 * Add a terminating entry.
489 */
2134a922 490
118cd17d
BD
491 /* nop, end, valid */
492 sdhci_set_adma_desc(desc, 0, 0, 0x3);
2134a922
PO
493
494 /*
495 * Resync align buffer as we might have changed it.
496 */
497 if (data->flags & MMC_DATA_WRITE) {
498 dma_sync_single_for_device(mmc_dev(host->mmc),
499 host->align_addr, 128 * 4, direction);
500 }
501
502 host->adma_addr = dma_map_single(mmc_dev(host->mmc),
503 host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
980167b7 504 if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
8f1934ce 505 goto unmap_entries;
2134a922 506 BUG_ON(host->adma_addr & 0x3);
8f1934ce
PO
507
508 return 0;
509
510unmap_entries:
511 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
512 data->sg_len, direction);
513unmap_align:
514 dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
515 128 * 4, direction);
516fail:
517 return -EINVAL;
2134a922
PO
518}
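/*
 * Illustrative only: a sketch of what the table builder above produces for
 * a hypothetical scatterlist entry whose DMA address is not 4-byte aligned,
 * say address 0x1002 with length 0x200. The first (up to three) bytes go
 * through the bounce buffer, the rest is described directly:
 *
 *	desc[0]: addr = align_addr, len = 2,     attr = 0x21  (bounced head)
 *	desc[1]: addr = 0x1004,     len = 0x1fe, attr = 0x21  (remainder)
 *	...
 *	last:    addr = 0,          len = 0,     attr = 0x03  (end of table)
 */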
519
520static void sdhci_adma_table_post(struct sdhci_host *host,
521 struct mmc_data *data)
522{
523 int direction;
524
525 struct scatterlist *sg;
526 int i, size;
527 u8 *align;
528 char *buffer;
529 unsigned long flags;
530
531 if (data->flags & MMC_DATA_READ)
532 direction = DMA_FROM_DEVICE;
533 else
534 direction = DMA_TO_DEVICE;
535
536 dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
537 (128 * 2 + 1) * 4, DMA_TO_DEVICE);
538
539 dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
540 128 * 4, direction);
541
542 if (data->flags & MMC_DATA_READ) {
543 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
544 data->sg_len, direction);
545
546 align = host->align_buffer;
547
548 for_each_sg(data->sg, sg, host->sg_count, i) {
549 if (sg_dma_address(sg) & 0x3) {
550 size = 4 - (sg_dma_address(sg) & 0x3);
551
552 buffer = sdhci_kmap_atomic(sg, &flags);
6cefd05f 553 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
2134a922
PO
554 memcpy(buffer, align, size);
555 sdhci_kunmap_atomic(buffer, &flags);
556
557 align += 4;
558 }
559 }
560 }
561
562 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
563 data->sg_len, direction);
564}
565
ee53ab5d 566static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
d129bceb 567{
1c8cde92
PO
568 u8 count;
569 unsigned target_timeout, current_timeout;
d129bceb 570
ee53ab5d
PO
571 /*
572 * If the host controller provides us with an incorrect timeout
573 * value, just skip the check and use 0xE. The hardware may take
574 * longer to time out, but that's much better than having a too-short
575 * timeout value.
576 */
11a2f1b7 577 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
ee53ab5d 578 return 0xE;
e538fbe8 579
1c8cde92
PO
580 /* timeout in us */
581 target_timeout = data->timeout_ns / 1000 +
582 data->timeout_clks / host->clock;
d129bceb 583
81b39802
AV
584 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
585 host->timeout_clk = host->clock / 1000;
586
1c8cde92
PO
587 /*
588 * Figure out needed cycles.
589 * We do this in steps in order to fit inside a 32 bit int.
590 * The first step is the minimum timeout, which will have a
591 * minimum resolution of 6 bits:
592 * (1) 2^13*1000 > 2^22,
593 * (2) host->timeout_clk < 2^16
594 * =>
595 * (1) / (2) > 2^6
596 */
597 count = 0;
598 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
599 while (current_timeout < target_timeout) {
600 count++;
601 current_timeout <<= 1;
602 if (count >= 0xF)
603 break;
604 }
605
606 if (count >= 0xF) {
607 printk(KERN_WARNING "%s: Too large timeout requested!\n",
608 mmc_hostname(host->mmc));
609 count = 0xE;
610 }
611
ee53ab5d
PO
612 return count;
613}
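/*
 * Illustrative only: a worked example of the count calculation above,
 * assuming host->timeout_clk = 50000 (a 50 MHz timeout clock expressed in
 * kHz) and a requested target_timeout of 100000 us (100 ms):
 *
 *	current_timeout = (1 << 13) * 1000 / 50000 = 163 us    (count = 0)
 *	doubling until >= 100000 us reaches roughly 167 ms at count = 10
 *
 * so 0xA is written to SDHCI_TIMEOUT_CONTROL, i.e. the controller times out
 * after 2^(13 + 10) = 2^23 cycles of the timeout clock.
 */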
614
6aa943ab
AV
615static void sdhci_set_transfer_irqs(struct sdhci_host *host)
616{
617 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
618 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
619
620 if (host->flags & SDHCI_REQ_USE_DMA)
621 sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
622 else
623 sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
624}
625
ee53ab5d
PO
626static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
627{
628 u8 count;
2134a922 629 u8 ctrl;
8f1934ce 630 int ret;
ee53ab5d
PO
631
632 WARN_ON(host->data);
633
634 if (data == NULL)
635 return;
636
637 /* Sanity checks */
638 BUG_ON(data->blksz * data->blocks > 524288);
639 BUG_ON(data->blksz > host->mmc->max_blk_size);
640 BUG_ON(data->blocks > 65535);
641
642 host->data = data;
643 host->data_early = 0;
644
645 count = sdhci_calc_timeout(host, data);
4e4141a5 646 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
d129bceb 647
a13abc7b 648 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
c9fddbc4
PO
649 host->flags |= SDHCI_REQ_USE_DMA;
650
2134a922
PO
651 /*
652 * FIXME: This doesn't account for merging when mapping the
653 * scatterlist.
654 */
655 if (host->flags & SDHCI_REQ_USE_DMA) {
656 int broken, i;
657 struct scatterlist *sg;
658
659 broken = 0;
660 if (host->flags & SDHCI_USE_ADMA) {
661 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
662 broken = 1;
663 } else {
664 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
665 broken = 1;
666 }
667
668 if (unlikely(broken)) {
669 for_each_sg(data->sg, sg, data->sg_len, i) {
670 if (sg->length & 0x3) {
671 DBG("Reverting to PIO because of "
672 "transfer size (%d)\n",
673 sg->length);
674 host->flags &= ~SDHCI_REQ_USE_DMA;
675 break;
676 }
677 }
678 }
c9fddbc4
PO
679 }
680
681 /*
682 * The assumption here being that alignment is the same after
683 * translation to device address space.
684 */
2134a922
PO
685 if (host->flags & SDHCI_REQ_USE_DMA) {
686 int broken, i;
687 struct scatterlist *sg;
688
689 broken = 0;
690 if (host->flags & SDHCI_USE_ADMA) {
691 /*
692 * As we use 3 byte chunks to work around
693 * alignment problems, we need to check this
694 * quirk.
695 */
696 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
697 broken = 1;
698 } else {
699 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
700 broken = 1;
701 }
702
703 if (unlikely(broken)) {
704 for_each_sg(data->sg, sg, data->sg_len, i) {
705 if (sg->offset & 0x3) {
706 DBG("Reverting to PIO because of "
707 "bad alignment\n");
708 host->flags &= ~SDHCI_REQ_USE_DMA;
709 break;
710 }
711 }
712 }
713 }
714
8f1934ce
PO
715 if (host->flags & SDHCI_REQ_USE_DMA) {
716 if (host->flags & SDHCI_USE_ADMA) {
717 ret = sdhci_adma_table_pre(host, data);
718 if (ret) {
719 /*
720 * This only happens when someone fed
721 * us an invalid request.
722 */
723 WARN_ON(1);
ebd6d357 724 host->flags &= ~SDHCI_REQ_USE_DMA;
8f1934ce 725 } else {
4e4141a5
AV
726 sdhci_writel(host, host->adma_addr,
727 SDHCI_ADMA_ADDRESS);
8f1934ce
PO
728 }
729 } else {
c8b3e02e 730 int sg_cnt;
8f1934ce 731
c8b3e02e 732 sg_cnt = dma_map_sg(mmc_dev(host->mmc),
8f1934ce
PO
733 data->sg, data->sg_len,
734 (data->flags & MMC_DATA_READ) ?
735 DMA_FROM_DEVICE :
736 DMA_TO_DEVICE);
c8b3e02e 737 if (sg_cnt == 0) {
8f1934ce
PO
738 /*
739 * This only happens when someone fed
740 * us an invalid request.
741 */
742 WARN_ON(1);
ebd6d357 743 host->flags &= ~SDHCI_REQ_USE_DMA;
8f1934ce 744 } else {
719a61b4 745 WARN_ON(sg_cnt != 1);
4e4141a5
AV
746 sdhci_writel(host, sg_dma_address(data->sg),
747 SDHCI_DMA_ADDRESS);
8f1934ce
PO
748 }
749 }
750 }
751
2134a922
PO
752 /*
753 * Always adjust the DMA selection as some controllers
754 * (e.g. JMicron) can't do PIO properly when the selection
755 * is ADMA.
756 */
757 if (host->version >= SDHCI_SPEC_200) {
4e4141a5 758 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2134a922
PO
759 ctrl &= ~SDHCI_CTRL_DMA_MASK;
760 if ((host->flags & SDHCI_REQ_USE_DMA) &&
761 (host->flags & SDHCI_USE_ADMA))
762 ctrl |= SDHCI_CTRL_ADMA32;
763 else
764 ctrl |= SDHCI_CTRL_SDMA;
4e4141a5 765 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
c9fddbc4
PO
766 }
767
8f1934ce 768 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
da60a91d
SAS
769 int flags;
770
771 flags = SG_MITER_ATOMIC;
772 if (host->data->flags & MMC_DATA_READ)
773 flags |= SG_MITER_TO_SG;
774 else
775 flags |= SG_MITER_FROM_SG;
776 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
7659150c 777 host->blocks = data->blocks;
d129bceb 778 }
c7fa9963 779
6aa943ab
AV
780 sdhci_set_transfer_irqs(host);
781
bab76961 782 /* We do not handle DMA boundaries, so set it to max (512 KiB) */
4e4141a5
AV
783 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, data->blksz), SDHCI_BLOCK_SIZE);
784 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
c7fa9963
PO
785}
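/*
 * Illustrative only: in the SDMA case above the hardware takes a single
 * buffer address, which is why the code warns unless dma_map_sg() collapsed
 * the request into exactly one segment (sg_cnt == 1), and why the block
 * size register is programmed with SDHCI_MAKE_BLKSZ(7, blksz): boundary
 * argument 7 selects the maximum 512 KiB SDMA boundary, matching the
 * 524288-byte cap placed on mmc->max_req_size later in this file.
 */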
786
787static void sdhci_set_transfer_mode(struct sdhci_host *host,
788 struct mmc_data *data)
789{
790 u16 mode;
791
c7fa9963
PO
792 if (data == NULL)
793 return;
794
e538fbe8
PO
795 WARN_ON(!host->data);
796
c7fa9963
PO
797 mode = SDHCI_TRNS_BLK_CNT_EN;
798 if (data->blocks > 1)
799 mode |= SDHCI_TRNS_MULTI;
800 if (data->flags & MMC_DATA_READ)
801 mode |= SDHCI_TRNS_READ;
c9fddbc4 802 if (host->flags & SDHCI_REQ_USE_DMA)
c7fa9963
PO
803 mode |= SDHCI_TRNS_DMA;
804
4e4141a5 805 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
d129bceb
PO
806}
807
808static void sdhci_finish_data(struct sdhci_host *host)
809{
810 struct mmc_data *data;
d129bceb
PO
811
812 BUG_ON(!host->data);
813
814 data = host->data;
815 host->data = NULL;
816
c9fddbc4 817 if (host->flags & SDHCI_REQ_USE_DMA) {
2134a922
PO
818 if (host->flags & SDHCI_USE_ADMA)
819 sdhci_adma_table_post(host, data);
820 else {
821 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
822 data->sg_len, (data->flags & MMC_DATA_READ) ?
823 DMA_FROM_DEVICE : DMA_TO_DEVICE);
824 }
d129bceb
PO
825 }
826
827 /*
828 * The specification states that the block count register must
829 * be updated, but it does not specify at what point in the
830 * data flow. That makes the register entirely useless to read
831 * back so we have to assume that nothing made it to the card
832 * in the event of an error.
d129bceb 833 */
c9b74c5b
PO
834 if (data->error)
835 data->bytes_xfered = 0;
d129bceb 836 else
c9b74c5b 837 data->bytes_xfered = data->blksz * data->blocks;
d129bceb 838
d129bceb
PO
839 if (data->stop) {
840 /*
841 * The controller needs a reset of internal state machines
842 * upon error conditions.
843 */
17b0429d 844 if (data->error) {
d129bceb
PO
845 sdhci_reset(host, SDHCI_RESET_CMD);
846 sdhci_reset(host, SDHCI_RESET_DATA);
847 }
848
849 sdhci_send_command(host, data->stop);
850 } else
851 tasklet_schedule(&host->finish_tasklet);
852}
853
854static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
855{
856 int flags;
fd2208d7 857 u32 mask;
7cb2c76f 858 unsigned long timeout;
d129bceb
PO
859
860 WARN_ON(host->cmd);
861
d129bceb 862 /* Wait max 10 ms */
7cb2c76f 863 timeout = 10;
fd2208d7
PO
864
865 mask = SDHCI_CMD_INHIBIT;
866 if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
867 mask |= SDHCI_DATA_INHIBIT;
868
	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
871 if (host->mrq->data && (cmd == host->mrq->data->stop))
872 mask &= ~SDHCI_DATA_INHIBIT;
873
4e4141a5 874 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
7cb2c76f 875 if (timeout == 0) {
d129bceb 876 printk(KERN_ERR "%s: Controller never released "
acf1da45 877 "inhibit bit(s).\n", mmc_hostname(host->mmc));
d129bceb 878 sdhci_dumpregs(host);
17b0429d 879 cmd->error = -EIO;
d129bceb
PO
880 tasklet_schedule(&host->finish_tasklet);
881 return;
882 }
7cb2c76f
PO
883 timeout--;
884 mdelay(1);
885 }
d129bceb
PO
886
887 mod_timer(&host->timer, jiffies + 10 * HZ);
888
889 host->cmd = cmd;
890
891 sdhci_prepare_data(host, cmd->data);
892
4e4141a5 893 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
d129bceb 894
c7fa9963
PO
895 sdhci_set_transfer_mode(host, cmd->data);
896
d129bceb 897 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
acf1da45 898 printk(KERN_ERR "%s: Unsupported response type!\n",
d129bceb 899 mmc_hostname(host->mmc));
17b0429d 900 cmd->error = -EINVAL;
d129bceb
PO
901 tasklet_schedule(&host->finish_tasklet);
902 return;
903 }
904
905 if (!(cmd->flags & MMC_RSP_PRESENT))
906 flags = SDHCI_CMD_RESP_NONE;
907 else if (cmd->flags & MMC_RSP_136)
908 flags = SDHCI_CMD_RESP_LONG;
909 else if (cmd->flags & MMC_RSP_BUSY)
910 flags = SDHCI_CMD_RESP_SHORT_BUSY;
911 else
912 flags = SDHCI_CMD_RESP_SHORT;
913
914 if (cmd->flags & MMC_RSP_CRC)
915 flags |= SDHCI_CMD_CRC;
916 if (cmd->flags & MMC_RSP_OPCODE)
917 flags |= SDHCI_CMD_INDEX;
918 if (cmd->data)
919 flags |= SDHCI_CMD_DATA;
920
4e4141a5 921 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
d129bceb
PO
922}
923
924static void sdhci_finish_command(struct sdhci_host *host)
925{
926 int i;
927
928 BUG_ON(host->cmd == NULL);
929
930 if (host->cmd->flags & MMC_RSP_PRESENT) {
931 if (host->cmd->flags & MMC_RSP_136) {
932 /* CRC is stripped so we need to do some shifting. */
933 for (i = 0;i < 4;i++) {
4e4141a5 934 host->cmd->resp[i] = sdhci_readl(host,
935 SDHCI_RESPONSE + (3-i)*4) << 8;
936 if (i != 3)
937 host->cmd->resp[i] |=
4e4141a5 938 sdhci_readb(host,
939 SDHCI_RESPONSE + (3-i)*4-1);
940 }
941 } else {
4e4141a5 942 host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
d129bceb
PO
943 }
944 }
945
17b0429d 946 host->cmd->error = 0;
d129bceb 947
e538fbe8
PO
948 if (host->data && host->data_early)
949 sdhci_finish_data(host);
950
951 if (!host->cmd->data)
d129bceb
PO
952 tasklet_schedule(&host->finish_tasklet);
953
954 host->cmd = NULL;
955}
956
957static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
958{
959 int div;
960 u16 clk;
7cb2c76f 961 unsigned long timeout;
d129bceb
PO
962
963 if (clock == host->clock)
964 return;
965
8114634c
AV
966 if (host->ops->set_clock) {
967 host->ops->set_clock(host, clock);
968 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
969 return;
970 }
971
4e4141a5 972 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
d129bceb
PO
973
974 if (clock == 0)
975 goto out;
976
977 for (div = 1;div < 256;div *= 2) {
978 if ((host->max_clk / div) <= clock)
979 break;
980 }
981 div >>= 1;
982
983 clk = div << SDHCI_DIVIDER_SHIFT;
984 clk |= SDHCI_CLOCK_INT_EN;
4e4141a5 985 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
d129bceb 986
27f6cb16
CB
987 /* Wait max 20 ms */
988 timeout = 20;
4e4141a5 989 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
7cb2c76f
PO
990 & SDHCI_CLOCK_INT_STABLE)) {
991 if (timeout == 0) {
acf1da45
PO
992 printk(KERN_ERR "%s: Internal clock never "
993 "stabilised.\n", mmc_hostname(host->mmc));
d129bceb
PO
994 sdhci_dumpregs(host);
995 return;
996 }
7cb2c76f
PO
997 timeout--;
998 mdelay(1);
999 }
d129bceb
PO
1000
1001 clk |= SDHCI_CLOCK_CARD_EN;
4e4141a5 1002 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
d129bceb
PO
1003
1004out:
1005 host->clock = clock;
1006}
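/*
 * Illustrative only: a worked example of the divider search above, assuming
 * host->max_clk = 48000000 and a requested clock of 25000000. The loop
 * stops at div = 2 (48 MHz / 2 <= 25 MHz), the final div >>= 1 leaves 1 in
 * the SDHCI divider field, and the resulting SD clock is
 * base / (2 * N) = 48 MHz / 2 = 24 MHz, i.e. the fastest standard divider
 * that does not exceed the requested frequency.
 */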
1007
146ad66e
PO
1008static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
1009{
1010 u8 pwr;
1011
ae628903
PO
1012 if (power == (unsigned short)-1)
1013 pwr = 0;
1014 else {
1015 switch (1 << power) {
1016 case MMC_VDD_165_195:
1017 pwr = SDHCI_POWER_180;
1018 break;
1019 case MMC_VDD_29_30:
1020 case MMC_VDD_30_31:
1021 pwr = SDHCI_POWER_300;
1022 break;
1023 case MMC_VDD_32_33:
1024 case MMC_VDD_33_34:
1025 pwr = SDHCI_POWER_330;
1026 break;
1027 default:
1028 BUG();
1029 }
1030 }
1031
1032 if (host->pwr == pwr)
146ad66e
PO
1033 return;
1034
ae628903
PO
1035 host->pwr = pwr;
1036
1037 if (pwr == 0) {
4e4141a5 1038 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
ae628903 1039 return;
9e9dc5f2
DS
1040 }
1041
1042 /*
1043 * Spec says that we should clear the power reg before setting
1044 * a new value. Some controllers don't seem to like this though.
1045 */
b8c86fc5 1046 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
4e4141a5 1047 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
146ad66e 1048
e08c1694 1049 /*
c71f6512 1050 * At least the Marvell CaFe chip gets confused if we set the voltage
	 * and turn on the power at the same time, so set the voltage first.
1052 */
11a2f1b7 1053 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
ae628903 1054 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
e08c1694 1055
ae628903 1056 pwr |= SDHCI_POWER_ON;
146ad66e 1057
ae628903 1058 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1059
1060 /*
	 * Some controllers need an extra 10ms delay before they
1062 * can apply clock after applying power
1063 */
11a2f1b7 1064 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
557b0697 1065 mdelay(10);
146ad66e
PO
1066}
1067
d129bceb
PO
1068/*****************************************************************************\
1069 * *
1070 * MMC callbacks *
1071 * *
1072\*****************************************************************************/
1073
1074static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1075{
1076 struct sdhci_host *host;
68d1fb7e 1077 bool present;
d129bceb
PO
1078 unsigned long flags;
1079
1080 host = mmc_priv(mmc);
1081
1082 spin_lock_irqsave(&host->lock, flags);
1083
1084 WARN_ON(host->mrq != NULL);
1085
f9134319 1086#ifndef SDHCI_USE_LEDS_CLASS
d129bceb 1087 sdhci_activate_led(host);
2f730fec 1088#endif
d129bceb
PO
1089
1090 host->mrq = mrq;
1091
68d1fb7e
AV
1092 /* If polling, assume that the card is always present. */
1093 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1094 present = true;
1095 else
1096 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
1097 SDHCI_CARD_PRESENT;
1098
1099 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
17b0429d 1100 host->mrq->cmd->error = -ENOMEDIUM;
d129bceb
PO
1101 tasklet_schedule(&host->finish_tasklet);
1102 } else
1103 sdhci_send_command(host, mrq->cmd);
1104
5f25a66f 1105 mmiowb();
d129bceb
PO
1106 spin_unlock_irqrestore(&host->lock, flags);
1107}
1108
1109static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1110{
1111 struct sdhci_host *host;
1112 unsigned long flags;
1113 u8 ctrl;
1114
1115 host = mmc_priv(mmc);
1116
1117 spin_lock_irqsave(&host->lock, flags);
1118
1e72859e
PO
1119 if (host->flags & SDHCI_DEVICE_DEAD)
1120 goto out;
1121
d129bceb
PO
1122 /*
1123 * Reset the chip on each power off.
1124 * Should clear out any weird states.
1125 */
1126 if (ios->power_mode == MMC_POWER_OFF) {
4e4141a5 1127 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
7260cf5e 1128 sdhci_reinit(host);
d129bceb
PO
1129 }
1130
1131 sdhci_set_clock(host, ios->clock);
1132
1133 if (ios->power_mode == MMC_POWER_OFF)
146ad66e 1134 sdhci_set_power(host, -1);
d129bceb 1135 else
146ad66e 1136 sdhci_set_power(host, ios->vdd);
d129bceb 1137
4e4141a5 1138 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
cd9277c0 1139
d129bceb
PO
1140 if (ios->bus_width == MMC_BUS_WIDTH_4)
1141 ctrl |= SDHCI_CTRL_4BITBUS;
1142 else
1143 ctrl &= ~SDHCI_CTRL_4BITBUS;
cd9277c0
PO
1144
1145 if (ios->timing == MMC_TIMING_SD_HS)
1146 ctrl |= SDHCI_CTRL_HISPD;
1147 else
1148 ctrl &= ~SDHCI_CTRL_HISPD;
1149
4e4141a5 1150 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
d129bceb 1151
b8352260
LD
1152 /*
1153 * Some (ENE) controllers go apeshit on some ios operation,
1154 * signalling timeout and CRC errors even on CMD0. Resetting
1155 * it on each ios seems to solve the problem.
1156 */
b8c86fc5 1157 if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
b8352260
LD
1158 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1159
1e72859e 1160out:
5f25a66f 1161 mmiowb();
d129bceb
PO
1162 spin_unlock_irqrestore(&host->lock, flags);
1163}
1164
1165static int sdhci_get_ro(struct mmc_host *mmc)
1166{
1167 struct sdhci_host *host;
1168 unsigned long flags;
1169 int present;
1170
1171 host = mmc_priv(mmc);
1172
1173 spin_lock_irqsave(&host->lock, flags);
1174
1e72859e
PO
1175 if (host->flags & SDHCI_DEVICE_DEAD)
1176 present = 0;
1177 else
4e4141a5 1178 present = sdhci_readl(host, SDHCI_PRESENT_STATE);
d129bceb
PO
1179
1180 spin_unlock_irqrestore(&host->lock, flags);
1181
c5075a10
AV
1182 if (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT)
1183 return !!(present & SDHCI_WRITE_PROTECT);
d129bceb
PO
1184 return !(present & SDHCI_WRITE_PROTECT);
1185}
1186
f75979b7
PO
1187static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1188{
1189 struct sdhci_host *host;
1190 unsigned long flags;
f75979b7
PO
1191
1192 host = mmc_priv(mmc);
1193
1194 spin_lock_irqsave(&host->lock, flags);
1195
1e72859e
PO
1196 if (host->flags & SDHCI_DEVICE_DEAD)
1197 goto out;
1198
f75979b7 1199 if (enable)
7260cf5e
AV
1200 sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
1201 else
1202 sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
1e72859e 1203out:
f75979b7
PO
1204 mmiowb();
1205
1206 spin_unlock_irqrestore(&host->lock, flags);
1207}
1208
ab7aefd0 1209static const struct mmc_host_ops sdhci_ops = {
d129bceb
PO
1210 .request = sdhci_request,
1211 .set_ios = sdhci_set_ios,
1212 .get_ro = sdhci_get_ro,
f75979b7 1213 .enable_sdio_irq = sdhci_enable_sdio_irq,
d129bceb
PO
1214};
1215
1216/*****************************************************************************\
1217 * *
1218 * Tasklets *
1219 * *
1220\*****************************************************************************/
1221
1222static void sdhci_tasklet_card(unsigned long param)
1223{
1224 struct sdhci_host *host;
1225 unsigned long flags;
1226
1227 host = (struct sdhci_host*)param;
1228
1229 spin_lock_irqsave(&host->lock, flags);
1230
4e4141a5 1231 if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
d129bceb
PO
1232 if (host->mrq) {
1233 printk(KERN_ERR "%s: Card removed during transfer!\n",
1234 mmc_hostname(host->mmc));
1235 printk(KERN_ERR "%s: Resetting controller.\n",
1236 mmc_hostname(host->mmc));
1237
1238 sdhci_reset(host, SDHCI_RESET_CMD);
1239 sdhci_reset(host, SDHCI_RESET_DATA);
1240
17b0429d 1241 host->mrq->cmd->error = -ENOMEDIUM;
d129bceb
PO
1242 tasklet_schedule(&host->finish_tasklet);
1243 }
1244 }
1245
1246 spin_unlock_irqrestore(&host->lock, flags);
1247
04cf585d 1248 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
d129bceb
PO
1249}
1250
1251static void sdhci_tasklet_finish(unsigned long param)
1252{
1253 struct sdhci_host *host;
1254 unsigned long flags;
1255 struct mmc_request *mrq;
1256
1257 host = (struct sdhci_host*)param;
1258
1259 spin_lock_irqsave(&host->lock, flags);
1260
1261 del_timer(&host->timer);
1262
1263 mrq = host->mrq;
1264
d129bceb
PO
1265 /*
1266 * The controller needs a reset of internal state machines
1267 * upon error conditions.
1268 */
1e72859e
PO
1269 if (!(host->flags & SDHCI_DEVICE_DEAD) &&
1270 (mrq->cmd->error ||
1271 (mrq->data && (mrq->data->error ||
1272 (mrq->data->stop && mrq->data->stop->error))) ||
1273 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
645289dc
PO
1274
1275 /* Some controllers need this kick or reset won't work here */
b8c86fc5 1276 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
645289dc
PO
1277 unsigned int clock;
1278
1279 /* This is to force an update */
1280 clock = host->clock;
1281 host->clock = 0;
1282 sdhci_set_clock(host, clock);
1283 }
1284
1285 /* Spec says we should do both at the same time, but Ricoh
1286 controllers do not like that. */
d129bceb
PO
1287 sdhci_reset(host, SDHCI_RESET_CMD);
1288 sdhci_reset(host, SDHCI_RESET_DATA);
1289 }
1290
1291 host->mrq = NULL;
1292 host->cmd = NULL;
1293 host->data = NULL;
1294
f9134319 1295#ifndef SDHCI_USE_LEDS_CLASS
d129bceb 1296 sdhci_deactivate_led(host);
2f730fec 1297#endif
d129bceb 1298
5f25a66f 1299 mmiowb();
d129bceb
PO
1300 spin_unlock_irqrestore(&host->lock, flags);
1301
1302 mmc_request_done(host->mmc, mrq);
1303}
1304
1305static void sdhci_timeout_timer(unsigned long data)
1306{
1307 struct sdhci_host *host;
1308 unsigned long flags;
1309
1310 host = (struct sdhci_host*)data;
1311
1312 spin_lock_irqsave(&host->lock, flags);
1313
1314 if (host->mrq) {
acf1da45
PO
1315 printk(KERN_ERR "%s: Timeout waiting for hardware "
1316 "interrupt.\n", mmc_hostname(host->mmc));
d129bceb
PO
1317 sdhci_dumpregs(host);
1318
1319 if (host->data) {
17b0429d 1320 host->data->error = -ETIMEDOUT;
d129bceb
PO
1321 sdhci_finish_data(host);
1322 } else {
1323 if (host->cmd)
17b0429d 1324 host->cmd->error = -ETIMEDOUT;
d129bceb 1325 else
17b0429d 1326 host->mrq->cmd->error = -ETIMEDOUT;
d129bceb
PO
1327
1328 tasklet_schedule(&host->finish_tasklet);
1329 }
1330 }
1331
5f25a66f 1332 mmiowb();
d129bceb
PO
1333 spin_unlock_irqrestore(&host->lock, flags);
1334}
1335
1336/*****************************************************************************\
1337 * *
1338 * Interrupt handling *
1339 * *
1340\*****************************************************************************/
1341
1342static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
1343{
1344 BUG_ON(intmask == 0);
1345
1346 if (!host->cmd) {
b67ac3f3
PO
1347 printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
1348 "though no command operation was in progress.\n",
1349 mmc_hostname(host->mmc), (unsigned)intmask);
d129bceb
PO
1350 sdhci_dumpregs(host);
1351 return;
1352 }
1353
43b58b36 1354 if (intmask & SDHCI_INT_TIMEOUT)
17b0429d
PO
1355 host->cmd->error = -ETIMEDOUT;
1356 else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
1357 SDHCI_INT_INDEX))
1358 host->cmd->error = -EILSEQ;
43b58b36 1359
e809517f 1360 if (host->cmd->error) {
d129bceb 1361 tasklet_schedule(&host->finish_tasklet);
e809517f
PO
1362 return;
1363 }
1364
1365 /*
	 * The host can send an interrupt when the busy state has
1367 * ended, allowing us to wait without wasting CPU cycles.
1368 * Unfortunately this is overloaded on the "data complete"
1369 * interrupt, so we need to take some care when handling
1370 * it.
1371 *
1372 * Note: The 1.0 specification is a bit ambiguous about this
1373 * feature so there might be some problems with older
1374 * controllers.
1375 */
1376 if (host->cmd->flags & MMC_RSP_BUSY) {
1377 if (host->cmd->data)
1378 DBG("Cannot wait for busy signal when also "
1379 "doing a data transfer");
f945405c 1380 else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
e809517f 1381 return;
f945405c
BD
1382
1383 /* The controller does not support the end-of-busy IRQ,
1384 * fall through and take the SDHCI_INT_RESPONSE */
e809517f
PO
1385 }
1386
1387 if (intmask & SDHCI_INT_RESPONSE)
43b58b36 1388 sdhci_finish_command(host);
d129bceb
PO
1389}
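/*
 * Illustrative only: for commands with MMC_RSP_BUSY (R1b responses such as
 * a typical CMD12 stop transmission), the code above deliberately does not
 * finish the command on SDHCI_INT_RESPONSE; it waits for the "data
 * complete" interrupt instead, which sdhci_data_irq() below translates into
 * sdhci_finish_command() once the busy signalling on DAT0 has ended.
 * Controllers flagged with SDHCI_QUIRK_NO_BUSY_IRQ skip that wait.
 */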
1390
6882a8c0
BD
1391#ifdef DEBUG
1392static void sdhci_show_adma_error(struct sdhci_host *host)
1393{
1394 const char *name = mmc_hostname(host->mmc);
1395 u8 *desc = host->adma_desc;
1396 __le32 *dma;
1397 __le16 *len;
1398 u8 attr;
1399
1400 sdhci_dumpregs(host);
1401
1402 while (true) {
1403 dma = (__le32 *)(desc + 4);
1404 len = (__le16 *)(desc + 2);
1405 attr = *desc;
1406
1407 DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
1408 name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);
1409
1410 desc += 8;
1411
1412 if (attr & 2)
1413 break;
1414 }
1415}
1416#else
1417static void sdhci_show_adma_error(struct sdhci_host *host) { }
1418#endif
1419
d129bceb
PO
1420static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1421{
1422 BUG_ON(intmask == 0);
1423
1424 if (!host->data) {
1425 /*
e809517f
PO
1426 * The "data complete" interrupt is also used to
1427 * indicate that a busy state has ended. See comment
1428 * above in sdhci_cmd_irq().
d129bceb 1429 */
e809517f
PO
1430 if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
1431 if (intmask & SDHCI_INT_DATA_END) {
1432 sdhci_finish_command(host);
1433 return;
1434 }
1435 }
d129bceb 1436
b67ac3f3
PO
1437 printk(KERN_ERR "%s: Got data interrupt 0x%08x even "
1438 "though no data operation was in progress.\n",
1439 mmc_hostname(host->mmc), (unsigned)intmask);
d129bceb
PO
1440 sdhci_dumpregs(host);
1441
1442 return;
1443 }
1444
1445 if (intmask & SDHCI_INT_DATA_TIMEOUT)
17b0429d
PO
1446 host->data->error = -ETIMEDOUT;
1447 else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
1448 host->data->error = -EILSEQ;
6882a8c0
BD
1449 else if (intmask & SDHCI_INT_ADMA_ERROR) {
1450 printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
1451 sdhci_show_adma_error(host);
2134a922 1452 host->data->error = -EIO;
6882a8c0 1453 }
d129bceb 1454
17b0429d 1455 if (host->data->error)
d129bceb
PO
1456 sdhci_finish_data(host);
1457 else {
a406f5a3 1458 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
d129bceb
PO
1459 sdhci_transfer_pio(host);
1460
6ba736a1
PO
1461 /*
1462 * We currently don't do anything fancy with DMA
1463 * boundaries, but as we can't disable the feature
1464 * we need to at least restart the transfer.
1465 */
1466 if (intmask & SDHCI_INT_DMA_END)
4e4141a5
AV
1467 sdhci_writel(host, sdhci_readl(host, SDHCI_DMA_ADDRESS),
1468 SDHCI_DMA_ADDRESS);
6ba736a1 1469
e538fbe8
PO
1470 if (intmask & SDHCI_INT_DATA_END) {
1471 if (host->cmd) {
1472 /*
1473 * Data managed to finish before the
1474 * command completed. Make sure we do
1475 * things in the proper order.
1476 */
1477 host->data_early = 1;
1478 } else {
1479 sdhci_finish_data(host);
1480 }
1481 }
d129bceb
PO
1482 }
1483}
1484
7d12e780 1485static irqreturn_t sdhci_irq(int irq, void *dev_id)
d129bceb
PO
1486{
1487 irqreturn_t result;
1488 struct sdhci_host* host = dev_id;
1489 u32 intmask;
f75979b7 1490 int cardint = 0;
d129bceb
PO
1491
1492 spin_lock(&host->lock);
1493
4e4141a5 1494 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
d129bceb 1495
62df67a5 1496 if (!intmask || intmask == 0xffffffff) {
d129bceb
PO
1497 result = IRQ_NONE;
1498 goto out;
1499 }
1500
b69c9058
PO
1501 DBG("*** %s got interrupt: 0x%08x\n",
1502 mmc_hostname(host->mmc), intmask);
d129bceb 1503
3192a28f 1504 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
4e4141a5
AV
1505 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
1506 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
d129bceb 1507 tasklet_schedule(&host->card_tasklet);
3192a28f 1508 }
d129bceb 1509
3192a28f 1510 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
d129bceb 1511
3192a28f 1512 if (intmask & SDHCI_INT_CMD_MASK) {
4e4141a5
AV
1513 sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
1514 SDHCI_INT_STATUS);
3192a28f 1515 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
d129bceb
PO
1516 }
1517
1518 if (intmask & SDHCI_INT_DATA_MASK) {
4e4141a5
AV
1519 sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
1520 SDHCI_INT_STATUS);
3192a28f 1521 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
d129bceb
PO
1522 }
1523
1524 intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
1525
964f9ce2
PO
1526 intmask &= ~SDHCI_INT_ERROR;
1527
d129bceb 1528 if (intmask & SDHCI_INT_BUS_POWER) {
3192a28f 1529 printk(KERN_ERR "%s: Card is consuming too much power!\n",
d129bceb 1530 mmc_hostname(host->mmc));
4e4141a5 1531 sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
d129bceb
PO
1532 }
1533
9d26a5d3 1534 intmask &= ~SDHCI_INT_BUS_POWER;
3192a28f 1535
f75979b7
PO
1536 if (intmask & SDHCI_INT_CARD_INT)
1537 cardint = 1;
1538
1539 intmask &= ~SDHCI_INT_CARD_INT;
1540
3192a28f 1541 if (intmask) {
acf1da45 1542 printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
3192a28f 1543 mmc_hostname(host->mmc), intmask);
d129bceb
PO
1544 sdhci_dumpregs(host);
1545
4e4141a5 1546 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3192a28f 1547 }
d129bceb
PO
1548
1549 result = IRQ_HANDLED;
1550
5f25a66f 1551 mmiowb();
d129bceb
PO
1552out:
1553 spin_unlock(&host->lock);
1554
f75979b7
PO
1555 /*
1556 * We have to delay this as it calls back into the driver.
1557 */
1558 if (cardint)
1559 mmc_signal_sdio_irq(host->mmc);
1560
d129bceb
PO
1561 return result;
1562}
1563
1564/*****************************************************************************\
1565 * *
1566 * Suspend/resume *
1567 * *
1568\*****************************************************************************/
1569
1570#ifdef CONFIG_PM
1571
b8c86fc5 1572int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
d129bceb 1573{
b8c86fc5 1574 int ret;
a715dfc7 1575
7260cf5e
AV
1576 sdhci_disable_card_detection(host);
1577
b8c86fc5
PO
1578 ret = mmc_suspend_host(host->mmc, state);
1579 if (ret)
1580 return ret;
a715dfc7 1581
b8c86fc5 1582 free_irq(host->irq, host);
d129bceb
PO
1583
1584 return 0;
1585}
1586
b8c86fc5 1587EXPORT_SYMBOL_GPL(sdhci_suspend_host);
d129bceb 1588
b8c86fc5
PO
1589int sdhci_resume_host(struct sdhci_host *host)
1590{
1591 int ret;
d129bceb 1592
a13abc7b 1593 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
b8c86fc5
PO
1594 if (host->ops->enable_dma)
1595 host->ops->enable_dma(host);
1596 }
d129bceb 1597
b8c86fc5
PO
1598 ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
1599 mmc_hostname(host->mmc), host);
df1c4b7b
PO
1600 if (ret)
1601 return ret;
d129bceb 1602
b8c86fc5
PO
1603 sdhci_init(host);
1604 mmiowb();
1605
1606 ret = mmc_resume_host(host->mmc);
1607 if (ret)
1608 return ret;
d129bceb 1609
7260cf5e
AV
1610 sdhci_enable_card_detection(host);
1611
d129bceb
PO
1612 return 0;
1613}
1614
b8c86fc5 1615EXPORT_SYMBOL_GPL(sdhci_resume_host);
d129bceb
PO
1616
1617#endif /* CONFIG_PM */
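/*
 * Illustrative only: a hypothetical glue driver would forward its own PM
 * callbacks to the two helpers above, roughly like this (error handling
 * trimmed, names invented for the example):
 *
 *	static int example_glue_suspend(struct platform_device *pdev,
 *					pm_message_t state)
 *	{
 *		struct sdhci_host *host = platform_get_drvdata(pdev);
 *
 *		return sdhci_suspend_host(host, state);
 *	}
 *
 *	static int example_glue_resume(struct platform_device *pdev)
 *	{
 *		struct sdhci_host *host = platform_get_drvdata(pdev);
 *
 *		return sdhci_resume_host(host);
 *	}
 */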
1618
1619/*****************************************************************************\
1620 * *
b8c86fc5 1621 * Device allocation/registration *
d129bceb
PO
1622 * *
1623\*****************************************************************************/
1624
b8c86fc5
PO
1625struct sdhci_host *sdhci_alloc_host(struct device *dev,
1626 size_t priv_size)
d129bceb 1627{
d129bceb
PO
1628 struct mmc_host *mmc;
1629 struct sdhci_host *host;
1630
b8c86fc5 1631 WARN_ON(dev == NULL);
d129bceb 1632
b8c86fc5 1633 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
d129bceb 1634 if (!mmc)
b8c86fc5 1635 return ERR_PTR(-ENOMEM);
d129bceb
PO
1636
1637 host = mmc_priv(mmc);
1638 host->mmc = mmc;
1639
b8c86fc5
PO
1640 return host;
1641}
8a4da143 1642
b8c86fc5 1643EXPORT_SYMBOL_GPL(sdhci_alloc_host);
d129bceb 1644
b8c86fc5
PO
1645int sdhci_add_host(struct sdhci_host *host)
1646{
1647 struct mmc_host *mmc;
1648 unsigned int caps;
b8c86fc5 1649 int ret;
d129bceb 1650
b8c86fc5
PO
1651 WARN_ON(host == NULL);
1652 if (host == NULL)
1653 return -EINVAL;
d129bceb 1654
b8c86fc5 1655 mmc = host->mmc;
d129bceb 1656
b8c86fc5
PO
1657 if (debug_quirks)
1658 host->quirks = debug_quirks;
d129bceb 1659
d96649ed
PO
1660 sdhci_reset(host, SDHCI_RESET_ALL);
1661
4e4141a5 1662 host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
2134a922
PO
1663 host->version = (host->version & SDHCI_SPEC_VER_MASK)
1664 >> SDHCI_SPEC_VER_SHIFT;
1665 if (host->version > SDHCI_SPEC_200) {
4a965505 1666 printk(KERN_ERR "%s: Unknown controller version (%d). "
b69c9058 1667 "You may experience problems.\n", mmc_hostname(mmc),
2134a922 1668 host->version);
4a965505
PO
1669 }
1670
4e4141a5 1671 caps = sdhci_readl(host, SDHCI_CAPABILITIES);
d129bceb 1672
b8c86fc5 1673 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
a13abc7b
RR
1674 host->flags |= SDHCI_USE_SDMA;
1675 else if (!(caps & SDHCI_CAN_DO_SDMA))
1676 DBG("Controller doesn't have SDMA capability\n");
67435274 1677 else
a13abc7b 1678 host->flags |= SDHCI_USE_SDMA;
d129bceb 1679
b8c86fc5 1680 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
a13abc7b 1681 (host->flags & SDHCI_USE_SDMA)) {
cee687ce 1682 DBG("Disabling DMA as it is marked broken\n");
a13abc7b 1683 host->flags &= ~SDHCI_USE_SDMA;
7c168e3d
FT
1684 }
1685
a13abc7b
RR
1686 if ((host->version >= SDHCI_SPEC_200) && (caps & SDHCI_CAN_DO_ADMA2))
1687 host->flags |= SDHCI_USE_ADMA;
2134a922
PO
1688
1689 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
1690 (host->flags & SDHCI_USE_ADMA)) {
1691 DBG("Disabling ADMA as it is marked broken\n");
1692 host->flags &= ~SDHCI_USE_ADMA;
1693 }
1694
a13abc7b 1695 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
b8c86fc5
PO
1696 if (host->ops->enable_dma) {
1697 if (host->ops->enable_dma(host)) {
1698 printk(KERN_WARNING "%s: No suitable DMA "
1699 "available. Falling back to PIO.\n",
1700 mmc_hostname(mmc));
a13abc7b
RR
1701 host->flags &=
1702 ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
b8c86fc5 1703 }
d129bceb
PO
1704 }
1705 }
1706
2134a922
PO
1707 if (host->flags & SDHCI_USE_ADMA) {
1708 /*
1709 * We need to allocate descriptors for all sg entries
1710 * (128) and potentially one alignment transfer for
1711 * each of those entries.
1712 */
1713 host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
1714 host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
1715 if (!host->adma_desc || !host->align_buffer) {
1716 kfree(host->adma_desc);
1717 kfree(host->align_buffer);
1718 printk(KERN_WARNING "%s: Unable to allocate ADMA "
1719 "buffers. Falling back to standard DMA.\n",
1720 mmc_hostname(mmc));
1721 host->flags &= ~SDHCI_USE_ADMA;
1722 }
1723 }
1724
7659150c
PO
1725 /*
1726 * If we use DMA, then it's up to the caller to set the DMA
1727 * mask, but PIO does not need the hw shim so we set a new
1728 * mask here in that case.
1729 */
a13abc7b 1730 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
7659150c
PO
1731 host->dma_mask = DMA_BIT_MASK(64);
1732 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
1733 }
d129bceb 1734
8ef1a143
PO
1735 host->max_clk =
1736 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
4240ff0a 1737 host->max_clk *= 1000000;
8ef1a143 1738 if (host->max_clk == 0) {
4240ff0a
BD
1739 if (!host->ops->get_max_clock) {
1740 printk(KERN_ERR
1741 "%s: Hardware doesn't specify base clock "
1742 "frequency.\n", mmc_hostname(mmc));
1743 return -ENODEV;
1744 }
1745 host->max_clk = host->ops->get_max_clock(host);
8ef1a143 1746 }
d129bceb 1747
1c8cde92
PO
1748 host->timeout_clk =
1749 (caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
1750 if (host->timeout_clk == 0) {
81b39802
AV
1751 if (host->ops->get_timeout_clock) {
1752 host->timeout_clk = host->ops->get_timeout_clock(host);
1753 } else if (!(host->quirks &
1754 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
4240ff0a
BD
1755 printk(KERN_ERR
1756 "%s: Hardware doesn't specify timeout clock "
1757 "frequency.\n", mmc_hostname(mmc));
1758 return -ENODEV;
1759 }
1c8cde92
PO
1760 }
1761 if (caps & SDHCI_TIMEOUT_CLK_UNIT)
1762 host->timeout_clk *= 1000;
d129bceb
PO
1763
1764 /*
1765 * Set host parameters.
1766 */
1767 mmc->ops = &sdhci_ops;
e9510176
AV
1768 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK &&
1769 host->ops->set_clock && host->ops->get_min_clock)
a9e58f25
AV
1770 mmc->f_min = host->ops->get_min_clock(host);
1771 else
1772 mmc->f_min = host->max_clk / 256;
d129bceb 1773 mmc->f_max = host->max_clk;
5fe23c7f
AV
1774 mmc->caps = MMC_CAP_SDIO_IRQ;
1775
1776 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
1777 mmc->caps |= MMC_CAP_4_BIT_DATA;
d129bceb 1778
86a6a874 1779 if (caps & SDHCI_CAN_DO_HISPD)
cd9277c0
PO
1780 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
1781
68d1fb7e
AV
1782 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1783 mmc->caps |= MMC_CAP_NEEDS_POLL;
1784
146ad66e
PO
1785 mmc->ocr_avail = 0;
1786 if (caps & SDHCI_CAN_VDD_330)
1787 mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
c70840e8 1788 if (caps & SDHCI_CAN_VDD_300)
146ad66e 1789 mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
c70840e8 1790 if (caps & SDHCI_CAN_VDD_180)
55556da0 1791 mmc->ocr_avail |= MMC_VDD_165_195;
146ad66e
PO
1792
1793 if (mmc->ocr_avail == 0) {
1794 printk(KERN_ERR "%s: Hardware doesn't report any "
			"supported voltages.\n", mmc_hostname(mmc));
b8c86fc5 1796 return -ENODEV;
146ad66e
PO
1797 }
1798
d129bceb
PO
1799 spin_lock_init(&host->lock);
1800
1801 /*
2134a922
PO
1802 * Maximum number of segments. Depends on if the hardware
1803 * can do scatter/gather or not.
d129bceb 1804 */
2134a922
PO
1805 if (host->flags & SDHCI_USE_ADMA)
1806 mmc->max_hw_segs = 128;
a13abc7b 1807 else if (host->flags & SDHCI_USE_SDMA)
d129bceb 1808 mmc->max_hw_segs = 1;
2134a922
PO
1809 else /* PIO */
1810 mmc->max_hw_segs = 128;
1811 mmc->max_phys_segs = 128;
d129bceb
PO
1812
1813 /*
bab76961 1814 * Maximum number of sectors in one transfer. Limited by DMA boundary
55db890a 1815 * size (512KiB).
d129bceb 1816 */
55db890a 1817 mmc->max_req_size = 524288;
d129bceb
PO
1818
1819 /*
1820 * Maximum segment size. Could be one segment with the maximum number
2134a922
PO
1821 * of bytes. When doing hardware scatter/gather, each entry cannot
1822 * be larger than 64 KiB though.
d129bceb 1823 */
2134a922
PO
1824 if (host->flags & SDHCI_USE_ADMA)
1825 mmc->max_seg_size = 65536;
1826 else
1827 mmc->max_seg_size = mmc->max_req_size;
d129bceb 1828
fe4a3c7a
PO
1829 /*
1830 * Maximum block size. This varies from controller to controller and
1831 * is specified in the capabilities register.
1832 */
0633f654
AV
1833 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
1834 mmc->max_blk_size = 2;
1835 } else {
1836 mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >>
1837 SDHCI_MAX_BLOCK_SHIFT;
1838 if (mmc->max_blk_size >= 3) {
1839 printk(KERN_WARNING "%s: Invalid maximum block size, "
1840 "assuming 512 bytes\n", mmc_hostname(mmc));
1841 mmc->max_blk_size = 0;
1842 }
1843 }
1844
1845 mmc->max_blk_size = 512 << mmc->max_blk_size;
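/*
 * Illustrative only: the capability field decoded above is an exponent, so
 * 0, 1 and 2 map to 512, 1024 and 2048 bytes respectively; the
 * SDHCI_QUIRK_FORCE_BLK_SZ_2048 quirk simply pins the exponent at 2, and
 * values of 3 or more are reserved, which is why they fall back to 512.
 */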
fe4a3c7a 1846
55db890a
PO
1847 /*
1848 * Maximum block count.
1849 */
1388eefd 1850 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
55db890a 1851
d129bceb
PO
1852 /*
1853 * Init tasklets.
1854 */
1855 tasklet_init(&host->card_tasklet,
1856 sdhci_tasklet_card, (unsigned long)host);
1857 tasklet_init(&host->finish_tasklet,
1858 sdhci_tasklet_finish, (unsigned long)host);
1859
e4cad1b5 1860 setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
d129bceb 1861
dace1453 1862 ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
b69c9058 1863 mmc_hostname(mmc), host);
d129bceb 1864 if (ret)
8ef1a143 1865 goto untasklet;
d129bceb
PO
1866
1867 sdhci_init(host);
1868
1869#ifdef CONFIG_MMC_DEBUG
1870 sdhci_dumpregs(host);
1871#endif
1872
f9134319 1873#ifdef SDHCI_USE_LEDS_CLASS
5dbace0c
HS
1874 snprintf(host->led_name, sizeof(host->led_name),
1875 "%s::", mmc_hostname(mmc));
1876 host->led.name = host->led_name;
2f730fec
PO
1877 host->led.brightness = LED_OFF;
1878 host->led.default_trigger = mmc_hostname(mmc);
1879 host->led.brightness_set = sdhci_led_control;
1880
b8c86fc5 1881 ret = led_classdev_register(mmc_dev(mmc), &host->led);
2f730fec
PO
1882 if (ret)
1883 goto reset;
1884#endif
1885
5f25a66f
PO
1886 mmiowb();
1887
d129bceb
PO
1888 mmc_add_host(mmc);
1889
a13abc7b 1890 printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n",
d1b26863 1891 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
a13abc7b
RR
1892 (host->flags & SDHCI_USE_ADMA) ? "ADMA" :
1893 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
d129bceb 1894
7260cf5e
AV
1895 sdhci_enable_card_detection(host);
1896
d129bceb
PO
1897 return 0;
1898
f9134319 1899#ifdef SDHCI_USE_LEDS_CLASS
2f730fec
PO
1900reset:
1901 sdhci_reset(host, SDHCI_RESET_ALL);
1902 free_irq(host->irq, host);
1903#endif
8ef1a143 1904untasklet:
d129bceb
PO
1905 tasklet_kill(&host->card_tasklet);
1906 tasklet_kill(&host->finish_tasklet);
d129bceb
PO
1907
1908 return ret;
1909}
1910
b8c86fc5 1911EXPORT_SYMBOL_GPL(sdhci_add_host);
d129bceb 1912
1e72859e 1913void sdhci_remove_host(struct sdhci_host *host, int dead)
b8c86fc5 1914{
1e72859e
PO
1915 unsigned long flags;
1916
1917 if (dead) {
1918 spin_lock_irqsave(&host->lock, flags);
1919
1920 host->flags |= SDHCI_DEVICE_DEAD;
1921
1922 if (host->mrq) {
			printk(KERN_ERR "%s: Controller removed during "
				"transfer!\n", mmc_hostname(host->mmc));
1925
1926 host->mrq->cmd->error = -ENOMEDIUM;
1927 tasklet_schedule(&host->finish_tasklet);
1928 }
1929
1930 spin_unlock_irqrestore(&host->lock, flags);
1931 }
1932
7260cf5e
AV
1933 sdhci_disable_card_detection(host);
1934
b8c86fc5 1935 mmc_remove_host(host->mmc);
d129bceb 1936
f9134319 1937#ifdef SDHCI_USE_LEDS_CLASS
2f730fec
PO
1938 led_classdev_unregister(&host->led);
1939#endif
1940
1e72859e
PO
1941 if (!dead)
1942 sdhci_reset(host, SDHCI_RESET_ALL);
d129bceb
PO
1943
1944 free_irq(host->irq, host);
1945
1946 del_timer_sync(&host->timer);
1947
1948 tasklet_kill(&host->card_tasklet);
1949 tasklet_kill(&host->finish_tasklet);
2134a922
PO
1950
1951 kfree(host->adma_desc);
1952 kfree(host->align_buffer);
1953
1954 host->adma_desc = NULL;
1955 host->align_buffer = NULL;
d129bceb
PO
1956}
1957
b8c86fc5 1958EXPORT_SYMBOL_GPL(sdhci_remove_host);
d129bceb 1959
b8c86fc5 1960void sdhci_free_host(struct sdhci_host *host)
d129bceb 1961{
b8c86fc5 1962 mmc_free_host(host->mmc);
d129bceb
PO
1963}
1964
b8c86fc5 1965EXPORT_SYMBOL_GPL(sdhci_free_host);
d129bceb
PO
1966
1967/*****************************************************************************\
1968 * *
1969 * Driver init/exit *
1970 * *
1971\*****************************************************************************/
1972
1973static int __init sdhci_drv_init(void)
1974{
1975 printk(KERN_INFO DRIVER_NAME
52fbf9c9 1976 ": Secure Digital Host Controller Interface driver\n");
d129bceb
PO
1977 printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
1978
b8c86fc5 1979 return 0;
d129bceb
PO
1980}
1981
1982static void __exit sdhci_drv_exit(void)
1983{
d129bceb
PO
1984}
1985
1986module_init(sdhci_drv_init);
1987module_exit(sdhci_drv_exit);
1988
df673b22 1989module_param(debug_quirks, uint, 0444);
67435274 1990
32710e8f 1991MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
b8c86fc5 1992MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
d129bceb 1993MODULE_LICENSE("GPL");
67435274 1994
df673b22 1995MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");