/*
 * linux/drivers/mmc/tmio_mmc.c
 *
 * Copyright (C) 2004 Ian Molton
 * Copyright (C) 2007 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO cell found in:
 *
 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet.)
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Eliminate FIXMEs
 *   SDIO support
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 *
 */

#include <linux/module.h>
#include <linux/irq.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/mmc/host.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>

#include "tmio_mmc.h"

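/*
 * A note on the divider encoding in tmio_mmc_set_clock() below (inferred
 * from the shift loop and from f_min = f_max / 512 at probe time, not from
 * a datasheet): the low byte of CTL_SD_CARD_CLK_CTL appears to be a
 * one-hot divisor select, 0x80 = /512 shifting down through 0x01 = /4,
 * and bit 8 (0x100) enables the clock. Bit 22 of the shifted word ends up
 * set only for the fastest (divide-by-1) case, which set_clk_div() hands
 * off to an external divider.
 */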
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
        u32 clk = 0, clock;

        if (new_clock) {
                for (clock = host->mmc->f_min, clk = 0x80000080;
                     new_clock >= (clock << 1); clk >>= 1)
                        clock <<= 1;
                clk |= 0x100;
        }

        if (host->set_clk_div)
                host->set_clk_div(host->pdev, (clk >> 22) & 1);

        sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}

static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
        sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
        msleep(10);
        sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
                sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
        msleep(10);
}

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
        sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
                sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
        msleep(10);
        sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
        msleep(10);
}

static void reset(struct tmio_mmc_host *host)
{
        /* FIXME - should we set stop clock reg here */
        sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
        sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
        msleep(10);
        sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
        sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
        msleep(10);
}

static void
tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
        struct mmc_request *mrq = host->mrq;

        host->mrq = NULL;
        host->cmd = NULL;
        host->data = NULL;

        mmc_request_done(host->mmc, mrq);
}

/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD 0x0040
#define RESP_NONE 0x0300
#define RESP_R1 0x0400
#define RESP_R1B 0x0500
#define RESP_R2 0x0600
#define RESP_R3 0x0700
#define DATA_PRESENT 0x0800
#define TRANSFER_READ 0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD 0x4000

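/*
 * By way of illustration (my sketch, not from a datasheet): a multi-block
 * read with CMD18 and an R1 response would be issued as
 *
 *      c = 18 | RESP_R1 | DATA_PRESENT | TRANSFER_READ | TRANSFER_MULTI;
 *
 * i.e. the opcode sits in the low bits and the flags above are OR'd in,
 * exactly as tmio_mmc_start_command() does below.
 */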
static int
tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
        struct mmc_data *data = host->data;
        int c = cmd->opcode;

        /* Command 12 is handled by hardware */
        if (cmd->opcode == 12 && !cmd->arg) {
                sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
                return 0;
        }

        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE: c |= RESP_NONE; break;
        case MMC_RSP_R1: c |= RESP_R1; break;
        case MMC_RSP_R1B: c |= RESP_R1B; break;
        case MMC_RSP_R2: c |= RESP_R2; break;
        case MMC_RSP_R3: c |= RESP_R3; break;
        default:
                pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
                return -EINVAL;
        }

        host->cmd = cmd;

        /* FIXME - this seems to be ok commented out but the spec suggests
         * this bit should be set when issuing app commands.
         *      if (cmd->flags & MMC_FLAG_ACMD)
         *              c |= APP_CMD;
         */
        if (data) {
                c |= DATA_PRESENT;
                if (data->blocks > 1) {
                        sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
                        c |= TRANSFER_MULTI;
                }
                if (data->flags & MMC_DATA_READ)
                        c |= TRANSFER_READ;
        }

        enable_mmc_irqs(host, TMIO_MASK_CMD);

        /* Fire off the command */
        sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
        sd_ctrl_write16(host, CTL_SD_CMD, c);

        return 0;
}

/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
        struct mmc_data *data = host->data;
        void *sg_virt;
        unsigned short *buf;
        unsigned int count;
        unsigned long flags;

        if (!data) {
                pr_debug("Spurious PIO IRQ\n");
                return;
        }

        sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
        buf = (unsigned short *)(sg_virt + host->sg_off);

        count = host->sg_ptr->length - host->sg_off;
        if (count > data->blksz)
                count = data->blksz;

        pr_debug("count: %08x offset: %08x flags %08x\n",
                 count, host->sg_off, data->flags);

        /* Transfer the data */
        if (data->flags & MMC_DATA_READ)
                sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
        else
                sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

        host->sg_off += count;

        tmio_mmc_kunmap_atomic(sg_virt, &flags);

        if (host->sg_off == host->sg_ptr->length)
                tmio_mmc_next_sg(host);
}

static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
        struct mmc_data *data = host->data;
        struct mmc_command *stop;

        host->data = NULL;

        if (!data) {
                dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
                return;
        }
        stop = data->stop;

        /* FIXME - return correct transfer count on errors */
        if (!data->error)
                data->bytes_xfered = data->blocks * data->blksz;
        else
                data->bytes_xfered = 0;

        pr_debug("Completed data request\n");

        /*
         * FIXME: other drivers allow an optional stop command of any given
         * type which we don't do, as the chip can auto generate them.
         * Perhaps we can be smarter about when to use auto CMD12 and
         * only issue the auto request when we know this is the desired
         * stop command, allowing fallback to the stop command the
         * upper layers expect. For now, we do what works.
         */

        if (data->flags & MMC_DATA_READ) {
                if (!host->chan_rx)
                        disable_mmc_irqs(host, TMIO_MASK_READOP);
                dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
                        host->mrq);
        } else {
                if (!host->chan_tx)
                        disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
                dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
                        host->mrq);
        }

        if (stop) {
                if (stop->opcode == 12 && !stop->arg)
                        sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
                else
                        BUG();
        }

        tmio_mmc_finish_request(host);
}

static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
        struct mmc_data *data = host->data;

        if (!data)
                return;

        if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
                /*
                 * Has all data been written out yet? Testing on SuperH showed
                 * that in most cases the first interrupt already arrives with
                 * the BUSY status bit clear, but on some operations, like
                 * mount or at the beginning of a write / sync / umount, there
                 * is one DATAEND interrupt with the BUSY bit set; in these
                 * cases waiting for one more interrupt fixes the problem.
                 */
                if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
                        disable_mmc_irqs(host, TMIO_STAT_DATAEND);
                        tasklet_schedule(&host->dma_complete);
                }
        } else if (host->chan_rx && (data->flags & MMC_DATA_READ)) {
                disable_mmc_irqs(host, TMIO_STAT_DATAEND);
                tasklet_schedule(&host->dma_complete);
        } else {
                tmio_mmc_do_data_irq(host);
        }
}

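/*
 * A note on the response handling below (as far as I can tell from the
 * code, the datasheets being scarce): the controller presents the
 * response least-significant word first, so resp[3..0] are filled from
 * ascending register addresses, reversing the order the MMC core
 * expects. The 136-bit fixup then re-aligns each word by one byte,
 * since the top 8 bits of the raw first response word are padding.
 */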
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
        unsigned int stat)
{
        struct mmc_command *cmd = host->cmd;
        int i, addr;

        if (!host->cmd) {
                pr_debug("Spurious CMD irq\n");
                return;
        }

        host->cmd = NULL;

        /* This controller is sicker than the PXA one. Not only do we need to
         * drop the top 8 bits of the first response word, we also need to
         * modify the order of the response for short response command types.
         */

        for (i = 3, addr = CTL_RESPONSE; i >= 0; i--, addr += 4)
                cmd->resp[i] = sd_ctrl_read32(host, addr);

        if (cmd->flags & MMC_RSP_136) {
                cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
                cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
                cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
                cmd->resp[3] <<= 8;
        } else if (cmd->flags & MMC_RSP_R3) {
                cmd->resp[0] = cmd->resp[3];
        }

        if (stat & TMIO_STAT_CMDTIMEOUT)
                cmd->error = -ETIMEDOUT;
        else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
                cmd->error = -EILSEQ;

        /* If there is data to handle we enable data IRQs here, and
         * we will ultimately finish the request in the data_end handler.
         * If there's no data or we encountered an error, finish now.
         */
        if (host->data && !cmd->error) {
                if (host->data->flags & MMC_DATA_READ) {
                        if (!host->chan_rx)
                                enable_mmc_irqs(host, TMIO_MASK_READOP);
                } else {
                        struct dma_chan *chan = host->chan_tx;
                        if (!chan)
                                enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
                        else
                                tasklet_schedule(&host->dma_issue);
                }
        } else {
                tmio_mmc_finish_request(host);
        }
}

static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
        struct tmio_mmc_host *host = devid;
        unsigned int ireg, irq_mask, status;

        pr_debug("MMC IRQ begin\n");

        status = sd_ctrl_read32(host, CTL_STATUS);
        irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
        ireg = status & TMIO_MASK_IRQ & ~irq_mask;

        pr_debug_status(status);
        pr_debug_status(ireg);

        if (!ireg) {
                disable_mmc_irqs(host, status & ~irq_mask);

                pr_warning("tmio_mmc: Spurious irq, disabling! "
                        "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
                pr_debug_status(status);

                goto out;
        }

        while (ireg) {
                /* Card insert / remove attempts */
                if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
                        ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
                                TMIO_STAT_CARD_REMOVE);
                        mmc_detect_change(host->mmc, msecs_to_jiffies(100));
                }

                /* CRC and other errors */
                /* if (ireg & TMIO_STAT_ERR_IRQ)
                 *      handled |= tmio_error_irq(host, irq, stat);
                 */

                /* Command completion */
                if (ireg & TMIO_MASK_CMD) {
                        ack_mmc_irqs(host, TMIO_MASK_CMD);
                        tmio_mmc_cmd_irq(host, status);
                }

                /* Data transfer */
                if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
                        ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
                        tmio_mmc_pio_irq(host);
                }

                /* Data transfer completion */
                if (ireg & TMIO_STAT_DATAEND) {
                        ack_mmc_irqs(host, TMIO_STAT_DATAEND);
                        tmio_mmc_data_irq(host);
                }

                /* Check status - keep going until we've handled it all */
                status = sd_ctrl_read32(host, CTL_STATUS);
                irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
                ireg = status & TMIO_MASK_IRQ & ~irq_mask;

                pr_debug("Status at end of loop: %08x\n", status);
                pr_debug_status(status);
        }
        pr_debug("MMC IRQ end\n");

out:
        return IRQ_HANDLED;
}

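/*
 * The dmaengine support below is best-effort: if mapping, descriptor
 * preparation or submission fails at any point, both channels are
 * released and the current request is failed so the upper layers can
 * retry, after which the driver carries on in PIO mode (see the
 * !host->desc paths in the two start_dma routines).
 */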
#ifdef CONFIG_TMIO_MMC_DMA
static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
        /* Switch DMA mode on or off - SuperH specific? */
        sd_ctrl_write16(host, 0xd8, enable ? 2 : 0);
#endif
}

static void tmio_dma_complete(void *arg)
{
        struct tmio_mmc_host *host = arg;

        dev_dbg(&host->pdev->dev, "Command completed\n");

        if (!host->data)
                dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
        else
                enable_mmc_irqs(host, TMIO_STAT_DATAEND);
}

static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
        struct scatterlist *sg = host->sg_ptr;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_rx;
        int ret;

        ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
        if (ret > 0) {
                host->dma_sglen = ret;
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
                        DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }

        if (desc) {
                host->desc = desc;
                desc->callback = tmio_dma_complete;
                desc->callback_param = host;
                host->cookie = desc->tx_submit(desc);
                if (host->cookie < 0) {
                        host->desc = NULL;
                        ret = host->cookie;
                } else {
                        chan->device->device_issue_pending(chan);
                }
        }
        dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
                __func__, host->sg_len, ret, host->cookie, host->mrq);

        if (!host->desc) {
                /* DMA failed, fall back to PIO */
                if (ret >= 0)
                        ret = -EIO;
                host->chan_rx = NULL;
                dma_release_channel(chan);
                /* Free the Tx channel too */
                chan = host->chan_tx;
                if (chan) {
                        host->chan_tx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
                tmio_mmc_enable_dma(host, false);
                reset(host);
                /* Fail this request, let above layers recover */
                host->mrq->cmd->error = ret;
                tmio_mmc_finish_request(host);
        }

        dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
                desc, host->cookie, host->sg_len);

        return ret > 0 ? 0 : ret;
}

static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
        struct scatterlist *sg = host->sg_ptr;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_tx;
        int ret;

        ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
        if (ret > 0) {
                host->dma_sglen = ret;
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
                        DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }

        if (desc) {
                host->desc = desc;
                desc->callback = tmio_dma_complete;
                desc->callback_param = host;
                host->cookie = desc->tx_submit(desc);
                if (host->cookie < 0) {
                        host->desc = NULL;
                        ret = host->cookie;
                }
        }
        dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
                __func__, host->sg_len, ret, host->cookie, host->mrq);

        if (!host->desc) {
                /* DMA failed, fall back to PIO */
                if (ret >= 0)
                        ret = -EIO;
                host->chan_tx = NULL;
                dma_release_channel(chan);
                /* Free the Rx channel too */
                chan = host->chan_rx;
                if (chan) {
                        host->chan_rx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
                tmio_mmc_enable_dma(host, false);
                reset(host);
                /* Fail this request, let above layers recover */
                host->mrq->cmd->error = ret;
                tmio_mmc_finish_request(host);
        }

        dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
                desc, host->cookie);

        return ret > 0 ? 0 : ret;
}

static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
                              struct mmc_data *data)
{
        if (data->flags & MMC_DATA_READ) {
                if (host->chan_rx)
                        return tmio_mmc_start_dma_rx(host);
        } else {
                if (host->chan_tx)
                        return tmio_mmc_start_dma_tx(host);
        }

        return 0;
}

static void tmio_issue_tasklet_fn(unsigned long priv)
{
        struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
        struct dma_chan *chan = host->chan_tx;

        chan->device->device_issue_pending(chan);
}

static void tmio_tasklet_fn(unsigned long arg)
{
        struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

        if (host->data->flags & MMC_DATA_READ)
                dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
                             DMA_FROM_DEVICE);
        else
                dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
                             DMA_TO_DEVICE);

        tmio_mmc_do_data_irq(host);
}

/* It might be necessary to make this filter MFD specific */
static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
{
        dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
        chan->private = arg;
        return true;
}

static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
                                 struct tmio_mmc_data *pdata)
{
        host->cookie = -EINVAL;
        host->desc = NULL;

        /* We can only either use DMA for both Tx and Rx or not use it at all */
        if (pdata->dma) {
                dma_cap_mask_t mask;

                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);

                host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
                                                    pdata->dma->chan_priv_tx);
                dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
                        host->chan_tx);

                if (!host->chan_tx)
                        return;

                host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
                                                    pdata->dma->chan_priv_rx);
                dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
                        host->chan_rx);

                if (!host->chan_rx) {
                        dma_release_channel(host->chan_tx);
                        host->chan_tx = NULL;
                        return;
                }

                tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host);
                tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host);

                tmio_mmc_enable_dma(host, true);
        }
}

static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
        if (host->chan_tx) {
                struct dma_chan *chan = host->chan_tx;
                host->chan_tx = NULL;
                dma_release_channel(chan);
        }
        if (host->chan_rx) {
                struct dma_chan *chan = host->chan_rx;
                host->chan_rx = NULL;
                dma_release_channel(chan);
        }

        host->cookie = -EINVAL;
        host->desc = NULL;
}
#else
static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
                              struct mmc_data *data)
{
        return 0;
}

static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
                                 struct tmio_mmc_data *pdata)
{
        host->chan_tx = NULL;
        host->chan_rx = NULL;
}

static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
}
#endif

static int tmio_mmc_start_data(struct tmio_mmc_host *host,
                               struct mmc_data *data)
{
        pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
                 data->blksz, data->blocks);

        /* Hardware cannot perform 1 and 2 byte requests in 4 bit mode */
        if (data->blksz < 4 && host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
                pr_err("%s: %d byte block unsupported in 4 bit mode\n",
                       mmc_hostname(host->mmc), data->blksz);
                return -EINVAL;
        }

        tmio_mmc_init_sg(host, data);
        host->data = data;

        /* Set transfer length / blocksize */
        sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
        sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

        return tmio_mmc_start_dma(host, data);
}

/* Process requests from the MMC layer */
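/*
 * Note the ordering here: the data side (block size, block count and any
 * DMA descriptor) is programmed first, because writing the opcode to
 * CTL_SD_CMD in tmio_mmc_start_command() is what actually kicks off the
 * transfer.
 */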
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);
        int ret;

        if (host->mrq)
                pr_debug("request not null\n");

        host->mrq = mrq;

        if (mrq->data) {
                ret = tmio_mmc_start_data(host, mrq->data);
                if (ret)
                        goto fail;
        }

        ret = tmio_mmc_start_command(host, mrq->cmd);
        if (!ret)
                return;

fail:
        mrq->cmd->error = ret;
        mmc_request_done(mmc, mrq);
}

/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but
 * as MMC won't run that fast, it has to be clocked at 12MHz, which is the
 * next slowest setting.
 */
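/*
 * A worked example (assuming hclk = 24MHz): the power-of-two divider
 * chain yields 24, 12, 6, 3MHz ... down to 24MHz / 512 = 46.875kHz, so
 * a 20MHz MMC card ends up driven at 12MHz, the nearest setting that
 * does not exceed the requested rate.
 */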
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);

        if (ios->clock)
                tmio_mmc_set_clock(host, ios->clock);

        /* Power sequence - OFF -> ON -> UP */
        switch (ios->power_mode) {
        case MMC_POWER_OFF: /* power down SD bus */
                if (host->set_pwr)
                        host->set_pwr(host->pdev, 0);
                tmio_mmc_clk_stop(host);
                break;
        case MMC_POWER_ON: /* power up SD bus */
                if (host->set_pwr)
                        host->set_pwr(host->pdev, 1);
                break;
        case MMC_POWER_UP: /* start bus clock */
                tmio_mmc_clk_start(host);
                break;
        }

        switch (ios->bus_width) {
        case MMC_BUS_WIDTH_1:
                sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
                break;
        case MMC_BUS_WIDTH_4:
                sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
                break;
        }

        /* Let things settle. Delay taken from the WinCE driver. */
        udelay(140);
}

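/*
 * Note the inverted sense below: as far as I can tell, the hardware
 * status bit is set while the card is writable, so get_ro() reports
 * read-only (1) only when neither the "write protect disabled" platform
 * flag nor the status bit is set.
 */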
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);
        struct mfd_cell *cell = host->pdev->dev.platform_data;
        struct tmio_mmc_data *pdata = cell->driver_data;

        return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
                (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1;
}

static const struct mmc_host_ops tmio_mmc_ops = {
        .request        = tmio_mmc_request,
        .set_ios        = tmio_mmc_set_ios,
        .get_ro         = tmio_mmc_get_ro,
};

#ifdef CONFIG_PM
static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
{
        struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
        struct mmc_host *mmc = platform_get_drvdata(dev);
        int ret;

        ret = mmc_suspend_host(mmc);

        /* Tell the MFD core it can disable us now. */
        if (!ret && cell->disable)
                cell->disable(dev);

        return ret;
}

static int tmio_mmc_resume(struct platform_device *dev)
{
        struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
        struct mmc_host *mmc = platform_get_drvdata(dev);
        int ret = 0;

        /* Tell the MFD core we are ready to be enabled */
        if (cell->resume) {
                ret = cell->resume(dev);
                if (ret)
                        goto out;
        }

        mmc_resume_host(mmc);

out:
        return ret;
}
#else
#define tmio_mmc_suspend NULL
#define tmio_mmc_resume NULL
#endif

static int __devinit tmio_mmc_probe(struct platform_device *dev)
{
        struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
        struct tmio_mmc_data *pdata;
        struct resource *res_ctl;
        struct tmio_mmc_host *host;
        struct mmc_host *mmc;
        int ret = -EINVAL;
        u32 irq_mask = TMIO_MASK_CMD;

        if (dev->num_resources != 2)
                goto out;

        res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0);
        if (!res_ctl)
                goto out;

        pdata = cell->driver_data;
        if (!pdata || !pdata->hclk)
                goto out;

        ret = -ENOMEM;

        mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev);
        if (!mmc)
                goto out;

        host = mmc_priv(mmc);
        host->mmc = mmc;
        host->pdev = dev;
        platform_set_drvdata(dev, mmc);

        host->set_pwr = pdata->set_pwr;
        host->set_clk_div = pdata->set_clk_div;

        /* SD control register space size is 0x200, 0x400 for bus_shift=1 */
        host->bus_shift = resource_size(res_ctl) >> 10;

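        /*
         * (The sd_ctrl_* accessors shift each register offset left by
         * bus_shift, so the wider 0x400 window simply means the same
         * registers spaced twice as far apart.)
         */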
        host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
        if (!host->ctl)
                goto host_free;

        mmc->ops = &tmio_mmc_ops;
        mmc->caps = MMC_CAP_4_BIT_DATA;
        mmc->caps |= pdata->capabilities;
        mmc->f_max = pdata->hclk;
        mmc->f_min = mmc->f_max / 512;
        if (pdata->ocr_mask)
                mmc->ocr_avail = pdata->ocr_mask;
        else
                mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

        /* Tell the MFD core we are ready to be enabled */
        if (cell->enable) {
                ret = cell->enable(dev);
                if (ret)
                        goto unmap_ctl;
        }

        tmio_mmc_clk_stop(host);
        reset(host);

        ret = platform_get_irq(dev, 0);
        if (ret >= 0)
                host->irq = ret;
        else
                goto cell_disable;

        disable_mmc_irqs(host, TMIO_MASK_ALL);

        ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
                IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
        if (ret)
                goto cell_disable;

        /* See if we also get DMA */
        tmio_mmc_request_dma(host, pdata);

        mmc_add_host(mmc);

        pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
                (unsigned long)host->ctl, host->irq);

        /* Unmask the IRQs we want to know about */
        if (!host->chan_rx)
                irq_mask |= TMIO_MASK_READOP;
        if (!host->chan_tx)
                irq_mask |= TMIO_MASK_WRITEOP;
        enable_mmc_irqs(host, irq_mask);

        return 0;

cell_disable:
        if (cell->disable)
                cell->disable(dev);
unmap_ctl:
        iounmap(host->ctl);
host_free:
        mmc_free_host(mmc);
out:
        return ret;
}

static int __devexit tmio_mmc_remove(struct platform_device *dev)
{
        struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
        struct mmc_host *mmc = platform_get_drvdata(dev);

        platform_set_drvdata(dev, NULL);

        if (mmc) {
                struct tmio_mmc_host *host = mmc_priv(mmc);
                mmc_remove_host(mmc);
                tmio_mmc_release_dma(host);
                free_irq(host->irq, host);
                if (cell->disable)
                        cell->disable(dev);
                iounmap(host->ctl);
                mmc_free_host(mmc);
        }

        return 0;
}

/* ------------------- device registration ----------------------- */

static struct platform_driver tmio_mmc_driver = {
        .driver = {
                .name   = "tmio-mmc",
                .owner  = THIS_MODULE,
        },
        .probe   = tmio_mmc_probe,
        .remove  = __devexit_p(tmio_mmc_remove),
        .suspend = tmio_mmc_suspend,
        .resume  = tmio_mmc_resume,
};

static int __init tmio_mmc_init(void)
{
        return platform_driver_register(&tmio_mmc_driver);
}

static void __exit tmio_mmc_exit(void)
{
        platform_driver_unregister(&tmio_mmc_driver);
}

module_init(tmio_mmc_init);
module_exit(tmio_mmc_exit);

MODULE_DESCRIPTION("Toshiba TMIO SD/MMC driver");
MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:tmio-mmc");