/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue to
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card)) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
		mq->queue->limits.max_discard_sectors = UINT_MAX;
		if (card->erased_byte == 0)
			mq->queue->limits.discard_zeroes_data = 1;
		if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) {
			mq->queue->limits.discard_granularity =
							card->erase_size << 9;
			mq->queue->limits.discard_alignment =
							card->erase_size << 9;
		}
		if (mmc_can_secure_erase_trim(card))
			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
						mq->queue);
	}

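	/*
	 * When the host controller can only handle a single segment per
	 * request, allocate a contiguous bounce buffer so that larger,
	 * scattered requests can still be issued as a single transfer.
	 */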
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_segs);
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}

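/**
 * mmc_cleanup_queue - release an MMC request queue
 * @mq: MMC queue to clean up
 *
 * Resume the queue if it was suspended, stop the worker thread, drain
 * any remaining requests, and free the scatterlists and bounce buffer.
 */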
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mq->sg, mq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != WRITE)
		return;

	local_irq_save(flags);
	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != READ)
		return;

	local_irq_save(flags);
	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}