/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request. This rejects anything that is not a normal
 * filesystem request.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests.
	 */
	if (!blk_fs_request(req)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	/*
	 * Allow allocation from the emergency pools: this thread may be
	 * needed to make progress when dirty pages are being written out
	 * to the card under memory pressure.
	 */
	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			/*
			 * Drop thread_sem while sleeping so that
			 * mmc_queue_suspend() can grab it and know that
			 * no request is being processed.
			 */
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. Called from the block layer with the
 * queue lock held. If the queue has already been torn down (queuedata
 * is NULL), fail all pending requests; otherwise wake the dispatch
 * thread if it is idle so it can pick up the new request.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach to this queue
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mq->bounce_sg);	/* kfree(NULL) is a no-op */
	mq->bounce_sg = NULL;
 cleanup_queue:
	kfree(mq->sg);
	mq->sg = NULL;
	kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}

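/*
 * Usage sketch (not part of this file): a card driver is expected to
 * initialise the queue and then hook up its own issue function and
 * private data. The md, mmc_blk_data and mmc_blk_issue_rq names below
 * follow drivers/mmc/card/block.c but are shown here purely as an
 * illustration of the intended call pattern:
 *
 *	ret = mmc_init_queue(&md->queue, card, &md->lock);
 *	if (ret)
 *		goto err_putdisk;
 *
 *	md->queue.issue_fn = mmc_blk_issue_rq;
 *	md->queue.data = md;
 */
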
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* kfree(NULL) is a no-op, so the pointers need no checking */
	kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

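/*
 * Usage sketch (illustrative): suspend/resume are meant to bracket
 * host power management. A block driver's PM hooks would do something
 * like the following, where mmc_blk_data is an assumption borrowed
 * from drivers/mmc/card/block.c, not something this file defines:
 *
 *	static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
 *	{
 *		struct mmc_blk_data *md = mmc_get_drvdata(card);
 *
 *		if (md)
 *			mmc_queue_suspend(&md->queue);
 *		return 0;
 *	}
 *
 * with a matching mmc_queue_resume() call from the resume hook.
 */
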
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mq->sg, mq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != WRITE)
		return;

	local_irq_save(flags);
	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != READ)
		return;

	local_irq_save(flags);
	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}
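
/*
 * Issue-path sketch (illustrative): the three helpers above are meant
 * to be used together from a queue's issue_fn. The brq request setup
 * and the mmc_wait_for_req() call below mirror how a block driver
 * would drive them, and are an assumption about the caller, not
 * something this file defines:
 *
 *	brq.data.sg = mq->sg;
 *	brq.data.sg_len = mmc_queue_map_sg(mq);
 *
 *	mmc_queue_bounce_pre(mq);	// copy write data into bounce_buf
 *	mmc_wait_for_req(card->host, &brq.mrq);
 *	mmc_queue_bounce_post(mq);	// copy read data out of bounce_buf
 */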