/* drivers/staging/westbridge/astoria/block/cyasblkdev_queue.c */
/* cyasblkdev_queue.c - Antioch Linux Block Driver queue source file
2## ===========================
3## Copyright (C) 2010 Cypress Semiconductor
4##
5## This program is free software; you can redistribute it and/or
6## modify it under the terms of the GNU General Public License
7## as published by the Free Software Foundation; either version 2
8## of the License, or (at your option) any later version.
9##
10## This program is distributed in the hope that it will be useful,
11## but WITHOUT ANY WARRANTY; without even the implied warranty of
12## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13## GNU General Public License for more details.
14##
15## You should have received a copy of the GNU General Public License
16## along with this program; if not, write to the Free Software
17## Foundation, Inc., 51 Franklin Street, Fifth Floor,
18## Boston, MA 02110-1301, USA.
19## ===========================
20*/
21
22/*
23 * Request queue handling for Antioch block device driver.
24 * Based on the mmc queue handling code by Russell King in the
25 * linux 2.6.10 kernel.
26 */
27
28/*
29 * linux/drivers/mmc/mmc_queue.c
30 *
31 * Copyright (C) 2003 Russell King, All Rights Reserved.
32 *
33 * This program is free software; you can redistribute it and/or modify
34 * it under the terms of the GNU General Public License version 2 as
35 * published by the Free Software Foundation.
36 *
37 */
38
39#include <linux/module.h>
40#include <linux/blkdev.h>
41
42#include "cyasblkdev_queue.h"
43
44#define CYASBLKDEV_QUEUE_EXIT (1 << 0)
45#define CYASBLKDEV_QUEUE_SUSPENDED (1 << 1)
46#define CY_AS_USE_ASYNC_API
47
48
49
50/* print flags by name */
/*
 * Human-readable names for the request flag bits, indexed by bit
 * position.  Used only for debug logging in verbose_rq_flags().
 */
const char *rq_flag_bit_names[] = {
	"REQ_RW",           /* direction: clear = read, set = write */
	"REQ_FAILFAST",     /* no low level driver retries */
	"REQ_SORTED",       /* elevator knows about this request */
	"REQ_SOFTBARRIER",  /* may not be passed by ioscheduler */
	"REQ_HARDBARRIER",  /* may not be passed by drive either */
	"REQ_FUA",          /* forced unit access */
	"REQ_NOMERGE",      /* don't touch this for merging */
	"REQ_STARTED",      /* drive already may have started this one */
	"REQ_DONTPREP",     /* don't call prep for this one */
	"REQ_QUEUED",       /* uses queueing */
	"REQ_ELVPRIV",      /* elevator private data attached */
	"REQ_FAILED",       /* set if the request failed */
	"REQ_QUIET",        /* don't worry about errors */
	"REQ_PREEMPT",      /* set for "ide_preempt" requests */
	"REQ_ORDERED_COLOR",/* is before or after barrier */
	"REQ_RW_SYNC",      /* request is sync (O_DIRECT) */
	"REQ_ALLOCED",      /* request came from our alloc pool */
	"REQ_RW_META",      /* metadata io request */
	"REQ_COPY_USER",    /* contains copies of user pages */
	"REQ_NR_BITS",      /* stops here */
};
73
74void verbose_rq_flags(int flags)
75{
76 int i;
77 uint32_t j;
78 j = 1;
79 for (i = 0; i < 32; i++) {
80 if (flags & j)
81 DBGPRN("<1>%s", rq_flag_bit_names[i]);
82 j = j << 1;
83 }
84}
85
86
87/*
88 * Prepare a -BLK_DEV request. Essentially, this means passing the
89 * preparation off to the media driver. The media driver will
90 * create request to CyAsDev.
91 */
92static int cyasblkdev_prep_request(
93 struct request_queue *q, struct request *req)
94{
95 DBGPRN_FUNC_NAME;
96
97 /* we only like normal block requests.*/
72bbd9bc 98 if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
81eb669b
DC
99 #ifndef WESTBRIDGE_NDEBUG
100 cy_as_hal_print_message("%s:%x bad request received\n",
0769c38d 101 __func__, current->pid);
81eb669b
DC
102 #endif
103
104 blk_dump_rq_flags(req, "cyasblkdev bad request");
105 return BLKPREP_KILL;
106 }
107
108 req->cmd_flags |= REQ_DONTPREP;
109
110 return BLKPREP_OK;
111}
112
113/* queue worker thread */
/*
 * Queue worker thread: repeatedly fetches requests from the block
 * queue and hands them to bq->issue_fn() until the EXIT flag is set.
 * thread_sem serializes the worker against suspend/resume; thread_wq
 * is the wakeup channel used by cyasblkdev_request().
 */
static int cyasblkdev_queue_thread(void *d)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cyasblkdev_queue *bq = d;
	struct request_queue *q = bq->queue;
	u32 qth_pid;

	DBGPRN_FUNC_NAME;

	/*
	 * set iothread to ensure that we aren't put to sleep by
	 * the process freezing. we handle suspension ourselves.
	 */
	daemonize("cyasblkdev_queue_thread");

	/* signal to queue_init() so it could continue */
	complete(&bq->thread_complete);

	down(&bq->thread_sem);
	add_wait_queue(&bq->thread_wq, &wait);

	qth_pid = current->pid;

	#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message(
		"%s:%x started, bq:%p, q:%p\n", __func__, qth_pid, bq, q);
	#endif

	do {
		struct request *req = NULL;

		/* the thread wants to be woken up by signals as well */
		set_current_state(TASK_INTERRUPTIBLE);

		/* queue_lock protects bq->req and the request queue */
		spin_lock_irq(q->queue_lock);

		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: for bq->queue is null\n", __func__);
		#endif

		if (!bq->req) {
			/* chk if queue is plugged */
			if (!blk_queue_plugged(q)) {
				bq->req = req = blk_fetch_request(q);
				#ifndef WESTBRIDGE_NDEBUG
				cy_as_hal_print_message(
					"%s: blk_fetch_request:%x\n",
					__func__, (uint32_t)req);
				#endif
			} else {
				#ifndef WESTBRIDGE_NDEBUG
				cy_as_hal_print_message(
					"%s: queue plugged, "
					"skip blk_fetch()\n", __func__);
				#endif
			}
		}
		spin_unlock_irq(q->queue_lock);

		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: checking if request queue is null\n", __func__);
		#endif

		if (!req) {
			/* no work: either exit or go back to sleep */
			if (bq->flags & CYASBLKDEV_QUEUE_EXIT) {
				#ifndef WESTBRIDGE_NDEBUG
				cy_as_hal_print_message(
					"%s:got QUEUE_EXIT flag\n", __func__);
				#endif

				break;
			}

			#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message(
				"%s: request queue is null, goto sleep, "
				"thread_sem->count=%d\n",
				__func__, bq->thread_sem.count);
			if (spin_is_locked(q->queue_lock)) {
				cy_as_hal_print_message("%s: queue_lock "
				"is locked, need to release\n", __func__);
				spin_unlock(q->queue_lock);

				if (spin_is_locked(q->queue_lock))
					cy_as_hal_print_message(
						"%s: unlock did not work\n",
						__func__);
			} else {
				cy_as_hal_print_message(
					"%s: checked lock, is not locked\n",
					__func__);
			}
			#endif

			/* release the semaphore while sleeping so
			 * suspend (down) can make progress */
			up(&bq->thread_sem);

			/* yields to the next rdytorun proc,
			 * then goes back to sleep*/
			schedule();
			down(&bq->thread_sem);

			#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message(
				"%s: wake_up,continue\n",
				__func__);
			#endif
			continue;
		}

		/* new req received, issue it to the driver */
		set_current_state(TASK_RUNNING);

		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: issued a RQ:%x\n",
			__func__, (uint32_t)req);
		#endif

		bq->issue_fn(bq, req);

		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: bq->issue_fn() returned\n",
			__func__);
		#endif


	} while (1);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&bq->thread_wq, &wait);
	up(&bq->thread_sem);

	/* signals cyasblkdev_cleanup_queue() and terminates the thread */
	complete_and_exit(&bq->thread_complete, 0);

	/* NOTE(review): complete_and_exit() does not return, so the
	 * code below is unreachable dead code. */
	#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message("%s: is finished\n", __func__);
	#endif

	return 0;
}
257
258/*
259 * Generic request handler. it is called for any queue on a
260 * particular host. When the host is not busy, we look for a request
261 * on any queue on this host, and attempt to issue it. This may
262 * not be the queue we were asked to process.
263 */
264static void cyasblkdev_request(struct request_queue *q)
265{
266 struct cyasblkdev_queue *bq = q->queuedata;
267 DBGPRN_FUNC_NAME;
268
269 #ifndef WESTBRIDGE_NDEBUG
270 cy_as_hal_print_message(
271 "%s new request on cyasblkdev_queue_t bq:=%x\n",
272 __func__, (uint32_t)bq);
273 #endif
274
275 if (!bq->req) {
276 #ifndef WESTBRIDGE_NDEBUG
277 cy_as_hal_print_message("%s wake_up(&bq->thread_wq)\n",
278 __func__);
279 #endif
280
281 /* wake up cyasblkdev_queue worker thread*/
282 wake_up(&bq->thread_wq);
283 } else {
284 #ifndef WESTBRIDGE_NDEBUG
285 cy_as_hal_print_message("%s: don't wake Q_thr, bq->req:%x\n",
286 __func__, (uint32_t)bq->req);
287 #endif
288 }
289}
290
291/*
292 * cyasblkdev_init_queue - initialise a queue structure.
293 * @bq: cyasblkdev queue
294 * @dev: CyAsDeviceHandle to attach this queue
295 * @lock: queue lock
296 *
297 * Initialise a cyasblkdev_request queue.
298 */
299
300/* MAX NUMBER OF SECTORS PER REQUEST **/
301#define Q_MAX_SECTORS 128
302
303/* MAX NUMBER OF PHYS SEGMENTS (entries in the SG list)*/
304#define Q_MAX_SGS 16
305
306int cyasblkdev_init_queue(struct cyasblkdev_queue *bq, spinlock_t *lock)
307{
308 int ret;
309
310 DBGPRN_FUNC_NAME;
311
312 /* 1st param is a function that wakes up the queue thread */
313 bq->queue = blk_init_queue(cyasblkdev_request, lock);
314 if (!bq->queue)
315 return -ENOMEM;
316
317 blk_queue_prep_rq(bq->queue, cyasblkdev_prep_request);
318
319 blk_queue_bounce_limit(bq->queue, BLK_BOUNCE_ANY);
320 blk_queue_max_hw_sectors(bq->queue, Q_MAX_SECTORS);
321
322 /* As of now, we have the HAL/driver support to
323 * merge scattered segments and handle them simultaneously.
324 * so, setting the max_phys_segments to 8. */
325 /*blk_queue_max_phys_segments(bq->queue, Q_MAX_SGS);
326 blk_queue_max_hw_segments(bq->queue, Q_MAX_SGS);*/
327 blk_queue_max_segments(bq->queue, Q_MAX_SGS);
328
329 /* should be < then HAL can handle */
330 blk_queue_max_segment_size(bq->queue, 512*Q_MAX_SECTORS);
331
332 bq->queue->queuedata = bq;
333 bq->req = NULL;
334
335 init_completion(&bq->thread_complete);
336 init_waitqueue_head(&bq->thread_wq);
45f4d024 337 sema_init(&bq->thread_sem, 1);
81eb669b
DC
338
339 ret = kernel_thread(cyasblkdev_queue_thread, bq, CLONE_KERNEL);
340 if (ret >= 0) {
341 /* wait until the thread is spawned */
342 wait_for_completion(&bq->thread_complete);
343
344 /* reinitialize the completion */
345 init_completion(&bq->thread_complete);
346 ret = 0;
347 goto out;
348 }
349
350out:
351 return ret;
352}
353EXPORT_SYMBOL(cyasblkdev_init_queue);
354
355/*called from blk_put() */
void cyasblkdev_cleanup_queue(struct cyasblkdev_queue *bq)
{
	DBGPRN_FUNC_NAME;

	/* order matters: set the EXIT flag, wake the worker so it
	 * observes it, then wait for the thread to signal its exit
	 * via complete_and_exit() before tearing down the queue */
	bq->flags |= CYASBLKDEV_QUEUE_EXIT;
	wake_up(&bq->thread_wq);
	wait_for_completion(&bq->thread_complete);

	blk_cleanup_queue(bq->queue);
}
366EXPORT_SYMBOL(cyasblkdev_cleanup_queue);
367
368
369/**
370 * cyasblkdev_queue_suspend - suspend a CyAsBlkDev request queue
371 * @bq: CyAsBlkDev queue to suspend
372 *
373 * Stop the block request queue, and wait for our thread to
374 * complete any outstanding requests. This ensures that we
375 * won't suspend while a request is being processed.
376 */
377void cyasblkdev_queue_suspend(struct cyasblkdev_queue *bq)
378{
379 struct request_queue *q = bq->queue;
380 unsigned long flags;
381
382 DBGPRN_FUNC_NAME;
383
384 if (!(bq->flags & CYASBLKDEV_QUEUE_SUSPENDED)) {
385 bq->flags |= CYASBLKDEV_QUEUE_SUSPENDED;
386
387 spin_lock_irqsave(q->queue_lock, flags);
388 blk_stop_queue(q);
389 spin_unlock_irqrestore(q->queue_lock, flags);
390
391 down(&bq->thread_sem);
392 }
393}
394EXPORT_SYMBOL(cyasblkdev_queue_suspend);
395
396/*cyasblkdev_queue_resume - resume a previously suspended
397 * CyAsBlkDev request queue @bq: CyAsBlkDev queue to resume */
398void cyasblkdev_queue_resume(struct cyasblkdev_queue *bq)
399{
400 struct request_queue *q = bq->queue;
401 unsigned long flags;
402
403 DBGPRN_FUNC_NAME;
404
405 if (bq->flags & CYASBLKDEV_QUEUE_SUSPENDED) {
406 bq->flags &= ~CYASBLKDEV_QUEUE_SUSPENDED;
407
408 up(&bq->thread_sem);
409
410 spin_lock_irqsave(q->queue_lock, flags);
411 blk_start_queue(q);
412 spin_unlock_irqrestore(q->queue_lock, flags);
413 }
414}
415EXPORT_SYMBOL(cyasblkdev_queue_resume);
416
417/*[]*/