/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

#include <linux/sunrpc/clnt.h>

#include "sunrpc.h"

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY		RPCDBG_SCHED
#endif

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(unsigned long ptr);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue;

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (task->tk_timeout == 0)
		return;
	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		del_timer(&queue->timer_list.timer);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	queue->timer_list.expires = expires;
	mod_timer(&queue->timer_list.timer, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!task->tk_timeout)
		return;

	dprintk("RPC: %5u setting alarm for %lu ms\n",
			task->tk_pid, task->tk_timeout * 1000 / HZ);

	task->u.tk_wait.expires = jiffies + task->tk_timeout;
	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	INIT_LIST_HEAD(&task->u.tk_wait.links);
	q = &queue->tasks[task->tk_priority];
	if (unlikely(task->tk_priority > queue->maxpriority))
		q = &queue->tasks[queue->maxpriority];
	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
			return;
		}
	}
	list_add_tail(&task->u.tk_wait.list, q);
}
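
/*
 * Resulting layout: the list at tasks[prio] holds at most one entry per
 * owner; later tasks from the same owner hang off that first entry via
 * u.tk_wait.links. Illustration (T1..T4 are hypothetical tasks):
 *
 *	tasks[prio]: T1 (owner A) --- T2 (owner B)
 *	                 `-- links: T3 (owner A) --- T4 (owner A)
 */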

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	BUG_ON(RPC_IS_QUEUED(task));

	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	rpc_set_queued(task);

	dprintk("RPC: %5u added to queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	struct rpc_task *t;

	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
	}
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	list_del(&task->u.tk_wait.list);
	queue->qlen--;
	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	queue->priority = priority;
	queue->count = 1 << (priority * 2);
}

static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
{
	queue->owner = pid;
	queue->nr = RPC_BATCH_COUNT;
}

static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
	rpc_set_waitqueue_owner(queue, 0);
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
	INIT_LIST_HEAD(&queue->timer_list.list);
#ifdef RPC_DEBUG
	queue->name = qname;
#endif
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

static int rpc_wait_bit_killable(void *word)
{
	if (fatal_signal_pending(current))
		return -ERESTARTSYS;
	schedule();
	return 0;
}

#ifdef RPC_DEBUG
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 */
static void rpc_mark_complete_task(struct rpc_task *task)
{
	smp_mb__before_clear_bit();
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	smp_mb__after_clear_bit();
	wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
}

/*
 * Allow callers to wait for completion of an RPC call
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
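
/*
 * Sketch of the usual synchronous caller pattern (illustrative only:
 * rpc_run_task() lives in clnt.c, and error handling is elided).
 * Callers usually go through the rpc_wait_for_completion_task()
 * wrapper, which passes a NULL action and so selects
 * rpc_wait_bit_killable above:
 *
 *	task = rpc_run_task(&task_setup_data);
 *	if (!IS_ERR(task)) {
 *		status = __rpc_wait_for_completion_task(task, NULL);
 *		if (status == 0)
 *			status = task->tk_status;
 *		rpc_put_task(task);
 *	}
 */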

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * the spinlock held to protect the wait queue operation.
 */
static void rpc_make_runnable(struct rpc_task *task)
{
	rpc_clear_queued(task);
	if (rpc_test_and_set_running(task))
		return;
	if (RPC_IS_ASYNC(task)) {
		int status;

		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		status = queue_work(rpciod_workqueue, &task->u.tk_work);
		if (status < 0) {
			printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
			task->tk_status = status;
			return;
		}
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
			rpc_action action)
{
	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
			task->tk_pid, rpc_qname(q), jiffies);

	__rpc_add_wait_queue(q, task);

	BUG_ON(task->tk_callback != NULL);
	task->tk_callback = action;
	__rpc_add_timer(q, task);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action)
{
	/* We shouldn't ever put an inactive task to sleep */
	BUG_ON(!RPC_IS_ACTIVATED(task));

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on(q, task, action);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);
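
/*
 * Sketch of a tk_action step that puts the task to sleep until some
 * condition improves (the queue, timeout and function names below are
 * illustrative, not part of this file):
 *
 *	static void call_wait_for_slot(struct rpc_task *task)
 *	{
 *		task->tk_timeout = SLOT_TIMEOUT;	(hypothetical)
 *		rpc_sleep_on(&backlog_queue, task, NULL);
 *	}
 *
 * A later rpc_wake_up_queued_task() on the same queue, or the queue
 * timer firing with -ETIMEDOUT, makes the task runnable again.
 */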

/**
 * __rpc_do_wake_up_task - wake up a single rpc_task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(task);

	dprintk("RPC: __rpc_wake_up_task done\n");
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
		__rpc_do_wake_up_task(queue, task);
}

/*
 * Tests whether rpc queue is empty
 */
int rpc_queue_empty(struct rpc_wait_queue *queue)
{
	int res;

	spin_lock_bh(&queue->lock);
	res = queue->qlen;
	spin_unlock_bh(&queue->lock);
	return (res == 0);
}
EXPORT_SYMBOL_GPL(rpc_queue_empty);

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q)) {
		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
		if (queue->owner == task->tk_owner) {
			if (--queue->nr)
				goto out;
			list_move_tail(&task->u.tk_wait.list, q);
		}
		/*
		 * Check if we need to switch queues.
		 */
		if (--queue->count)
			goto new_owner;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_owner:
	rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
	rpc_wake_up_task_queue_locked(queue, task);
	return task;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	struct rpc_task *task = NULL;

	dprintk("RPC: wake_up_next(%p \"%s\")\n",
			queue, rpc_qname(queue));
	spin_lock_bh(&queue->lock);
	if (RPC_IS_PRIORITY(queue))
		task = __rpc_wake_up_next_priority(queue);
	else {
		task_for_first(task, &queue->tasks[0])
			rpc_wake_up_task_queue_locked(queue, task);
	}
	spin_unlock_bh(&queue->lock);

	return task;
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct rpc_task *task, *next;
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		list_for_each_entry_safe(task, next, head, u.tk_wait.list)
			rpc_wake_up_task_queue_locked(queue, task);
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct rpc_task *task, *next;
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
			task->tk_status = status;
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);
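
/*
 * Example (sketch): an error path can fail every sleeper in one pass,
 * e.g. when a transport is torn down (the queue name is illustrative):
 *
 *	rpc_wake_up_status(&xprt_pending_queue, -EIO);
 *
 * Each woken task then resumes its state machine with
 * tk_status == -EIO.
 */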

static void __rpc_queue_timer_fn(unsigned long ptr)
{
	struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->u.tk_wait.expires;
		if (time_after_eq(now, timeo)) {
			dprintk("RPC: %5u timeout\n", task->tk_pid);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

static void __rpc_atrun(struct rpc_task *task)
{
	task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);
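
/*
 * Example (sketch): a tk_action routine backing off after a temporary
 * failure; the delay value and next-step name are illustrative:
 *
 *	rpc_delay(task, 3 * HZ);
 *	task->tk_action = call_retry;	(hypothetical)
 *
 * When the delay expires, the queue timer wakes the task with
 * -ETIMEDOUT and __rpc_atrun() resets tk_status to 0, so the retry
 * step starts with a clean status.
 */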

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	task->tk_action = NULL;
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			WARN_ON(RPC_ASSASSINATED(task));
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
		}
	}
}

void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	if (RPC_IS_QUEUED(task))
		rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;

	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
			task->tk_pid, task->tk_flags);

	BUG_ON(RPC_IS_QUEUED(task));

	for (;;) {

		/*
		 * Execute any pending callback.
		 */
		if (task->tk_callback) {
			void (*save_callback)(struct rpc_task *);

			/*
			 * We set tk_callback to NULL before calling it,
			 * in case it sets the tk_callback field itself:
			 */
			save_callback = task->tk_callback;
			task->tk_callback = NULL;
			save_callback(task);
		}

		/*
		 * Perform the next FSM step.
		 * tk_action may be NULL when the task has been killed
		 * by someone else.
		 */
		if (!RPC_IS_QUEUED(task)) {
			if (task->tk_action == NULL)
				break;
			task->tk_action(task);
		}

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;
		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer can still be dereferenced safely after
		 * that point.
		 */
		queue = task->tk_waitqueue;
		spin_lock_bh(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock_bh(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock_bh(&queue->lock);
		if (task_is_async)
			return;

		/* sync task: sleep here */
		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status == -ERESTARTSYS) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %5u got signal\n", task->tk_pid);
			task->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(task, -ERESTARTSYS);
		}
		rpc_set_running(task);
		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
			task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 * released. In particular note that tk_release() will have
 * been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	rpc_set_active(task);
	rpc_set_running(task);
	__rpc_execute(task);
}

static void rpc_async_schedule(struct work_struct *work)
{
	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
}

/**
 * rpc_malloc - allocate an RPC buffer
 * @task: RPC task that will use this buffer
 * @size: requested byte size
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning NULL if the request cannot be serviced immediately.
 * The caller can arrange to sleep in a way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 *
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we avoid using GFP_KERNEL.
 */
void *rpc_malloc(struct rpc_task *task, size_t size)
{
	struct rpc_buffer *buf;
	gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);

	if (!buf)
		return NULL;

	buf->len = size;
	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
			task->tk_pid, size, buf);
	return &buf->data;
}
EXPORT_SYMBOL_GPL(rpc_malloc);

/**
 * rpc_free - free buffer allocated via rpc_malloc
 * @buffer: buffer to free
 *
 */
void rpc_free(void *buffer)
{
	size_t size;
	struct rpc_buffer *buf;

	if (!buffer)
		return;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	dprintk("RPC: freeing buffer of size %zu at %p\n",
			size, buf);

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);
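
/*
 * rpc_malloc() and rpc_free() are designed to be plugged directly into
 * a transport's ops table (sketch, cf. the socket transport):
 *
 *	.buf_alloc	= rpc_malloc,
 *	.buf_free	= rpc_free,
 */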

/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	/* starting timestamp */
	task->tk_start = ktime_get();

	dprintk("RPC: new task initialized, procpid %u\n",
			task_pid_nr(current));
}

static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task *task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		if (task == NULL) {
			rpc_release_calldata(setup_data->callback_ops,
					setup_data->callback_data);
			return ERR_PTR(-ENOMEM);
		}
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);
	if (task->tk_status < 0) {
		int err = task->tk_status;
		rpc_put_task(task);
		return ERR_PTR(err);
	}

	task->tk_flags |= flags;
	dprintk("RPC: allocated task %p\n", task);
	return task;
}
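
/*
 * Example (sketch): callers normally fill in an rpc_task_setup and let
 * rpc_run_task() (in clnt.c) invoke rpc_new_task() and rpc_execute();
 * the callback ops shown here are illustrative:
 *
 *	struct rpc_task_setup task_setup_data = {
 *		.rpc_client	= clnt,
 *		.rpc_message	= &msg,
 *		.callback_ops	= &my_call_ops,	(hypothetical)
 *		.flags		= RPC_TASK_ASYNC,
 *	};
 *	task = rpc_run_task(&task_setup_data);
 */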

static void rpc_free_task(struct rpc_task *task)
{
	const struct rpc_call_ops *tk_ops = task->tk_ops;
	void *calldata = task->tk_calldata;

	if (task->tk_flags & RPC_TASK_DYNAMIC) {
		dprintk("RPC: %5u freeing task\n", task->tk_pid);
		mempool_free(task, rpc_task_mempool);
	}
	rpc_release_calldata(tk_ops, calldata);
}

static void rpc_async_release(struct work_struct *work)
{
	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
}

void rpc_put_task(struct rpc_task *task)
{
	if (!atomic_dec_and_test(&task->tk_count))
		return;
	/* Release resources */
	if (task->tk_rqstp)
		xprt_release(task);
	if (task->tk_msg.rpc_cred)
		put_rpccred(task->tk_msg.rpc_cred);
	rpc_task_release_client(task);
	if (task->tk_workqueue != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(task->tk_workqueue, &task->u.tk_work);
	} else
		rpc_free_task(task);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

static void rpc_release_task(struct rpc_task *task)
{
	dprintk("RPC: %5u release task\n", task->tk_pid);

	BUG_ON(RPC_IS_QUEUED(task));

	/* Wake up anyone who is waiting for task completion */
	rpc_mark_complete_task(task);

	rpc_put_task(task);
}

int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod workqueue.
	 */
	dprintk("RPC: creating workqueue rpciod\n");
	wq = create_workqueue("rpciod");
	rpciod_workqueue = wq;
	return rpciod_workqueue != NULL;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;
	dprintk("RPC: destroying workqueue rpciod\n");

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	if (rpc_buffer_mempool)
		mempool_destroy(rpc_buffer_mempool);
	if (rpc_task_mempool)
		mempool_destroy(rpc_task_mempool);
	if (rpc_task_slabp)
		kmem_cache_destroy(rpc_task_slabp);
	if (rpc_buffer_slabp)
		kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					     sizeof(struct rpc_task),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}