Btrfs: fix async worker startup race
fs/btrfs/async-thread.c [net-next-2.6.git]
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"

#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
#define WORK_HIGH_PRIO_BIT 3

/*
 * container for the kthread task pointer and the list of pending work.
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;
	struct list_head prio_pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* reference counter for this struct */
	atomic_t refs;

	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;

		/* the list may be empty if the worker is just starting */
		if (!list_empty(&worker->worker_list)) {
			list_move(&worker->worker_list,
				  &worker->workers->idle_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;

		if (!list_empty(&worker->worker_list)) {
			list_move_tail(&worker->worker_list,
				       &worker->workers->worker_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

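/*
 * handle a deferred worker start: when work is queued from a context that
 * cannot create a new thread (workers->atomic_worker_start is set),
 * find_worker only flags atomic_start_pending.  The worker threads call
 * this between work items to actually start the extra thread.
 */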
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
	struct btrfs_workers *workers = worker->workers;
	unsigned long flags;

	rmb();
	if (!workers->atomic_start_pending)
		return;

	spin_lock_irqsave(&workers->lock, flags);
	if (!workers->atomic_start_pending)
		goto out;

	workers->atomic_start_pending = 0;
	if (workers->num_workers >= workers->max_workers)
		goto out;

	spin_unlock_irqrestore(&workers->lock, flags);
	btrfs_start_workers(workers, 1);
	return;

out:
	spin_unlock_irqrestore(&workers->lock, flags);
}

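/*
 * for ordered workqueues the completion callbacks must run in the order
 * the items were queued.  Walk the order lists (high priority first) and
 * call ordered_func/ordered_free on each finished item at the head,
 * stopping at the first item that has not completed yet.
 */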
static noinline int run_ordered_completions(struct btrfs_workers *workers,
					    struct btrfs_work *work)
{
	if (!workers->ordered)
		return 0;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock(&workers->order_lock);

	while (1) {
		if (!list_empty(&workers->prio_order_list)) {
			work = list_entry(workers->prio_order_list.next,
					  struct btrfs_work, order_list);
		} else if (!list_empty(&workers->order_list)) {
			work = list_entry(workers->order_list.next,
					  struct btrfs_work, order_list);
		} else {
			break;
		}
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock(&workers->order_lock);

		work->ordered_func(work);

		/* now take the lock again and call the freeing code */
		spin_lock(&workers->order_lock);
		list_del(&work->order_list);
		work->ordered_free(work);
	}

	spin_unlock(&workers->order_lock);
	return 0;
}

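/* drop a reference on the worker struct and free it once the last ref is gone */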
static void put_worker(struct btrfs_worker_thread *worker)
{
	if (atomic_dec_and_test(&worker->refs))
		kfree(worker);
}

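/*
 * called after a worker has been idle for a long time.  If the pool has
 * more than one thread and this worker has nothing pending, pull it off
 * the worker list and drop the list's reference so the thread can exit.
 */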
static int try_worker_shutdown(struct btrfs_worker_thread *worker)
{
	int freeit = 0;

	spin_lock_irq(&worker->lock);
	/* irqs are already off, so a plain spin_lock is enough for the nested lock */
	spin_lock(&worker->workers->lock);
	if (worker->workers->num_workers > 1 &&
	    worker->idle &&
	    !worker->working &&
	    !list_empty(&worker->worker_list) &&
	    list_empty(&worker->prio_pending) &&
	    list_empty(&worker->pending)) {
		freeit = 1;
		list_del_init(&worker->worker_list);
		worker->workers->num_workers--;
	}
	spin_unlock(&worker->workers->lock);
	spin_unlock_irq(&worker->lock);

	if (freeit)
		put_worker(worker);
	return freeit;
}

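/*
 * pull the next work item for this worker, preferring the high priority
 * queue.  The per-worker pending lists are spliced onto the caller's
 * local list heads so a batch of items can be processed without taking
 * worker->lock for every single one.
 */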
static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
					struct list_head *prio_head,
					struct list_head *head)
{
	struct btrfs_work *work = NULL;
	struct list_head *cur = NULL;

	if (!list_empty(prio_head))
		cur = prio_head->next;

	smp_mb();
	if (!list_empty(&worker->prio_pending))
		goto refill;

	if (!list_empty(head))
		cur = head->next;

	if (cur)
		goto out;

refill:
	spin_lock_irq(&worker->lock);
	list_splice_tail_init(&worker->prio_pending, prio_head);
	list_splice_tail_init(&worker->pending, head);

	if (!list_empty(prio_head))
		cur = prio_head->next;
	else if (!list_empty(head))
		cur = head->next;
	spin_unlock_irq(&worker->lock);

	if (!cur)
		goto out_fail;

out:
	work = list_entry(cur, struct btrfs_work, list);

out_fail:
	return work;
}

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head head;
	struct list_head prio_head;
	struct btrfs_work *work;

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&prio_head);

	do {
again:
		while (1) {
			work = get_next_work(worker, &prio_head, &head);
			if (!work)
				break;

			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			check_pending_worker_creates(worker);
		}

		spin_lock_irq(&worker->lock);
		check_idle_worker(worker);

		if (freezing(current)) {
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop()) {
				cpu_relax();
				/*
				 * we've dropped the lock, did someone else
				 * jump in?
				 */
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				/*
				 * this short schedule allows more work to
				 * come in without the queue functions
				 * needing to go through wake_up_process()
				 *
				 * worker->working is still 1, so nobody
				 * is going to try and wake us up
				 */
				schedule_timeout(1);
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				if (kthread_should_stop())
					break;

				/* still no more work? sleep for real */
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending)) {
					spin_unlock_irq(&worker->lock);
					goto again;
				}

				/*
				 * this makes sure we get a wakeup when someone
				 * adds something new to the queue
				 */
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

				if (!kthread_should_stop()) {
					schedule_timeout(HZ * 120);
					if (!worker->working &&
					    try_worker_shutdown(worker)) {
						return 0;
					}
				}
			}
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will wait for all the worker threads to shut down
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;
	int can_stop;

	spin_lock_irq(&workers->lock);
	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);

		atomic_inc(&worker->refs);
		workers->num_workers -= 1;
		if (!list_empty(&worker->worker_list)) {
			list_del_init(&worker->worker_list);
			put_worker(worker);
			can_stop = 1;
		} else
			can_stop = 0;
		spin_unlock_irq(&workers->lock);
		if (can_stop)
			kthread_stop(worker->task);
		spin_lock_irq(&workers->lock);
		put_worker(worker);
	}
	spin_unlock_irq(&workers->lock);
	return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
	workers->num_workers = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	INIT_LIST_HEAD(&workers->prio_order_list);
	spin_lock_init(&workers->lock);
	spin_lock_init(&workers->order_lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
	workers->atomic_start_pending = 0;
	workers->atomic_worker_start = 0;
}

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->prio_pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);

		atomic_set(&worker->num_pending, 0);
		atomic_set(&worker->refs, 1);
		worker->workers = workers;
		worker->task = kthread_run(worker_loop, worker,
					   "btrfs-%s-%d", workers->name,
					   workers->num_workers + i);
		if (IS_ERR(worker->task)) {
			ret = PTR_ERR(worker->task);
			kfree(worker);
			goto fail;
		}
		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->idle_list);
		worker->idle = 1;
		workers->num_workers++;
		spin_unlock_irq(&workers->lock);
	}
	return 0;
fail:
	btrfs_stop_workers(workers);
	return ret;
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return NULL if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min = workers->num_workers < workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	atomic_inc(&worker->num_pending);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}

/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	struct list_head *fallback;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);

	if (!worker) {
		if (workers->num_workers >= workers->max_workers) {
			goto fallback;
		} else if (workers->atomic_worker_start) {
			workers->atomic_start_pending = 1;
			goto fallback;
		} else {
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	spin_unlock_irqrestore(&workers->lock, flags);
	return worker;

fallback:
	fallback = NULL;
	/*
	 * we have failed to find any workers, just
	 * return the first one we can find.
	 */
	if (!list_empty(&workers->worker_list))
		fallback = workers->worker_list.next;
	if (!list_empty(&workers->idle_list))
		fallback = workers->idle_list.next;
	BUG_ON(!fallback);
	worker = list_entry(fallback,
			    struct btrfs_worker_thread, worker_list);
	spin_unlock_irqrestore(&workers->lock, flags);
	return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return 0;
}

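/*
 * flag a work item as high priority so btrfs_queue_worker and
 * btrfs_requeue_work put it on the prio lists
 */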
void btrfs_set_work_high_prio(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	worker = find_worker(workers);
	if (workers->ordered) {
		/*
		 * you're not allowed to do ordered queues from an
		 * interrupt handler
		 */
		spin_lock(&workers->order_lock);
		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
			list_add_tail(&work->order_list,
				      &workers->prio_order_list);
		} else {
			list_add_tail(&work->order_list, &workers->order_list);
		}
		spin_unlock(&workers->order_lock);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);
	check_busy_worker(worker);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);

out:
	return 0;
}
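
/*
 * Illustrative usage sketch (not part of the btrfs code itself): how a
 * caller might wire these helpers together.  The pool name, pool size and
 * the my_work_done() callback are made-up examples, and error handling is
 * omitted.
 *
 *	static void my_work_done(struct btrfs_work *work)
 *	{
 *		do_deferred_thing(work);	(hypothetical helper)
 *		kfree(work);
 *	}
 *
 *	struct btrfs_workers pool;
 *	struct btrfs_work *work;
 *
 *	btrfs_init_workers(&pool, "example", 4);
 *	btrfs_start_workers(&pool, 1);
 *
 *	work = kzalloc(sizeof(*work), GFP_NOFS);
 *	work->func = my_work_done;
 *	btrfs_queue_worker(&pool, work);
 *
 *	btrfs_stop_workers(&pool);	(once the pool is no longer needed)
 */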