block/blk-softirq.c (net-next-2.6.git, commit b646fc59: "block: split softirq handling into blk-softirq.c")
/*
 * Functions related to softirq rq completions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>

#include "blk.h"

static DEFINE_PER_CPU(struct list_head, blk_cpu_done);

static int __cpuinit blk_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;

		local_irq_disable();
		list_splice_init(&per_cpu(blk_cpu_done, cpu),
				 &__get_cpu_var(blk_cpu_done));
		raise_softirq_irqoff(BLOCK_SOFTIRQ);
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block blk_cpu_notifier __cpuinitdata = {
	.notifier_call	= blk_cpu_notify,
};

/*
 * Splice the per-cpu completion list to a local list, then complete each
 * request by calling its queue's ->softirq_done_fn() callback.
 */
static void blk_done_softirq(struct softirq_action *h)
{
	struct list_head *cpu_list, local_list;

	local_irq_disable();
	cpu_list = &__get_cpu_var(blk_cpu_done);
	list_replace_init(cpu_list, &local_list);
	local_irq_enable();

	while (!list_empty(&local_list)) {
		struct request *rq;

		rq = list_entry(local_list.next, struct request, donelist);
		list_del_init(&rq->donelist);
		rq->q->softirq_done_fn(rq);
	}
}

/**
 * blk_complete_request - end I/O on a request
 * @req:	the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions,
 *	unless the driver actually implements this in its completion callback
 *	through requeueing. The actual completion happens out-of-order,
 *	through a softirq handler. The user must have registered a completion
 *	callback through blk_queue_softirq_done().
 **/
void blk_complete_request(struct request *req)
{
	struct list_head *cpu_list;
	unsigned long flags;

	BUG_ON(!req->q->softirq_done_fn);

	local_irq_save(flags);

	cpu_list = &__get_cpu_var(blk_cpu_done);
	list_add_tail(&req->donelist, cpu_list);
	raise_softirq_irqoff(BLOCK_SOFTIRQ);

	local_irq_restore(flags);
}
EXPORT_SYMBOL(blk_complete_request);
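
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * wants its completions run from BLOCK_SOFTIRQ context registers a
 * callback with blk_queue_softirq_done() at queue-setup time and then
 * calls blk_complete_request() from its hard-irq handler.  The names
 * mydrv_softirq_done(), mydrv_irq(), mydrv_fetch_completed() and
 * mydrv_error() below are hypothetical driver helpers.
 *
 *	static void mydrv_softirq_done(struct request *rq)
 *	{
 *		__blk_end_request(rq, mydrv_error(rq), blk_rq_bytes(rq));
 *	}
 *
 *	At queue-setup time:
 *		blk_queue_softirq_done(q, mydrv_softirq_done);
 *
 *	In the interrupt handler:
 *		static irqreturn_t mydrv_irq(int irq, void *dev_id)
 *		{
 *			struct request *rq = mydrv_fetch_completed(dev_id);
 *
 *			blk_complete_request(rq);
 *			return IRQ_HANDLED;
 *		}
 *
 * mydrv_softirq_done() is then invoked from blk_done_softirq() above, so
 * the hard-irq handler only queues the request and raises the softirq.
 */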

int __init blk_softirq_init(void)
{
	int i;

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));

	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
	register_hotcpu_notifier(&blk_cpu_notifier);
	return 0;
}
subsys_initcall(blk_softirq_init);