/*
 * Functions related to softirq rq completions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>

#include "blk.h"

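/*
 * Per-CPU list of requests awaiting their softirq completion pass.
 * Requests are linked in through rq->csd.list and drained by
 * blk_done_softirq().
 */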
static DEFINE_PER_CPU(struct list_head, blk_cpu_done);

/*
 * Softirq action handler - move entries to a local list and loop over them
 * while passing them to the queue's registered completion handler.
 */
static void blk_done_softirq(struct softirq_action *h)
{
	struct list_head *cpu_list, local_list;

	local_irq_disable();
	cpu_list = &__get_cpu_var(blk_cpu_done);
	list_replace_init(cpu_list, &local_list);
	local_irq_enable();

	while (!list_empty(&local_list)) {
		struct request *rq;

		rq = list_entry(local_list.next, struct request, csd.list);
		list_del_init(&rq->csd.list);
		rq->q->softirq_done_fn(rq);
	}
}

#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
static void trigger_softirq(void *data)
{
	struct request *rq = data;
	unsigned long flags;
	struct list_head *list;

	local_irq_save(flags);
	list = &__get_cpu_var(blk_cpu_done);
	list_add_tail(&rq->csd.list, list);

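	/*
	 * Only raise BLOCK_SOFTIRQ if our request is the first entry;
	 * otherwise a softirq is already pending on this CPU.
	 */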
	if (list->next == &rq->csd.list)
		raise_softirq_irqoff(BLOCK_SOFTIRQ);

	local_irq_restore(flags);
}

/*
 * Set up and invoke a run of 'trigger_softirq' on the given CPU.
 */
static int raise_blk_irq(int cpu, struct request *rq)
{
	if (cpu_online(cpu)) {
		struct call_single_data *data = &rq->csd;

		data->func = trigger_softirq;
		data->info = rq;
		data->flags = 0;

		__smp_call_function_single(cpu, data, 0);
		return 0;
	}

	return 1;
}
#else /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
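/*
 * Without the generic SMP helpers there is no way to IPI another CPU,
 * so remote completion always "fails" and the caller completes the
 * request on the local CPU instead.
 */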
static int raise_blk_irq(int cpu, struct request *rq)
{
	return 1;
}
#endif

static int __cpuinit blk_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq.
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;

		local_irq_disable();
		list_splice_init(&per_cpu(blk_cpu_done, cpu),
				 &__get_cpu_var(blk_cpu_done));
		raise_softirq_irqoff(BLOCK_SOFTIRQ);
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata blk_cpu_notifier = {
	.notifier_call	= blk_cpu_notify,
};

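/*
 * Complete @req on the most suitable CPU: the submitting CPU (or its
 * sibling group) when QUEUE_FLAG_SAME_COMP asks for it, otherwise the
 * local CPU. Falls back to local completion if the remote CPU cannot
 * be IPI'd.
 */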
void __blk_complete_request(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long flags;
	int ccpu, cpu, group_cpu;

	BUG_ON(!q->softirq_done_fn);

	local_irq_save(flags);
	cpu = smp_processor_id();
	group_cpu = blk_cpu_to_group(cpu);

	/*
	 * Select the completion CPU: the submitting CPU if the queue asks
	 * for same-CPU completion and the request carries one, else local.
	 */
	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1)
		ccpu = req->cpu;
	else
		ccpu = cpu;

	if (ccpu == cpu || ccpu == group_cpu) {
		struct list_head *list;
do_local:
		list = &__get_cpu_var(blk_cpu_done);
		list_add_tail(&req->csd.list, list);

		/*
		 * If the list only contains our just-added request, raise
		 * the softirq. If there are already entries, someone has
		 * raised it already but it hasn't run yet.
		 */
		if (list->next == &req->csd.list)
			raise_softirq_irqoff(BLOCK_SOFTIRQ);
	} else if (raise_blk_irq(ccpu, req))
		goto do_local;

	local_irq_restore(flags);
}

/**
 * blk_complete_request - end I/O on a request
 * @req:	the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions,
 *	unless the driver actually implements this in its completion callback
 *	through requeueing. The actual completion happens out-of-order,
 *	through a softirq handler. The user must have registered a completion
 *	callback through blk_queue_softirq_done().
 **/
void blk_complete_request(struct request *req)
{
	if (unlikely(blk_should_fake_timeout(req->q)))
		return;
	if (!blk_mark_rq_complete(req))
		__blk_complete_request(req);
}
EXPORT_SYMBOL(blk_complete_request);
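/*
 * A minimal usage sketch (hypothetical driver, not part of this file):
 * register a completion callback at queue-setup time, then call
 * blk_complete_request() from the hard-irq handler so the heavier
 * completion work runs later in BLOCK_SOFTIRQ context:
 *
 *	static void mydrv_softirq_done(struct request *rq)
 *	{
 *		int error = 0;	(derive from the device status instead)
 *		blk_end_request_all(rq, error);
 *	}
 *
 *	blk_queue_softirq_done(q, mydrv_softirq_done);	(during setup)
 *	blk_complete_request(rq);			(from the IRQ handler)
 */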

static __init int blk_softirq_init(void)
{
	int i;

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));

	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
	register_hotcpu_notifier(&blk_cpu_notifier);
	return 0;
}
subsys_initcall(blk_softirq_init);