/* arch/x86/lib/msr.c */
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <asm/msr.h>
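
/*
 * One request block shared by all the helpers below: 'reg' holds the
 * value for the single-CPU calls, 'msrs'/'off' point into the caller's
 * per-CPU array for the _on_cpus() calls, and 'err' carries the result
 * of the remote rdmsr_safe()/wrmsr_safe() for the "safe" variants.
 */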
struct msr_info {
	u32 msr_no;
	struct msr reg;
	struct msr *msrs;
	int off;
	int err;
};

static void __rdmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;
	int this_cpu = raw_smp_processor_id();

	if (rv->msrs)
		reg = &rv->msrs[this_cpu - rv->off];
	else
		reg = &rv->reg;

	rdmsr(rv->msr_no, reg->l, reg->h);
}

static void __wrmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;
	int this_cpu = raw_smp_processor_id();

	if (rv->msrs)
		reg = &rv->msrs[this_cpu - rv->off];
	else
		reg = &rv->reg;

	wrmsr(rv->msr_no, reg->l, reg->h);
}

int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
	*l = rv.reg.l;
	*h = rv.reg.h;

	return err;
}
EXPORT_SYMBOL(rdmsr_on_cpu);

int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);

	return err;
}
EXPORT_SYMBOL(wrmsr_on_cpu);
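
/*
 * Typical use of the two helpers above (an illustrative sketch, not an
 * in-tree caller; MSR_IA32_MISC_ENABLE and the bit manipulation are only
 * examples): read the MSR on CPU 1, set bit 0 of the low half and write
 * the value back.  Both helpers IPI the target CPU via
 * smp_call_function_single() and so must not be used from interrupt
 * context; a nonzero return value means the target CPU could not be
 * reached (e.g. it is offline).
 *
 *	u32 l, h;
 *	int err;
 *
 *	err = rdmsr_on_cpu(1, MSR_IA32_MISC_ENABLE, &l, &h);
 *	if (err)
 *		return err;
 *	l |= 1;
 *	err = wrmsr_on_cpu(1, MSR_IA32_MISC_ENABLE, l, h);
 */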

static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
			    struct msr *msrs,
			    void (*msr_func) (void *info))
{
	struct msr_info rv;
	int this_cpu;

	memset(&rv, 0, sizeof(rv));

	rv.off    = cpumask_first(mask);
	rv.msrs   = msrs;
	rv.msr_no = msr_no;

	this_cpu = get_cpu();

	/*
	 * smp_call_function_many() does not IPI the calling CPU, so run
	 * msr_func() locally if this CPU is in the mask; get_cpu() keeps
	 * us pinned to one CPU until both paths have completed.
	 */
	if (cpumask_test_cpu(this_cpu, mask))
		msr_func(&rv);

	smp_call_function_many(mask, msr_func, &rv, 1);
	put_cpu();
}

/*
 * rdmsr on a bunch of CPUs
 *
 * @mask:	which CPUs
 * @msr_no:	which MSR
 * @msrs:	array of MSR values
 *
 */
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
}
EXPORT_SYMBOL(rdmsr_on_cpus);

/*
 * wrmsr on a bunch of CPUs
 *
 * @mask:	which CPUs
 * @msr_no:	which MSR
 * @msrs:	array of MSR values
 *
 */
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
}
EXPORT_SYMBOL(wrmsr_on_cpus);
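
/*
 * A sketch of how the bulk variants might be driven (illustrative only;
 * the sizing and indexing convention is inferred from the
 * "this_cpu - rv->off" lookup in __rwmsr_on_cpus() above): the caller
 * supplies one struct msr per CPU, indexed by CPU number relative to the
 * first CPU in the mask.  Allocating nr_cpu_ids entries keeps any online
 * CPU in range; kcalloc()/pr_info() here stand in for whatever the real
 * caller would use, and MSR_IA32_MISC_ENABLE is only an example.
 *
 *	struct msr *msrs;
 *	unsigned int cpu, first;
 *
 *	msrs = kcalloc(nr_cpu_ids, sizeof(*msrs), GFP_KERNEL);
 *	if (!msrs)
 *		return -ENOMEM;
 *
 *	first = cpumask_first(cpu_online_mask);
 *	rdmsr_on_cpus(cpu_online_mask, MSR_IA32_MISC_ENABLE, msrs);
 *	for_each_online_cpu(cpu)
 *		pr_info("CPU%u: %08x:%08x\n", cpu,
 *			msrs[cpu - first].h, msrs[cpu - first].l);
 *	kfree(msrs);
 */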

/* These "safe" variants are slower and should be used when the target MSR
   may not actually exist. */
static void __rdmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
}

static void __wrmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
}

int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
	*l = rv.reg.l;
	*h = rv.reg.h;

	return err ? err : rv.err;
}
EXPORT_SYMBOL(rdmsr_safe_on_cpu);

int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_on_cpu);
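
/*
 * The "safe" pair above is meant for probing.  A sketch (illustrative;
 * MSR_AMD64_NB_CFG is just an example of a model-specific register that
 * may be absent on the target CPU): read it on CPU 0 and bail out on
 * failure.  The return value folds together the IPI error from
 * smp_call_function_single() and rv.err, i.e. the result of the remote
 * rdmsr_safe()/wrmsr_safe(), which is nonzero if the access faulted.
 *
 *	u32 l, h;
 *	int err;
 *
 *	err = rdmsr_safe_on_cpu(0, MSR_AMD64_NB_CFG, &l, &h);
 *	if (err)
 *		return err;
 */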

/*
 * These variants are significantly slower, but allow control over
 * the entire 32-bit GPR set.
 */
struct msr_regs_info {
	u32 *regs;
	int err;
};

static void __rdmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	rv->err = rdmsr_safe_regs(rv->regs);
}

static void __wrmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	rv->err = wrmsr_safe_regs(rv->regs);
}

int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
{
	int err;
	struct msr_regs_info rv;

	rv.regs = regs;
	rv.err  = -EIO;
	err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);

int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
{
	int err;
	struct msr_regs_info rv;

	rv.regs = regs;
	rv.err  = -EIO;
	err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
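
/*
 * Sketch of driving the register-image variants above (illustrative; the
 * register order eax, ecx, edx, ebx, esp, ebp, esi, edi is an assumption
 * about the layout expected by rdmsr_safe_regs()/wrmsr_safe_regs(), and
 * the MSR number is arbitrary).  The MSR index goes into the "ecx" slot;
 * on success the result comes back in the "eax" and "edx" slots.
 *
 *	u32 regs[8] = { 0 };
 *	u64 val;
 *	int err;
 *
 *	regs[1] = 0xc0010015;
 *	err = rdmsr_safe_regs_on_cpu(0, regs);
 *	if (!err)
 *		val = ((u64)regs[2] << 32) | regs[0];
 */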