/*
 * This file contains the generic code to perform a call to the
 * pSeries LPAR hypervisor.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/hvcall.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>

#define STK_PARM(i)	(48 + ((i)-3)*8)
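
/*
 * Note on STK_PARM(): the 64-bit PowerPC ELF ABI gives every function a
 * parameter save area in its caller's stack frame, starting at offset 48
 * from r1, with one doubleword slot per argument register (arguments are
 * passed in r3-r10).  STK_PARM(rN) is the home slot for argument register
 * rN; nothing else uses these slots here, so the code below is free to
 * treat them as scratch space.
 */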

#ifdef CONFIG_TRACEPOINTS

	.section	".toc","aw"

	.globl hcall_tracepoint_refcount
hcall_tracepoint_refcount:
	.llong	0

	.section	".text"

/*
 * precall must preserve all registers.  We use otherwise-unused
 * STK_PARM() save areas to stash snapshots of the argument registers
 * and the opcode.  We branch around this in early init (eg when
 * populating the MMU hashtable) by using an unconditional cpu feature.
 */
#define HCALL_INST_PRECALL					\
BEGIN_FTR_SECTION;						\
	b	1f;						\
END_FTR_SECTION(0, 1);						\
	ld	r12,hcall_tracepoint_refcount@toc(r2);		\
	cmpdi	r12,0;						\
	beq+	1f;						\
	mflr	r0;						\
	std	r3,STK_PARM(r3)(r1);				\
	std	r4,STK_PARM(r4)(r1);				\
	std	r5,STK_PARM(r5)(r1);				\
	std	r6,STK_PARM(r6)(r1);				\
	std	r7,STK_PARM(r7)(r1);				\
	std	r8,STK_PARM(r8)(r1);				\
	std	r9,STK_PARM(r9)(r1);				\
	std	r10,STK_PARM(r10)(r1);				\
	std	r0,16(r1);					\
	stdu	r1,-STACK_FRAME_OVERHEAD(r1);			\
	bl	.__trace_hcall_entry;				\
	addi	r1,r1,STACK_FRAME_OVERHEAD;			\
	ld	r0,16(r1);					\
	ld	r3,STK_PARM(r3)(r1);				\
	ld	r4,STK_PARM(r4)(r1);				\
	ld	r5,STK_PARM(r5)(r1);				\
	ld	r6,STK_PARM(r6)(r1);				\
	ld	r7,STK_PARM(r7)(r1);				\
	ld	r8,STK_PARM(r8)(r1);				\
	ld	r9,STK_PARM(r9)(r1);				\
	ld	r10,STK_PARM(r10)(r1);				\
	mtlr	r0;						\
1:

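/*
 * For reference, a sketch of the C side (not part of this file):
 * hcall_tracepoint_refcount is expected to be bumped when the hcall
 * tracepoints gain their first probe and dropped when the last probe is
 * removed, so the cmpdi/beq+ above skips all tracing work in the common
 * untraced case.  At this point in the code's history the entry hook
 * called above takes only the opcode, roughly:
 *
 *	void __trace_hcall_entry(unsigned long opcode)
 *	{
 *		trace_hcall_entry(opcode);
 *	}
 *
 * The exact signature and the refcount reg/unreg helpers live in the
 * pseries C code (e.g. lpar.c); treat their names here as assumptions.
 */
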
/*
 * postcall is performed immediately before the function returns, which
 * allows liberal use of volatile registers.  We branch around this in
 * early init (eg when populating the MMU hashtable) by using an
 * unconditional cpu feature.
 */
#define HCALL_INST_POSTCALL					\
BEGIN_FTR_SECTION;						\
	b	1f;						\
END_FTR_SECTION(0, 1);						\
	ld	r12,hcall_tracepoint_refcount@toc(r2);		\
	cmpdi	r12,0;						\
	beq+	1f;						\
	mflr	r0;						\
	ld	r6,STK_PARM(r3)(r1);				\
	std	r3,STK_PARM(r3)(r1);				\
	mr	r4,r3;						\
	mr	r3,r6;						\
	std	r0,16(r1);					\
	stdu	r1,-STACK_FRAME_OVERHEAD(r1);			\
	bl	.__trace_hcall_exit;				\
	addi	r1,r1,STACK_FRAME_OVERHEAD;			\
	ld	r0,16(r1);					\
	ld	r3,STK_PARM(r3)(r1);				\
	mtlr	r0;						\
1:
#else
#define HCALL_INST_PRECALL
#define HCALL_INST_POSTCALL
#endif

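/*
 * Likewise a sketch of the exit hook (assumed, not defined in this
 * file): it receives the original opcode reloaded from STK_PARM(r3)
 * into r3 and the hcall status moved from r3 into r4, i.e. roughly:
 *
 *	void __trace_hcall_exit(long opcode, unsigned long retval)
 *	{
 *		trace_hcall_exit(opcode, retval);
 *	}
 *
 * The hcall status is also stashed back in STK_PARM(r3) across the call
 * and reloaded into r3 afterwards, so callers still see it unchanged.
 */
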
	.text

_GLOBAL(plpar_hcall_norets)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	HCALL_INST_PRECALL

	HVSC				/* invoke the hypervisor */

	HCALL_INST_POSTCALL

	lwz	r0,8(r1)
	mtcrf	0xff,r0
	blr				/* return r3 = status */

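/*
 * Usage sketch (C side, assumed from asm/hvcall.h of this era): the
 * opcode arrives in r3 and any arguments in r4-r10, so a no-returns
 * hcall is a simple varargs call, e.g.
 *
 *	long rc = plpar_hcall_norets(H_CEDE);
 *
 * with rc holding the hypervisor status returned in r3.
 */
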
_GLOBAL(plpar_hcall)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	HCALL_INST_PRECALL

	std	r4,STK_PARM(r4)(r1)	/* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC				/* invoke the hypervisor */

	ld	r12,STK_PARM(r4)(r1)
	std	r4,  0(r12)
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)

	HCALL_INST_POSTCALL

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */

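/*
 * Usage sketch (C side, assumed): the second C argument is a pointer to
 * a PLPAR_HCALL_BUFSIZE (4) word return buffer, which the code above
 * fills from r4-r7, e.g.
 *
 *	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 *	long rc = plpar_hcall(H_GET_TERM_CHAR, retbuf, termno);
 *	if (rc == H_SUCCESS)
 *		len = retbuf[0];
 *
 * (termno and len stand in for the caller's own variables).
 */
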
/*
 * plpar_hcall_raw can be called in real mode. kexec/kdump need some
 * hypervisor calls to be executed in real mode.  So plpar_hcall_raw
 * skips the hypervisor call instrumentation (HCALL_INST_PRECALL/
 * POSTCALL), since the data it references may not be present in the
 * RMO region.
 */
_GLOBAL(plpar_hcall_raw)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	std	r4,STK_PARM(r4)(r1)	/* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC				/* invoke the hypervisor */

	ld	r12,STK_PARM(r4)(r1)
	std	r4,  0(r12)
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */

_GLOBAL(plpar_hcall9)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	HCALL_INST_PRECALL

	std	r4,STK_PARM(r4)(r1)	/* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10
	ld	r10,STK_PARM(r11)(r1)	/* put arg7 in R10 */
	ld	r11,STK_PARM(r12)(r1)	/* put arg8 in R11 */
	ld	r12,STK_PARM(r13)(r1)	/* put arg9 in R12 */

	HVSC				/* invoke the hypervisor */

	mr	r0,r12
	ld	r12,STK_PARM(r4)(r1)
	std	r4,  0(r12)
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)
	std	r8, 32(r12)
	std	r9, 40(r12)
	std	r10,48(r12)
	std	r11,56(r12)
	std	r0, 64(r12)

	HCALL_INST_POSTCALL

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */
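
/*
 * Usage sketch (C side, assumed): plpar_hcall9 takes up to nine hcall
 * arguments in addition to the opcode and the return-buffer pointer;
 * the last three arrive via the caller's parameter save area and are
 * reloaded above into r10-r12.  The return buffer holds
 * PLPAR_HCALL9_BUFSIZE (9) words filled from r4-r12, e.g.
 *
 *	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
 *	long rc = plpar_hcall9(H_GET_MPP, retbuf);
 *	if (rc == H_SUCCESS)
 *		entitled = retbuf[0];
 *
 * (entitled stands in for the caller's own variable; H_GET_MPP is one
 * in-tree user of this interface).
 */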