#undef TRACE_SYSTEM
#define TRACE_SYSTEM gfs2

#if !defined(_TRACE_GFS2_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GFS2_H

#include <linux/tracepoint.h>

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/dlmconstants.h>
#include <linux/gfs2_ondisk.h>
#include "incore.h"
#include "glock.h"

#define dlm_state_name(nn) { DLM_LOCK_##nn, #nn }
#define glock_trace_name(x) __print_symbolic(x,		\
			    dlm_state_name(IV),		\
			    dlm_state_name(NL),		\
			    dlm_state_name(CR),		\
			    dlm_state_name(CW),		\
			    dlm_state_name(PR),		\
			    dlm_state_name(PW),		\
			    dlm_state_name(EX))

#define block_state_name(x) __print_symbolic(x,			\
			    { GFS2_BLKST_FREE, "free" },	\
			    { GFS2_BLKST_USED, "used" },	\
			    { GFS2_BLKST_DINODE, "dinode" },	\
			    { GFS2_BLKST_UNLINKED, "unlinked" })

#define show_glock_flags(flags) __print_flags(flags, "",	\
	{(1UL << GLF_LOCK),			"l" },		\
	{(1UL << GLF_DEMOTE),			"D" },		\
	{(1UL << GLF_PENDING_DEMOTE),		"d" },		\
	{(1UL << GLF_DEMOTE_IN_PROGRESS),	"p" },		\
	{(1UL << GLF_DIRTY),			"y" },		\
	{(1UL << GLF_LFLUSH),			"f" },		\
	{(1UL << GLF_INVALIDATE_IN_PROGRESS),	"i" },		\
	{(1UL << GLF_REPLY_PENDING),		"r" },		\
	{(1UL << GLF_INITIAL),			"I" },		\
	{(1UL << GLF_FROZEN),			"F" })

#ifndef NUMPTY
#define NUMPTY
static inline u8 glock_trace_state(unsigned int state)
{
	switch(state) {
	case LM_ST_SHARED:
		return DLM_LOCK_PR;
	case LM_ST_DEFERRED:
		return DLM_LOCK_CW;
	case LM_ST_EXCLUSIVE:
		return DLM_LOCK_EX;
	}
	return DLM_LOCK_NL;
}
#endif
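
/*
 * glock_trace_state() folds the GFS2 lock states onto DLM lock modes purely
 * for display, so the events below print the DLM name for each state, e.g.:
 *
 *	glock_trace_state(LM_ST_SHARED)    == DLM_LOCK_PR   ("PR" in the output)
 *	glock_trace_state(LM_ST_EXCLUSIVE) == DLM_LOCK_EX   ("EX" in the output)
 *	glock_trace_state(LM_ST_UNLOCKED)  == DLM_LOCK_NL   (default case)
 */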

/* Section 1 - Locking
 *
 * Objectives:
 * Latency: Remote demote request to state change
 * Latency: Local lock request to state change
 * Latency: State change to lock grant
 * Correctness: Ordering of local lock state vs. I/O requests
 * Correctness: Responses to remote demote requests
 */
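
/*
 * Each TRACE_EVENT() below generates a trace_gfs2_<name>() call with the
 * TP_PROTO() signature, which the rest of GFS2 invokes at the relevant
 * points.  A minimal sketch of how the locking objectives above can be
 * measured (the exact call sites are an assumption, shown for illustration
 * only):
 *
 *	trace_gfs2_demote_rq(gl);                   demote request arrives
 *	...
 *	trace_gfs2_glock_state_change(gl, state);   DLM reply is processed
 *
 * Pairing the timestamps of these two events for the same glock number
 * gives the demote-request-to-state-change latency.
 */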

/* General glock state change (DLM lock request completes) */
TRACE_EVENT(gfs2_glock_state_change,

	TP_PROTO(const struct gfs2_glock *gl, unsigned int new_state),

	TP_ARGS(gl, new_state),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( u8, cur_state )
		__field( u8, new_state )
		__field( u8, dmt_state )
		__field( u8, tgt_state )
		__field( unsigned long, flags )
	),

	TP_fast_assign(
		__entry->dev = gl->gl_sbd->sd_vfs->s_dev;
		__entry->glnum = gl->gl_name.ln_number;
		__entry->gltype = gl->gl_name.ln_type;
		__entry->cur_state = glock_trace_state(gl->gl_state);
		__entry->new_state = glock_trace_state(new_state);
		__entry->tgt_state = glock_trace_state(gl->gl_target);
		__entry->dmt_state = glock_trace_state(gl->gl_demote_state);
		__entry->flags = gl->gl_flags;
	),

	TP_printk("%u,%u glock %d:%lld state %s to %s tgt:%s dmt:%s flags:%s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  glock_trace_name(__entry->cur_state),
		  glock_trace_name(__entry->new_state),
		  glock_trace_name(__entry->tgt_state),
		  glock_trace_name(__entry->dmt_state),
		  show_glock_flags(__entry->flags))
);

/* State change -> unlocked, glock is being deallocated */
TRACE_EVENT(gfs2_glock_put,

	TP_PROTO(const struct gfs2_glock *gl),

	TP_ARGS(gl),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( u8, cur_state )
		__field( unsigned long, flags )
	),

	TP_fast_assign(
		__entry->dev = gl->gl_sbd->sd_vfs->s_dev;
		__entry->gltype = gl->gl_name.ln_type;
		__entry->glnum = gl->gl_name.ln_number;
		__entry->cur_state = glock_trace_state(gl->gl_state);
		__entry->flags = gl->gl_flags;
	),

	TP_printk("%u,%u glock %d:%lld state %s => %s flags:%s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->gltype, (unsigned long long)__entry->glnum,
		  glock_trace_name(__entry->cur_state),
		  glock_trace_name(DLM_LOCK_IV),
		  show_glock_flags(__entry->flags))

);

/* Callback (local or remote) requesting lock demotion */
TRACE_EVENT(gfs2_demote_rq,

	TP_PROTO(const struct gfs2_glock *gl),

	TP_ARGS(gl),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( u8, cur_state )
		__field( u8, dmt_state )
		__field( unsigned long, flags )
	),

	TP_fast_assign(
		__entry->dev = gl->gl_sbd->sd_vfs->s_dev;
		__entry->gltype = gl->gl_name.ln_type;
		__entry->glnum = gl->gl_name.ln_number;
		__entry->cur_state = glock_trace_state(gl->gl_state);
		__entry->dmt_state = glock_trace_state(gl->gl_demote_state);
		__entry->flags = gl->gl_flags;
	),

	TP_printk("%u,%u glock %d:%lld demote %s to %s flags:%s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  glock_trace_name(__entry->cur_state),
		  glock_trace_name(__entry->dmt_state),
		  show_glock_flags(__entry->flags))

);

/* Promotion/grant of a glock */
TRACE_EVENT(gfs2_promote,

	TP_PROTO(const struct gfs2_holder *gh, int first),

	TP_ARGS(gh, first),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( int, first )
		__field( u8, state )
	),

	TP_fast_assign(
		__entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev;
		__entry->glnum = gh->gh_gl->gl_name.ln_number;
		__entry->gltype = gh->gh_gl->gl_name.ln_type;
		__entry->first = first;
		__entry->state = glock_trace_state(gh->gh_state);
	),

	TP_printk("%u,%u glock %u:%llu promote %s %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  __entry->first ? "first": "other",
		  glock_trace_name(__entry->state))
);
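
/*
 * Sketch of the assumed "first" semantics (an illustration, not a call site
 * defined by this header): when a batch of waiting holders is granted, the
 * first one is reported as "first" and the rest as "other":
 *
 *	trace_gfs2_promote(gh, 1);	first holder granted in this pass
 *	trace_gfs2_promote(gh, 0);	each subsequent holder
 */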

/* Queue/dequeue a lock request */
TRACE_EVENT(gfs2_glock_queue,

	TP_PROTO(const struct gfs2_holder *gh, int queue),

	TP_ARGS(gh, queue),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( int, queue )
		__field( u8, state )
	),

	TP_fast_assign(
		__entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev;
		__entry->glnum = gh->gh_gl->gl_name.ln_number;
		__entry->gltype = gh->gh_gl->gl_name.ln_type;
		__entry->queue = queue;
		__entry->state = glock_trace_state(gh->gh_state);
	),

	TP_printk("%u,%u glock %u:%llu %squeue %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  __entry->queue ? "" : "de",
		  glock_trace_name(__entry->state))
);
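
/*
 * Minimal sketch of the expected pairing (hypothetical call sites, shown
 * only to illustrate how holder wait time can be derived):
 *
 *	trace_gfs2_glock_queue(gh, 1);	holder queued ("queue" in the output)
 *	...				wait for the lock to be granted
 *	trace_gfs2_glock_queue(gh, 0);	holder dequeued ("dequeue")
 */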

/* Section 2 - Log/journal
 *
 * Objectives:
 * Latency: Log flush time
 * Correctness: pin/unpin vs. disk I/O ordering
 * Performance: Log usage stats
 */

/* Pin/unpin a block in the log */
TRACE_EVENT(gfs2_pin,

	TP_PROTO(const struct gfs2_bufdata *bd, int pin),

	TP_ARGS(bd, pin),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( int, pin )
		__field( u32, len )
		__field( sector_t, block )
		__field( u64, ino )
	),

	TP_fast_assign(
		__entry->dev = bd->bd_gl->gl_sbd->sd_vfs->s_dev;
		__entry->pin = pin;
		__entry->len = bd->bd_bh->b_size;
		__entry->block = bd->bd_bh->b_blocknr;
		__entry->ino = bd->bd_gl->gl_name.ln_number;
	),

	TP_printk("%u,%u log %s %llu/%lu inode %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->pin ? "pin" : "unpin",
		  (unsigned long long)__entry->block,
		  (unsigned long)__entry->len,
		  (unsigned long long)__entry->ino)
);
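
/*
 * Sketch of the pin/unpin ordering that the correctness objective above is
 * meant to check (assumed call sites, for illustration only):
 *
 *	trace_gfs2_pin(bd, 1);	buffer pinned while it sits in the journal
 *	...			log is written out
 *	trace_gfs2_pin(bd, 0);	buffer unpinned, may now be written back
 */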

/* Flushing the log */
TRACE_EVENT(gfs2_log_flush,

	TP_PROTO(const struct gfs2_sbd *sdp, int start),

	TP_ARGS(sdp, start),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( int, start )
		__field( u64, log_seq )
	),

	TP_fast_assign(
		__entry->dev = sdp->sd_vfs->s_dev;
		__entry->start = start;
		__entry->log_seq = sdp->sd_log_sequence;
	),

	TP_printk("%u,%u log flush %s %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->start ? "start" : "end",
		  (unsigned long long)__entry->log_seq)
);
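
/*
 * The log-flush latency objective relies on this event bracketing each
 * flush.  A minimal sketch of the assumed usage in the log code:
 *
 *	trace_gfs2_log_flush(sdp, 1);	flush starts
 *	...				log descriptors and headers written
 *	trace_gfs2_log_flush(sdp, 0);	flush complete
 */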

/* Reserving/releasing blocks in the log */
TRACE_EVENT(gfs2_log_blocks,

	TP_PROTO(const struct gfs2_sbd *sdp, int blocks),

	TP_ARGS(sdp, blocks),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( int, blocks )
	),

	TP_fast_assign(
		__entry->dev = sdp->sd_vfs->s_dev;
		__entry->blocks = blocks;
	),

	TP_printk("%u,%u log reserve %d", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->blocks)
);
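
/*
 * Sketch of a caller reporting changes in reserved log blocks (the sign
 * convention shown here is an assumption about the call sites, not defined
 * by this header):
 *
 *	trace_gfs2_log_blocks(sdp, -blks);	blocks reserved from the log
 *	trace_gfs2_log_blocks(sdp, blks);	reservation released again
 */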

/* Section 3 - bmap
 *
 * Objectives:
 * Latency: Bmap request time
 * Performance: Block allocator tracing
 * Correctness: Test of discard generation vs. blocks allocated
 */

/* Map an extent of blocks, possibly a new allocation */
TRACE_EVENT(gfs2_bmap,

	TP_PROTO(const struct gfs2_inode *ip, const struct buffer_head *bh,
		 sector_t lblock, int create, int errno),

	TP_ARGS(ip, bh, lblock, create, errno),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, lblock )
		__field( sector_t, pblock )
		__field( u64, inum )
		__field( unsigned long, state )
		__field( u32, len )
		__field( int, create )
		__field( int, errno )
	),

	TP_fast_assign(
		__entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev;
		__entry->lblock = lblock;
		__entry->pblock = buffer_mapped(bh) ? bh->b_blocknr : 0;
		__entry->inum = ip->i_no_addr;
		__entry->state = bh->b_state;
		__entry->len = bh->b_size;
		__entry->create = create;
		__entry->errno = errno;
	),

	TP_printk("%u,%u bmap %llu map %llu/%lu to %llu flags:%08lx %s %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->inum,
		  (unsigned long long)__entry->lblock,
		  (unsigned long)__entry->len,
		  (unsigned long long)__entry->pblock,
		  __entry->state, __entry->create ? "create " : "nocreate",
		  __entry->errno)
);
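
/*
 * Minimal sketch of how a block-mapping path might emit this event
 * (hypothetical caller, for illustration; the real call sites live in the
 * bmap code, not in this header):
 *
 *	ret = gfs2_block_map(&ip->i_inode, lblock, bh, create);
 *	trace_gfs2_bmap(ip, bh, lblock, create, ret);
 */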

/* Keep track of blocks as they are allocated/freed */
TRACE_EVENT(gfs2_block_alloc,

	TP_PROTO(const struct gfs2_inode *ip, u64 block, unsigned len,
		 u8 block_state),

	TP_ARGS(ip, block, len, block_state),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, start )
		__field( u64, inum )
		__field( u32, len )
		__field( u8, block_state )
	),

	TP_fast_assign(
		__entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev;
		__entry->start = block;
		__entry->inum = ip->i_no_addr;
		__entry->len = len;
		__entry->block_state = block_state;
	),

	TP_printk("%u,%u bmap %llu alloc %llu/%lu %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->inum,
		  (unsigned long long)__entry->start,
		  (unsigned long)__entry->len,
		  block_state_name(__entry->block_state))
);
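
/*
 * Sketch of reporting an allocation and a free using the block states from
 * gfs2_ondisk.h (assumed call sites in the allocator, for illustration):
 *
 *	trace_gfs2_block_alloc(ip, block, n, GFS2_BLKST_USED);	n blocks allocated
 *	trace_gfs2_block_alloc(ip, block, n, GFS2_BLKST_FREE);	n blocks freed
 */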

#endif /* _TRACE_GFS2_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_gfs2
#include <trace/define_trace.h>
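
/*
 * Once this header is built in, the events are exposed under the "gfs2"
 * subsystem in tracefs, e.g. (typical usage; the mount point may differ):
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/gfs2/enable
 *	cat /sys/kernel/debug/tracing/trace_pipe
 */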