/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/ftrace_irq.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include "trace.h"

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	int ret;

	ret = trace_seq_printf(s, "# compressed entry header\n");
	ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
	ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
	ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
	ret = trace_seq_printf(s, "\n");
	ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
			       RINGBUF_TYPE_PADDING);
	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
			       RINGBUF_TYPE_TIME_EXTEND);
	ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return ret;
}

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */

/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 * This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0        : ring buffers are off
 *   1      0        : ring buffers are on
 *   X      1        : ring buffers are permanently disabled
 */

enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

/**
 * tracing_on - enable all tracing buffers
 *
 * This function enables all tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off all tracing buffers
 *
 * This function stops all tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return ring_buffer_flags == RB_BUFFERS_ON;
}
EXPORT_SYMBOL_GPL(tracing_is_on);

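/*
 * Illustrative sketch (not part of this file): how the global layer of
 * the three-layer enable scheme behaves. The example_quiesce() name is
 * hypothetical; it only uses the helpers defined above.
 */
#if 0 /* example only */
static void example_quiesce(void)
{
	tracing_off();			/* layer 1: clear the global ON bit */
	/* ...no ring buffer accepts writes here... */
	tracing_on();			/* set the global ON bit again */
	/* still off if the DISABLED bit was ever set permanently */
	WARN_ON(!tracing_is_on());
}
#endif
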
#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING
			&& event->time_delta == 0;
}

static inline int rb_discarded_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

/* inline for ring buffer fast paths */
static unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

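/*
 * Illustrative sketch (not part of this file): how the compressed header
 * encodes a data event's length. A 12-byte payload fits entirely in the
 * 5-bit type_len field (12 / RB_ALIGNMENT == 3); larger payloads set
 * type_len to 0 and put the length in array[0]. The helper name is
 * hypothetical and exists only to show the arithmetic.
 */
#if 0 /* example only */
static void example_event_length(struct ring_buffer_event *event)
{
	/* suppose a writer reserved 12 bytes of payload */
	event->type_len = 12 / RB_ALIGNMENT;		/* == 3 */

	/* total on-page size: 4-byte header + 3 * RB_ALIGNMENT bytes */
	BUG_ON(rb_event_data_length(event) != RB_EVNT_HDR_SIZE + 12);
	/* the caller-visible length strips the header back off */
	BUG_ON(ring_buffer_event_length(event) != 12);
}
#endif
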
/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

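/*
 * Illustrative sketch (not part of this file): a time delta that does
 * not fit in the 27-bit time_delta field must be split across a
 * TIME_EXTEND event, low TS_SHIFT bits in time_delta and the rest in
 * array[0]. This hypothetical helper mirrors the packing done later in
 * rb_add_time_stamp().
 */
#if 0 /* example only */
static void example_pack_time_extend(struct ring_buffer_event *event,
				     u64 delta)
{
	if (delta & TS_DELTA_TEST) {			/* bits above 27 set */
		event->time_delta = delta & TS_MASK;	/* low 27 bits */
		event->array[0] = delta >> TS_SHIFT;	/* high bits */
	}
}
#endif
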
struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[];	/* data of buffer page */
};

struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	struct buffer_data_page *page;	/* Actual data page */
};

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

/* Max number of timestamps that can fit on a page */
#define RB_TIMESTAMPS_PER_PAGE	(BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;
	int ret;

	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			       "offset:0;\tsize:%u;\n",
			       (unsigned int)sizeof(field.time_stamp));

	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
			       "offset:%u;\tsize:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       (unsigned int)sizeof(field.commit));

	ret = trace_seq_printf(s, "\tfield: char data;\t"
			       "offset:%u;\tsize:%u;\n",
			       (unsigned int)offsetof(typeof(field), data),
			       (unsigned int)BUF_PAGE_SIZE);

	return ret;
}

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	struct ring_buffer		*buffer;
	spinlock_t			reader_lock; /* serialize readers */
	raw_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct list_head		pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			nmi_dropped;
	unsigned long			commit_overrun;
	unsigned long			overrun;
	unsigned long			read;
	local_t				entries;
	u64				write_stamp;
	u64				read_stamp;
	atomic_t			record_disabled;
};

struct ring_buffer {
	unsigned			pages;
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block		cpu_notify;
#endif
	u64				(*clock)(void);
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	u64				read_stamp;
};

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(buffer, cond)				\
	({							\
		int _____ret = unlikely(cond);			\
		if (_____ret) {					\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
		}						\
		_____ret;					\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer, cpu);
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
	}

	return 0;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;
	unsigned long addr;
	LIST_HEAD(pages);
	unsigned i;

	for (i = 0; i < nr_pages; i++) {
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;
		list_add(&bpage->list, &pages);

		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			goto free_pages;
		bpage->page = (void *)addr;
		rb_init_page(bpage->page);
	}

	list_splice(&pages, head);

	rb_check_pages(cpu_buffer);

	return 0;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	return -ENOMEM;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	unsigned long addr;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&cpu_buffer->pages);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	cpu_buffer->reader_page = bpage;
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		goto fail_free_reader;
	bpage->page = (void *)addr;
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	list_for_each_entry_safe(bpage, tmp, head, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	kfree(cpu_buffer);
}

/*
 * Causes compile errors if the struct buffer_page gets bigger
 * than the struct page.
 */
extern int ring_buffer_page_too_big(void);

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu);
#endif

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					struct lock_class_key *key)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu;

	/* Paranoid! Optimizes out when all is well */
	if (sizeof(struct buffer_page) > sizeof(struct page))
		ring_buffer_page_too_big();


	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	/* need at least two pages */
	if (buffer->pages == 1)
		buffer->pages++;

	/*
	 * In case of non-hotplug cpu, if the ring-buffer is allocated
	 * in early initcall, it will not be notified of secondary cpus.
	 * In that off case, we need to allocate for all possible cpus.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	get_online_cpus();
	cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
	cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

#ifdef CONFIG_HOTPLUG_CPU
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	register_cpu_notifier(&buffer->cpu_notify);
#endif

	put_online_cpus();
	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);
	put_online_cpus();

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);

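/*
 * Illustrative usage sketch (not part of this file): allocating a
 * 64KB-per-cpu overwriting buffer via the ring_buffer_alloc() wrapper
 * from linux/ring_buffer.h (which supplies the lock_class_key), then
 * freeing it. The example_setup() name is hypothetical.
 */
#if 0 /* example only */
static int example_setup(void)
{
	struct ring_buffer *buffer;

	buffer = ring_buffer_alloc(65536, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;
	/* ...use the buffer... */
	ring_buffer_free(buffer);
	return 0;
}
#endif
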
/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	get_online_cpus();

#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&buffer->cpu_notify);
#endif

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	put_online_cpus();

	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct ring_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
			return;
		p = cpu_buffer->pages.next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
		return;

	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);

}

static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
		struct list_head *pages, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
			return;
		p = pages->next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		list_add_tail(&bpage->list, &cpu_buffer->pages);
	}
	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * The tracer is responsible for making sure that the buffer is
 * not being used while changing the size.
 * Note: We may be able to change the above requirement by using
 *  RCU synchronizations.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns -1 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages, rm_pages, new_pages;
	struct buffer_page *bpage, *tmp;
	unsigned long buffer_size;
	unsigned long addr;
	LIST_HEAD(pages);
	int i, cpu;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return size;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;
	buffer_size = buffer->pages * BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	if (size == buffer_size)
		return size;

	mutex_lock(&buffer->mutex);
	get_online_cpus();

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	if (size < buffer_size) {

		/* easy case, just free pages */
		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
			goto out_fail;

		rm_pages = buffer->pages - nr_pages;

		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_remove_pages(cpu_buffer, rm_pages);
		}
		goto out;
	}

	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * link list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM;
	 */
	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
		goto out_fail;

	new_pages = nr_pages - buffer->pages;

	for_each_buffer_cpu(buffer, cpu) {
		for (i = 0; i < new_pages; i++) {
			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
						   cache_line_size()),
					     GFP_KERNEL, cpu_to_node(cpu));
			if (!bpage)
				goto free_pages;
			list_add(&bpage->list, &pages);
			addr = __get_free_page(GFP_KERNEL);
			if (!addr)
				goto free_pages;
			bpage->page = (void *)addr;
			rb_init_page(bpage->page);
		}
	}

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		rb_insert_pages(cpu_buffer, &pages, new_pages);
	}

	if (RB_WARN_ON(buffer, !list_empty(&pages)))
		goto out_fail;

 out:
	buffer->pages = nr_pages;
	put_online_cpus();
	mutex_unlock(&buffer->mutex);

	return size;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	return -ENOMEM;

	/*
	 * Something went totally wrong, and we are too paranoid
	 * to even clean up the mess.
	 */
 out_fail:
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	return -1;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);

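/*
 * Illustrative usage sketch (not part of this file): growing a buffer
 * to roughly 1MB per cpu. The example_resize() name is hypothetical;
 * the return convention is the one documented above.
 */
#if 0 /* example only */
static int example_resize(struct ring_buffer *buffer)
{
	int ret = ring_buffer_resize(buffer, 1024 * 1024);

	if (ret < 0)
		return ret;
	/* ret is the new size, rounded up to whole buffer pages */
	return 0;
}
#endif
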
static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
	return bpage->data + index;
}

static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->head_page,
			       cpu_buffer->head_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write);
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->head_page);
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = (*bpage)->list.next;

	if (p == &cpu_buffer->pages)
		p = p->next;

	*bpage = list_entry(p, struct buffer_page, list);
}

static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
}

static inline int
rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
	     struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}

static void
rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
		    struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	while (cpu_buffer->commit_page->page != (void *)addr) {
		if (RB_WARN_ON(cpu_buffer,
			  cpu_buffer->commit_page == cpu_buffer->tail_page))
			return;
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
	}

	/* Now set the commit to the event's index */
	local_set(&cpu_buffer->commit_page->page->commit, index);
}

static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
 again:
	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		barrier();
	}

	/* again, keep gcc from optimizing */
	barrier();

	/*
	 * If an interrupt came in just after the first while loop
	 * and pushed the tail page forward, we will be left with
	 * a dangling commit that will never go forward.
	 */
	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
		goto again;
}

static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}

static void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = cpu_buffer->head_page;
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
}

/**
 * rb_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static void
rb_update_event(struct ring_buffer_event *event,
		unsigned type, unsigned length)
{
	event->type_len = type;

	switch (type) {

	case RINGBUF_TYPE_PADDING:
	case RINGBUF_TYPE_TIME_EXTEND:
	case RINGBUF_TYPE_TIME_STAMP:
		break;

	case 0:
		length -= RB_EVNT_HDR_SIZE;
		if (length > RB_MAX_SMALL_DATA)
			event->array[0] = length;
		else
			event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
		break;
	default:
		BUG();
	}
}

static unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusion */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ALIGNMENT);

	return length;
}

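/*
 * Illustrative worked example (not part of this file) of the sizing
 * arithmetic above. For a 6-byte payload: 6 + RB_EVNT_HDR_SIZE(4) = 10,
 * which ALIGN() rounds up to 12 bytes reserved on the page.
 */
#if 0 /* example only */
static void example_event_sizing(void)
{
	BUG_ON(rb_calculate_event_length(6) != 12);
	/* a zero length is bumped to 1 first: 1 + 4 -> aligned to 8 */
	BUG_ON(rb_calculate_event_length(0) != 8);
}
#endif
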
static struct ring_buffer_event *
rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
	     unsigned long length, unsigned long tail,
	     struct buffer_page *commit_page,
	     struct buffer_page *tail_page, u64 *ts)
{
	struct buffer_page *next_page, *head_page, *reader_page;
	struct ring_buffer *buffer = cpu_buffer->buffer;
	struct ring_buffer_event *event;
	bool lock_taken = false;
	unsigned long flags;

	next_page = tail_page;

	local_irq_save(flags);
	/*
	 * Since the write to the buffer is still not
	 * fully lockless, we must be careful with NMIs.
	 * The locks in the writers are taken when a write
	 * crosses to a new page. The locks protect against
	 * races with the readers (this will soon be fixed
	 * with a lockless solution).
	 *
	 * Because we can not protect against NMIs, and we
	 * want to keep traces reentrant, we need to manage
	 * what happens when we are in an NMI.
	 *
	 * NMIs can happen after we take the lock.
	 * If we are in an NMI, only take the lock
	 * if it is not already taken. Otherwise
	 * simply fail.
	 */
	if (unlikely(in_nmi())) {
		if (!__raw_spin_trylock(&cpu_buffer->lock)) {
			cpu_buffer->nmi_dropped++;
			goto out_reset;
		}
	} else
		__raw_spin_lock(&cpu_buffer->lock);

	lock_taken = true;

	rb_inc_page(cpu_buffer, &next_page);

	head_page = cpu_buffer->head_page;
	reader_page = cpu_buffer->reader_page;

	/* we grabbed the lock before incrementing */
	if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
		goto out_reset;

	/*
	 * If for some reason, we had an interrupt storm that made
	 * it all the way around the buffer, bail, and warn
	 * about it.
	 */
	if (unlikely(next_page == commit_page)) {
		cpu_buffer->commit_overrun++;
		goto out_reset;
	}

	if (next_page == head_page) {
		if (!(buffer->flags & RB_FL_OVERWRITE))
			goto out_reset;

		/* tail_page has not moved yet? */
		if (tail_page == cpu_buffer->tail_page) {
			/* count overflows */
			cpu_buffer->overrun +=
				local_read(&head_page->entries);

			rb_inc_page(cpu_buffer, &head_page);
			cpu_buffer->head_page = head_page;
			cpu_buffer->head_page->read = 0;
		}
	}

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == cpu_buffer->tail_page) {
		local_set(&next_page->write, 0);
		local_set(&next_page->entries, 0);
		local_set(&next_page->page->commit, 0);
		cpu_buffer->tail_page = next_page;

		/* reread the time stamp */
		*ts = rb_time_stamp(buffer, cpu_buffer->cpu);
		cpu_buffer->tail_page->page->time_stamp = *ts;
	}

	/*
	 * The actual tail page has moved forward.
	 */
	if (tail < BUF_PAGE_SIZE) {
		/* Mark the rest of the page with padding */
		event = __rb_page_index(tail_page, tail);
		rb_event_set_padding(event);
	}

	/* Set the write back to the previous setting */
	local_sub(length, &tail_page->write);

	/*
	 * If this was a commit entry that failed,
	 * increment that too
	 */
	if (tail_page == cpu_buffer->commit_page &&
	    tail == rb_commit_index(cpu_buffer)) {
		rb_set_commit_to_write(cpu_buffer);
	}

	__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);

	/* fail and let the caller try again */
	return ERR_PTR(-EAGAIN);

 out_reset:
	/* reset write */
	local_sub(length, &tail_page->write);

	if (likely(lock_taken))
		__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);
	return NULL;
}

static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  unsigned type, unsigned long length, u64 *ts)
{
	struct buffer_page *tail_page, *commit_page;
	struct ring_buffer_event *event;
	unsigned long tail, write;

	commit_page = cpu_buffer->commit_page;
	/* we just need to protect against interrupts */
	barrier();
	tail_page = cpu_buffer->tail_page;
	write = local_add_return(length, &tail_page->write);
	tail = write - length;

	/* See if we shot past the end of this buffer page */
	if (write > BUF_PAGE_SIZE)
		return rb_move_tail(cpu_buffer, length, tail,
				    commit_page, tail_page, ts);

	/* We reserved something on the buffer */

	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
		return NULL;

	event = __rb_page_index(tail_page, tail);
	rb_update_event(event, type, length);

	/* The passed in type is zero for DATA */
	if (likely(!type))
		local_inc(&tail_page->entries);

	/*
	 * If this is a commit and the tail is zero, then update
	 * this page's time stamp.
	 */
	if (!tail && rb_is_commit(cpu_buffer, event))
		cpu_buffer->commit_page->page->time_stamp = *ts;

	return event;
}

static inline int
rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
		  struct ring_buffer_event *event)
{
	unsigned long new_index, old_index;
	struct buffer_page *bpage;
	unsigned long index;
	unsigned long addr;

	new_index = rb_event_index(event);
	old_index = new_index + rb_event_length(event);
	addr = (unsigned long)event;
	addr &= PAGE_MASK;

	bpage = cpu_buffer->tail_page;

	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
		/*
		 * This is on the tail page. It is possible that
		 * a write could come in and move the tail page
		 * and write to the next page. That is fine
		 * because we just shorten what is on this page.
		 */
		index = local_cmpxchg(&bpage->write, old_index, new_index);
		if (index == old_index)
			return 1;
	}

	/* could not discard */
	return 0;
}

static int
rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		  u64 *ts, u64 *delta)
{
	struct ring_buffer_event *event;
	static int once;
	int ret;

	if (unlikely(*delta > (1ULL << 59) && !once++)) {
		printk(KERN_WARNING "Delta way too big! %llu"
		       " ts=%llu write stamp = %llu\n",
		       (unsigned long long)*delta,
		       (unsigned long long)*ts,
		       (unsigned long long)cpu_buffer->write_stamp);
		WARN_ON(1);
	}

	/*
	 * The delta is too big, we need to add a
	 * new timestamp.
	 */
	event = __rb_reserve_next(cpu_buffer,
				  RINGBUF_TYPE_TIME_EXTEND,
				  RB_LEN_TIME_EXTEND,
				  ts);
	if (!event)
		return -EBUSY;

	if (PTR_ERR(event) == -EAGAIN)
		return -EAGAIN;

	/* Only a committed time event can update the write stamp */
	if (rb_is_commit(cpu_buffer, event)) {
		/*
		 * If this is the first on the page, then we need to
		 * update the page itself, and just put in a zero.
		 */
		if (rb_event_index(event)) {
			event->time_delta = *delta & TS_MASK;
			event->array[0] = *delta >> TS_SHIFT;
		} else {
			cpu_buffer->commit_page->page->time_stamp = *ts;
			/* try to discard, since we do not need this */
			if (!rb_try_to_discard(cpu_buffer, event)) {
				/* nope, just zero it */
				event->time_delta = 0;
				event->array[0] = 0;
			}
		}
		cpu_buffer->write_stamp = *ts;
		/* let the caller know this was the commit */
		ret = 1;
	} else {
		/* Try to discard the event */
		if (!rb_try_to_discard(cpu_buffer, event)) {
			/* Darn, this is just wasted space */
			event->time_delta = 0;
			event->array[0] = 0;
		}
		ret = 0;
	}

	*delta = 0;

	return ret;
}

static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
		      unsigned long length)
{
	struct ring_buffer_event *event;
	u64 ts, delta = 0;
	int commit = 0;
	int nr_loops = 0;

	length = rb_calculate_event_length(length);
 again:
	/*
	 * We allow for interrupts to reenter here and do a trace.
	 * If one does, it will cause this original code to loop
	 * back here. Even with heavy interrupts happening, this
	 * should only happen a few times in a row. If this happens
	 * 1000 times in a row, there must be either an interrupt
	 * storm or we have something buggy.
	 * Bail!
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
		return NULL;

	ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);

	/*
	 * Only the first commit can update the timestamp.
	 * Yes there is a race here. If an interrupt comes in
	 * just after the conditional and it traces too, then it
	 * will also check the deltas. More than one timestamp may
	 * also be made. But only the entry that did the actual
	 * commit will be something other than zero.
	 */
	if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
		   rb_page_write(cpu_buffer->tail_page) ==
		   rb_commit_index(cpu_buffer))) {
		u64 diff;

		diff = ts - cpu_buffer->write_stamp;

		/* make sure this diff is calculated here */
		barrier();

		/* Did the write stamp get updated already? */
		if (unlikely(ts < cpu_buffer->write_stamp))
			goto get_event;

		delta = diff;
		if (unlikely(test_time_stamp(delta))) {

			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
			if (commit == -EBUSY)
				return NULL;

			if (commit == -EAGAIN)
				goto again;

			RB_WARN_ON(cpu_buffer, commit < 0);
		}
	}

 get_event:
	event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
	if (unlikely(PTR_ERR(event) == -EAGAIN))
		goto again;

	if (!event) {
		if (unlikely(commit))
			/*
			 * Ouch! We needed a timestamp and it was committed.
			 * But we didn't get our event reserved.
			 */
			rb_set_commit_to_write(cpu_buffer);
		return NULL;
	}

	/*
	 * If the timestamp was committed, make the commit our entry
	 * now so that we will update it when needed.
	 */
	if (unlikely(commit))
		rb_set_commit_event(cpu_buffer, event);
	else if (!rb_is_commit(cpu_buffer, event))
		delta = 0;

	event->time_delta = delta;

	return event;
}

#define TRACE_RECURSIVE_DEPTH 16

static int trace_recursive_lock(void)
{
	current->trace_recursion++;

	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
		return 0;

	/* Disable all tracing before we do anything else */
	tracing_off_permanent();

	printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
		    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
		    current->trace_recursion,
		    hardirq_count() >> HARDIRQ_SHIFT,
		    softirq_count() >> SOFTIRQ_SHIFT,
		    in_nmi());

	WARN_ON_ONCE(1);
	return -1;
}

static void trace_recursive_unlock(void)
{
	WARN_ON_ONCE(!current->trace_recursion);

	current->trace_recursion--;
}

static DEFINE_PER_CPU(int, rb_need_resched);

/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int cpu, resched;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return NULL;

	if (atomic_read(&buffer->record_disabled))
		return NULL;

	/* If we are tracing schedule, we don't want to recurse */
	resched = ftrace_preempt_disable();

	if (trace_recursive_lock())
		goto out_nocheck;

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	if (length > BUF_MAX_DATA_SIZE)
		goto out;

	event = rb_reserve_next_event(cpu_buffer, length);
	if (!event)
		goto out;

	/*
	 * Need to store resched state on this cpu.
	 * Only the first needs to.
	 */

	if (preempt_count() == 1)
		per_cpu(rb_need_resched, cpu) = resched;

	return event;

 out:
	trace_recursive_unlock();

 out_nocheck:
	ftrace_preempt_enable(resched);
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);

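/*
 * Illustrative usage sketch (not part of this file): the canonical
 * reserve -> fill -> commit sequence described in the kernel-doc above.
 * The example_record() name and payload struct are hypothetical.
 */
#if 0 /* example only */
struct example_payload {
	unsigned long ip;
	unsigned long parent_ip;
};

static int example_record(struct ring_buffer *buffer,
			  unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct example_payload *entry;

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return -EBUSY;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	return ring_buffer_unlock_commit(buffer, event);
}
#endif
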
static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	local_inc(&cpu_buffer->entries);

	/* Only process further if we own the commit */
	if (!rb_is_commit(cpu_buffer, event))
		return;

	cpu_buffer->write_stamp += event->time_delta;

	rb_set_commit_to_write(cpu_buffer);
}

/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	trace_recursive_unlock();

	/*
	 * Only the last preempt count needs to restore preemption.
	 */
	if (preempt_count() == 1)
		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
	else
		preempt_enable_no_resched_notrace();

	return 0;
}
EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);

static inline void rb_event_discard(struct ring_buffer_event *event)
{
	/* array[0] holds the actual length for the discarded event */
	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	if (!event->time_delta)
		event->time_delta = 1;
}

/**
 * ring_buffer_event_discard - discard any event in the ring buffer
 * @event: the event to discard
 *
 * Sometimes an event that is in the ring buffer needs to be ignored.
 * This function lets the user discard an event in the ring buffer
 * and then that event will not be read later.
 *
 * Note, it is up to the user to be careful with this, and protect
 * against races. If the user discards an event that has been consumed
 * it is possible that it could corrupt the ring buffer.
 */
void ring_buffer_event_discard(struct ring_buffer_event *event)
{
	rb_event_discard(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_discard);

/**
 * ring_buffer_discard_commit - discard an event that has not been committed
 * @buffer: the ring buffer
 * @event: non committed event to discard
 *
 * This is similar to ring_buffer_event_discard but must only be
 * performed on an event that has not been committed yet. The difference
 * is that this will also try to free the event from the ring buffer
 * if another event has not been added behind it.
 *
 * If another event has been added behind it, it will set the event
 * up as discarded, and perform the commit.
 *
 * If this function is called, do not call ring_buffer_unlock_commit on
 * the event.
 */
void ring_buffer_discard_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* The event is discarded regardless */
	rb_event_discard(event);

	/*
	 * This must only be called if the event has not been
	 * committed yet. Thus we can assume that preemption
	 * is still disabled.
	 */
	RB_WARN_ON(buffer, preemptible());

	cpu = smp_processor_id();
	cpu_buffer = buffer->buffers[cpu];

	if (!rb_try_to_discard(cpu_buffer, event))
		goto out;

	/*
	 * The commit is still visible by the reader, so we
	 * must increment entries.
	 */
	local_inc(&cpu_buffer->entries);
 out:
	/*
	 * If a write came in and pushed the tail page
	 * we still need to update the commit pointer
	 * if we were the commit.
	 */
	if (rb_is_commit(cpu_buffer, event))
		rb_set_commit_to_write(cpu_buffer);

	trace_recursive_unlock();

	/*
	 * Only the last preempt count needs to restore preemption.
	 */
	if (preempt_count() == 1)
		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
	else
		preempt_enable_no_resched_notrace();

}
EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);

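/*
 * Illustrative usage sketch (not part of this file): reserving an event
 * and dropping it with ring_buffer_discard_commit() when a filter
 * decides it is unwanted. example_filter() is a hypothetical predicate.
 */
#if 0 /* example only */
static void example_filtered_record(struct ring_buffer *buffer, int value)
{
	struct ring_buffer_event *event;
	int *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return;
	body = ring_buffer_event_data(event);
	*body = value;

	if (example_filter(value))
		ring_buffer_unlock_commit(buffer, event);
	else
		/* do NOT call ring_buffer_unlock_commit after this */
		ring_buffer_discard_commit(buffer, event);
}
#endif
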
7a8e76a3
SR
1777/**
1778 * ring_buffer_write - write data to the buffer without reserving
1779 * @buffer: The ring buffer to write to.
1780 * @length: The length of the data being written (excluding the event header)
1781 * @data: The data to write to the buffer.
1782 *
1783 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1784 * one function. If you already have the data to write to the buffer, it
1785 * may be easier to simply call this function.
1786 *
1787 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1788 * and not the length of the event which would hold the header.
1789 */
1790int ring_buffer_write(struct ring_buffer *buffer,
1791 unsigned long length,
1792 void *data)
1793{
1794 struct ring_buffer_per_cpu *cpu_buffer;
1795 struct ring_buffer_event *event;
7a8e76a3
SR
1796 void *body;
1797 int ret = -EBUSY;
bf41a158 1798 int cpu, resched;
7a8e76a3 1799
033601a3 1800 if (ring_buffer_flags != RB_BUFFERS_ON)
a3583244
SR
1801 return -EBUSY;
1802
7a8e76a3
SR
1803 if (atomic_read(&buffer->record_disabled))
1804 return -EBUSY;
1805
182e9f5f 1806 resched = ftrace_preempt_disable();
bf41a158 1807
7a8e76a3
SR
1808 cpu = raw_smp_processor_id();
1809
9e01c1b7 1810 if (!cpumask_test_cpu(cpu, buffer->cpumask))
d769041f 1811 goto out;
7a8e76a3
SR
1812
1813 cpu_buffer = buffer->buffers[cpu];
7a8e76a3
SR
1814
1815 if (atomic_read(&cpu_buffer->record_disabled))
1816 goto out;
1817
be957c44
SR
1818 if (length > BUF_MAX_DATA_SIZE)
1819 goto out;
1820
1821 event = rb_reserve_next_event(cpu_buffer, length);
7a8e76a3
SR
1822 if (!event)
1823 goto out;
1824
1825 body = rb_event_data(event);
1826
1827 memcpy(body, data, length);
1828
1829 rb_commit(cpu_buffer, event);
1830
1831 ret = 0;
1832 out:
182e9f5f 1833 ftrace_preempt_enable(resched);
7a8e76a3
SR
1834
1835 return ret;
1836}
c4f50183 1837EXPORT_SYMBOL_GPL(ring_buffer_write);
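
/*
 * Illustrative sketch (not part of this file): writing an already-built
 * payload in a single call.  "my_buffer" and struct my_event are
 * assumptions made up for this example:
 *
 *	struct my_event ev = { .id = 1, .val = 42 };
 *
 *	if (ring_buffer_write(my_buffer, sizeof(ev), &ev))
 *		pr_debug("write rejected: recording off or payload too big\n");
 */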

static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = cpu_buffer->reader_page;
	struct buffer_page *head = cpu_buffer->head_page;
	struct buffer_page *commit = cpu_buffer->commit_page;

	return reader->read == rb_page_commit(reader) &&
		(commit == reader ||
		 (commit == head &&
		  head->read == rb_page_commit(commit)));
}

/**
 * ring_buffer_record_disable - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
	atomic_inc(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable);

/**
 * ring_buffer_record_enable - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable(struct ring_buffer *buffer)
{
	atomic_dec(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
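
/*
 * Illustrative sketch (not part of this file): quiescing the buffer
 * before reading it directly, then re-enabling recording.  "my_buffer"
 * is an assumption made up for this example:
 *
 *	ring_buffer_record_disable(my_buffer);
 *	synchronize_sched();		<-- wait out in-flight writers
 *	... read the buffer ...
 *	ring_buffer_record_enable(my_buffer);
 */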

/**
 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
 * @buffer: The ring buffer to stop writes to.
 * @cpu: The CPU buffer to stop
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_inc(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);

/**
 * ring_buffer_record_enable_cpu - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 * @cpu: The CPU to enable.
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);

/**
 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the entries from.
 */
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = (local_read(&cpu_buffer->entries) - cpu_buffer->overrun)
		- cpu_buffer->read;

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
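
/*
 * Worked example of the accounting above (numbers are illustrative):
 * if a CPU buffer has had 10 events written (entries == 10), 3 of them
 * overwritten by the writer wrapping (overrun == 3), and 2 already
 * consumed by a reader (read == 2), then
 *
 *	ring_buffer_entries_cpu() == (10 - 3) - 2 == 5
 *
 * events remain to be read.
 */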

/**
 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = cpu_buffer->overrun;

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);

/**
 * ring_buffer_nmi_dropped_cpu - get the number of nmis that were dropped
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of dropped NMI events from
 */
unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = cpu_buffer->nmi_dropped;

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_nmi_dropped_cpu);

/**
 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of commit overruns from
 */
unsigned long
ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = cpu_buffer->commit_overrun;

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);

/**
 * ring_buffer_entries - get the number of entries in a buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of entries in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_entries(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long entries = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		entries += (local_read(&cpu_buffer->entries) -
			    cpu_buffer->overrun) - cpu_buffer->read;
	}

	return entries;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries);

/**
 * ring_buffer_overruns - get the number of overruns in the buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of overruns in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long overruns = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		overruns += cpu_buffer->overrun;
	}

	return overruns;
}
EXPORT_SYMBOL_GPL(ring_buffer_overruns);
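
/*
 * Illustrative sketch (not part of this file): dumping the aggregate
 * statistics for a buffer.  "my_buffer" is an assumption made up for
 * this example:
 *
 *	pr_info("entries: %lu, overruns: %lu\n",
 *		ring_buffer_entries(my_buffer),
 *		ring_buffer_overruns(my_buffer));
 */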

static void rb_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/* Iterator usage is expected to have record disabled */
	if (list_empty(&cpu_buffer->reader_page->list)) {
		iter->head_page = cpu_buffer->head_page;
		iter->head = cpu_buffer->head_page->read;
	} else {
		iter->head_page = cpu_buffer->reader_page;
		iter->head = cpu_buffer->reader_page->read;
	}
	if (iter->head)
		iter->read_stamp = cpu_buffer->read_stamp;
	else
		iter->read_stamp = iter->head_page->page->time_stamp;
}

/**
 * ring_buffer_iter_reset - reset an iterator
 * @iter: The iterator to reset
 *
 * Resets the iterator, so that it will start from the beginning
 * again.
 */
void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;

	if (!iter)
		return;

	cpu_buffer = iter->cpu_buffer;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	rb_iter_reset(iter);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);

/**
 * ring_buffer_iter_empty - check if an iterator has no more to read
 * @iter: The iterator to check
 */
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	cpu_buffer = iter->cpu_buffer;

	return iter->head_page == cpu_buffer->commit_page &&
		iter->head == rb_commit_index(cpu_buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);

static void
rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		     struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		cpu_buffer->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		cpu_buffer->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

static void
rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
			  struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		iter->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		iter->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

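/*
 * Worked example of the time-extend decoding above (values are
 * illustrative): a normal event carries only a 27-bit time_delta, so a
 * delta too large to fit is emitted as a TIME_EXTEND event whose
 * array[0] holds the upper bits.  With TS_SHIFT == 27, array[0] == 2
 * and time_delta == 5, the decoder reconstructs
 *
 *	delta = (2 << 27) + 5 = 268435461
 *
 * and adds it to the running read stamp.
 */
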
static struct buffer_page *
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = NULL;
	unsigned long flags;
	int nr_loops = 0;

	local_irq_save(flags);
	__raw_spin_lock(&cpu_buffer->lock);

 again:
	/*
	 * This should normally only loop twice. But because the
	 * start of the reader inserts an empty page, it causes
	 * a case where we will loop three times. There should be no
	 * reason to loop four times (that I know of).
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
		reader = NULL;
		goto out;
	}

	reader = cpu_buffer->reader_page;

	/* If there's more to read, return this page */
	if (cpu_buffer->reader_page->read < rb_page_size(reader))
		goto out;

	/* Never should we have an index greater than the size */
	if (RB_WARN_ON(cpu_buffer,
		       cpu_buffer->reader_page->read > rb_page_size(reader)))
		goto out;

	/* check if we caught up to the tail */
	reader = NULL;
	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
		goto out;

	/*
	 * Splice the empty reader page into the list around the head.
	 * Reset the reader page to size zero.
	 */

	reader = cpu_buffer->head_page;
	cpu_buffer->reader_page->list.next = reader->list.next;
	cpu_buffer->reader_page->list.prev = reader->list.prev;

	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);

	/* Make the reader page now replace the head */
	reader->list.prev->next = &cpu_buffer->reader_page->list;
	reader->list.next->prev = &cpu_buffer->reader_page->list;

	/*
	 * If the tail is on the reader, then we must set the head
	 * to the inserted page, otherwise we set it one before.
	 */
	cpu_buffer->head_page = cpu_buffer->reader_page;

	if (cpu_buffer->commit_page != reader)
		rb_inc_page(cpu_buffer, &cpu_buffer->head_page);

	/* Finally update the reader page to the new head */
	cpu_buffer->reader_page = reader;
	rb_reset_reader_page(cpu_buffer);

	goto again;

 out:
	__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);

	return reader;
}

static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	unsigned length;

	reader = rb_get_reader_page(cpu_buffer);

	/* This function should not be called when buffer is empty */
	if (RB_WARN_ON(cpu_buffer, !reader))
		return;

	event = rb_reader_event(cpu_buffer);

	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX
			|| rb_discarded_event(event))
		cpu_buffer->read++;

	rb_update_read_stamp(cpu_buffer, event);

	length = rb_event_length(event);
	cpu_buffer->reader_page->read += length;
}

static void rb_advance_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned length;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

	/*
	 * Check if we are at the end of the buffer.
	 */
	if (iter->head >= rb_page_size(iter->head_page)) {
		/* discarded commits can make the page empty */
		if (iter->head_page == cpu_buffer->commit_page)
			return;
		rb_inc_iter(iter);
		return;
	}

	event = rb_iter_head_event(iter);

	length = rb_event_length(event);

	/*
	 * This should not be called to advance the header if we are
	 * at the tail of the buffer.
	 */
	if (RB_WARN_ON(cpu_buffer,
		       (iter->head_page == cpu_buffer->commit_page) &&
		       (iter->head + length > rb_commit_index(cpu_buffer))))
		return;

	rb_update_iter_read_stamp(iter, event);

	iter->head += length;

	/* check for end of page padding */
	if ((iter->head >= rb_page_size(iter->head_page)) &&
	    (iter->head_page != cpu_buffer->commit_page))
		rb_advance_iter(iter);
}

static struct ring_buffer_event *
rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	int nr_loops = 0;

	cpu_buffer = buffer->buffers[cpu];

 again:
	/*
	 * We repeat when a timestamp is encountered. It is possible
	 * to get multiple timestamps from an interrupt entering just
	 * as one timestamp is about to be written, or from discarded
	 * commits. The most that we can have is the number on a single page.
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
		return NULL;

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		return NULL;

	event = rb_reader_event(cpu_buffer);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			RB_WARN_ON(cpu_buffer, 1);
		/*
		 * Because the writer could be discarding every
		 * event it creates (which would probably be bad)
		 * if we were to go back to "again" then we may never
		 * catch up, and will trigger the warn on, or lock
		 * the box. Return the padding, and we will release
		 * the current locks, and try again.
		 */
		rb_advance_reader(cpu_buffer);
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = cpu_buffer->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(buffer,
							 cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}

static struct ring_buffer_event *
rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int nr_loops = 0;

	if (ring_buffer_iter_empty(iter))
		return NULL;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

 again:
	/*
	 * We repeat when a timestamp is encountered.
	 * We can get multiple timestamps by nested interrupts or also
	 * if filtering is on (discarding commits). Since discarding
	 * commits can be frequent we can get a lot of timestamps.
	 * But we limit them by not adding timestamps if they begin
	 * at the start of a page.
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
		return NULL;

	if (rb_per_cpu_empty(cpu_buffer))
		return NULL;

	event = rb_iter_head_event(iter);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event)) {
			rb_inc_iter(iter);
			goto again;
		}
		rb_advance_iter(iter);
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = iter->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(buffer,
							 cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}

/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

 again:
	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_buffer_peek(buffer, cpu, ts);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
		cpu_relax();
		goto again;
	}

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_peek);

/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

 again:
	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
		cpu_relax();
		goto again;
	}

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);

/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: The CPU buffer to consume from
 * @ts: The timestamp of the consumed event, if non-NULL
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning, that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event = NULL;
	unsigned long flags;

 again:
	/* might be called in atomic */
	preempt_disable();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];
	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	event = rb_buffer_peek(buffer, cpu, ts);
	if (!event)
		goto out_unlock;

	rb_advance_reader(cpu_buffer);

 out_unlock:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	preempt_enable();

	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
		cpu_relax();
		goto again;
	}

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);
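
/*
 * Illustrative sketch (not part of this file): draining a CPU buffer
 * with consuming reads.  "my_buffer", "cpu" and handle_event() are
 * assumptions made up for this example:
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(my_buffer, cpu, &ts)))
 *		handle_event(ring_buffer_event_data(event), ts);
 */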

/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This starts up an iteration through the buffer. It also disables
 * the recording to the buffer until the reading is finished.
 * This prevents the reading from being corrupted. This is not
 * a consuming read, so a producer is not expected.
 *
 * Must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	__raw_spin_lock(&cpu_buffer->lock);
	rb_iter_reset(iter);
	__raw_spin_unlock(&cpu_buffer->lock);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);

/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_start
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	atomic_dec(&cpu_buffer->record_disabled);
	kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);

/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

 again:
	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	if (!event)
		goto out;

	rb_advance_iter(iter);
 out:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
		cpu_relax();
		goto again;
	}

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);
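
/*
 * Illustrative sketch (not part of this file): a non-consuming pass
 * over a CPU buffer with the iterator API.  "my_buffer", "cpu" and
 * handle_event() are assumptions made up for this example:
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(my_buffer, cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		handle_event(ring_buffer_event_data(event), ts);
 *	ring_buffer_read_finish(iter);
 */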

/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
	return BUF_PAGE_SIZE * buffer->pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);

static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	local_set(&cpu_buffer->head_page->write, 0);
	local_set(&cpu_buffer->head_page->entries, 0);
	local_set(&cpu_buffer->head_page->page->commit, 0);

	cpu_buffer->head_page->read = 0;

	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->commit_page = cpu_buffer->head_page;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);
	cpu_buffer->reader_page->read = 0;

	cpu_buffer->nmi_dropped = 0;
	cpu_buffer->commit_overrun = 0;
	cpu_buffer->overrun = 0;
	cpu_buffer->read = 0;
	local_set(&cpu_buffer->entries, 0);

	cpu_buffer->write_stamp = 0;
	cpu_buffer->read_stamp = 0;
}

/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	atomic_inc(&cpu_buffer->record_disabled);

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	__raw_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);

	__raw_spin_unlock(&cpu_buffer->lock);

	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);

/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		if (!rb_per_cpu_empty(cpu_buffer))
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);

/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 1;

	cpu_buffer = buffer->buffers[cpu];
	ret = rb_per_cpu_empty(cpu_buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);

/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: The CPU buffer to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and has another back up buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;
	int ret = -EINVAL;

	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
		goto out;

	/* At least make sure the two buffers are somewhat the same */
	if (buffer_a->pages != buffer_b->pages)
		goto out;

	ret = -EAGAIN;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		goto out;

	if (atomic_read(&buffer_a->record_disabled))
		goto out;

	if (atomic_read(&buffer_b->record_disabled))
		goto out;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	if (atomic_read(&cpu_buffer_a->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_b->record_disabled))
		goto out;

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);

	ret = 0;
 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
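
/*
 * Illustrative sketch (not part of this file): taking a "snapshot" of
 * one CPU by swapping its buffer with a spare one, as described above.
 * "live_buffer" and "snap_buffer" are assumptions made up for this
 * example:
 *
 *	if (ring_buffer_swap_cpu(live_buffer, snap_buffer, cpu) == 0) {
 *		... the cpu entry of snap_buffer now holds the old live
 *		    data; read it at leisure while tracing continues ...
 *	}
 */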

/**
 * ring_buffer_alloc_read_page - allocate a page to read from buffer
 * @buffer: the buffer to allocate for.
 *
 * This function is used in conjunction with ring_buffer_read_page.
 * When reading a full page from the ring buffer, these functions
 * can be used to speed up the process. The calling function should
 * allocate a few pages first with this function. Then when it
 * needs to get pages from the ring buffer, it passes the result
 * of this function into ring_buffer_read_page, which will swap
 * the page that was allocated, with the read page of the buffer.
 *
 * Returns:
 *  The page allocated, or NULL on error.
 */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
{
	struct buffer_data_page *bpage;
	unsigned long addr;

	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		return NULL;

	bpage = (void *)addr;

	rb_init_page(bpage);

	return bpage;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);

/**
 * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
 * @data: the page to free
 *
 * Free a page allocated from ring_buffer_alloc_read_page.
 */
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
{
	free_page((unsigned long)data);
}
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);

/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
 * @len: amount to extract
 * @cpu: the cpu of the buffer to extract
 * @full: should the extraction only happen when the page is full.
 *
 * This function will pull out a page from the ring buffer and consume it.
 * @data_page must be the address of the variable that was returned
 * from ring_buffer_alloc_read_page. This is because the page might be used
 * to swap with a page in the ring buffer.
 *
 * for example:
 *	rpage = ring_buffer_alloc_read_page(buffer);
 *	if (!rpage)
 *		return error;
 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *
 * When @full is set, the function will not succeed unless
 * the writer is off the reader page.
 *
 * Note: it is up to the calling functions to handle sleeps and wakeups.
 *  The ring buffer can be used anywhere in the kernel and can not
 *  blindly call wake_up. The layer that uses the ring buffer must be
 *  responsible for that.
 *
 * Returns:
 *  >=0 if data has been transferred, returns the offset of consumed data.
 *  <0 if no data has been transferred.
 */
int ring_buffer_read_page(struct ring_buffer *buffer,
			  void **data_page, size_t len, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	struct buffer_page *reader;
	unsigned long flags;
	unsigned int commit;
	unsigned int read;
	u64 save_timestamp;
	int ret = -1;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	/*
	 * If len is not big enough to hold the page header, then
	 * we can not copy anything.
	 */
	if (len <= BUF_PAGE_HDR_SIZE)
		goto out;

	len -= BUF_PAGE_HDR_SIZE;

	if (!data_page)
		goto out;

	bpage = *data_page;
	if (!bpage)
		goto out;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		goto out_unlock;

	event = rb_reader_event(cpu_buffer);

	read = reader->read;
	commit = rb_page_commit(reader);

	/*
	 * If this page has been partially read or
	 * if len is not big enough to read the rest of the page or
	 * a writer is still on the page, then
	 * we must copy the data from the page to the buffer.
	 * Otherwise, we can simply swap the page with the one passed in.
	 */
	if (read || (len < (commit - read)) ||
	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
		unsigned int rpos = read;
		unsigned int pos = 0;
		unsigned int size;

		if (full)
			goto out_unlock;

		if (len > (commit - read))
			len = (commit - read);

		size = rb_event_length(event);

		if (len < size)
			goto out_unlock;

		/* save the current timestamp, since the user will need it */
		save_timestamp = cpu_buffer->read_stamp;

		/* Need to copy one event at a time */
		do {
			memcpy(bpage->data + pos, rpage->data + rpos, size);

			len -= size;

			rb_advance_reader(cpu_buffer);
			rpos = reader->read;
			pos += size;

			event = rb_reader_event(cpu_buffer);
			size = rb_event_length(event);
		} while (len > size);

		/* update bpage */
		local_set(&bpage->commit, pos);
		bpage->time_stamp = save_timestamp;

		/* we copied everything to the beginning */
		read = 0;
	} else {
		/* update the entry counter */
		cpu_buffer->read += local_read(&reader->entries);

		/* swap the pages */
		rb_init_page(bpage);
		bpage = reader->page;
		reader->page = *data_page;
		local_set(&reader->write, 0);
		local_set(&reader->entries, 0);
		reader->read = 0;
		*data_page = bpage;
	}
	ret = read;

 out_unlock:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
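
/*
 * Illustrative sketch (not part of this file): the full
 * allocate/read/free cycle for page-sized reads, extending the example
 * in the kerneldoc above.  "my_buffer", "cpu" and process_page() are
 * assumptions made up for this example:
 *
 *	void *rpage = ring_buffer_alloc_read_page(my_buffer);
 *	int ret;
 *
 *	if (!rpage)
 *		return -ENOMEM;
 *	ret = ring_buffer_read_page(my_buffer, &rpage, PAGE_SIZE, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *	ring_buffer_free_read_page(my_buffer, rpage);
 */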

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	int r;

	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
		r = sprintf(buf, "permanently disabled\n");
	else
		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	if (val)
		set_bit(RB_BUFFERS_ON_BIT, p);
	else
		clear_bit(RB_BUFFERS_ON_BIT, p);

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
};

static __init int rb_init_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("tracing_on", 0644, d_tracer,
			  &ring_buffer_flags, &rb_simple_fops);

	return 0;
}

fs_initcall(rb_init_debugfs);
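
/*
 * Usage note: with debugfs mounted at /sys/kernel/debug, the file
 * created above toggles all ring buffer recording:
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on	(stop recording)
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on	(resume recording)
 *	cat /sys/kernel/debug/tracing/tracing_on	(query state)
 */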

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	struct ring_buffer *buffer =
		container_of(self, struct ring_buffer, cpu_notify);
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (cpu_isset(cpu, *buffer->cpumask))
			return NOTIFY_OK;

		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu]) {
			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
			     cpu);
			return NOTIFY_OK;
		}
		smp_wmb();
		cpu_set(cpu, *buffer->cpumask);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Do nothing.
		 * If we were to free the buffer, then the user would
		 * lose any trace that was in the buffer.
		 */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif