/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/ftrace_irq.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include "trace.h"

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	int ret;

	ret = trace_seq_printf(s, "\ttype : 2 bits\n");
	ret = trace_seq_printf(s, "\tlen : 3 bits\n");
	ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
	ret = trace_seq_printf(s, "\tarray : 32 bits\n");
	ret = trace_seq_printf(s, "\n");
	ret = trace_seq_printf(s, "\tpadding : type == %d\n",
			       RINGBUF_TYPE_PADDING);
	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
			       RINGBUF_TYPE_TIME_EXTEND);
	ret = trace_seq_printf(s, "\tdata : type == %d\n",
			       RINGBUF_TYPE_DATA);

	return ret;
}

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page       |               |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */

/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 * This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0        : ring buffers are off
 *   1      0        : ring buffers are on
 *   X      1        : ring buffers are permanently disabled
 */

enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

/**
 * tracing_on - enable all tracing buffers
 *
 * This function enables all tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off all tracing buffers
 *
 * This function stops all tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return ring_buffer_flags == RB_BUFFERS_ON;
}
EXPORT_SYMBOL_GPL(tracing_is_on);

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	28

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type == RINGBUF_TYPE_PADDING && event->time_delta == 0;
}

static inline int rb_discarded_event(struct ring_buffer_event *event)
{
	return event->type == RINGBUF_TYPE_PADDING && event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	event->type = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->len)
		length = event->len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

/* inline for ring buffer fast paths */
static unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return rb_event_data_length(event);

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length = rb_event_length(event);
	if (event->type != RINGBUF_TYPE_DATA)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	BUG_ON(event->type != RINGBUF_TYPE_DATA);
	/* If length is in len field, then array[0] has the data */
	if (event->len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[];	/* data of buffer page */
};

struct buffer_page {
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	struct list_head list;		/* list of free pages */
	struct buffer_data_page *page;	/* Actual data page */
};

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

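/*
 * Editor's sketch (not part of the original file): how a delta that fails
 * test_time_stamp() gets split across a TIME_EXTEND event, mirroring what
 * rb_add_time_stamp() does further down. The low TS_SHIFT bits ride in the
 * 27-bit time_delta field; the upper bits spill into array[0]. Assumes
 * @event was already reserved with type RINGBUF_TYPE_TIME_EXTEND.
 */
static inline void example_split_time_delta(struct ring_buffer_event *event,
					    u64 delta)
{
	event->time_delta = delta & TS_MASK;	/* low 27 bits */
	event->array[0] = delta >> TS_SHIFT;	/* remaining high bits */
}
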
#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;
	int ret;

	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			       "offset:0;\tsize:%u;\n",
			       (unsigned int)sizeof(field.time_stamp));

	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
			       "offset:%u;\tsize:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       (unsigned int)sizeof(field.commit));

	ret = trace_seq_printf(s, "\tfield: char data;\t"
			       "offset:%u;\tsize:%u;\n",
			       (unsigned int)offsetof(typeof(field), data),
			       (unsigned int)BUF_PAGE_SIZE);

	return ret;
}

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	struct ring_buffer		*buffer;
	spinlock_t			reader_lock; /* serialize readers */
	raw_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct list_head		pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			overrun;
	unsigned long			entries;
	u64				write_stamp;
	u64				read_stamp;
	atomic_t			record_disabled;
};

struct ring_buffer {
	unsigned			pages;
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	cpumask_var_t			cpumask;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block		cpu_notify;
#endif
	u64				(*clock)(void);
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	u64				read_stamp;
};

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(buffer, cond)				\
	({							\
		int _____ret = unlikely(cond);			\
		if (_____ret) {					\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
		}						\
		_____ret;					\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	/* shift to debug/test normalization and TIME_EXTENTS */
	time = buffer->clock() << DEBUG_SHIFT;
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/**
 * check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
	}

	return 0;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;
	unsigned long addr;
	LIST_HEAD(pages);
	unsigned i;

	for (i = 0; i < nr_pages; i++) {
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;
		list_add(&bpage->list, &pages);

		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			goto free_pages;
		bpage->page = (void *)addr;
		rb_init_page(bpage->page);
	}

	list_splice(&pages, head);

	rb_check_pages(cpu_buffer);

	return 0;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	return -ENOMEM;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	unsigned long addr;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	spin_lock_init(&cpu_buffer->reader_lock);
	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&cpu_buffer->pages);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	cpu_buffer->reader_page = bpage;
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		goto fail_free_reader;
	bpage->page = (void *)addr;
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	list_for_each_entry_safe(bpage, tmp, head, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	kfree(cpu_buffer);
}

/*
 * Causes compile errors if the struct buffer_page gets bigger
 * than the struct page.
 */
extern int ring_buffer_page_too_big(void);

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu);
#endif

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu;

	/* Paranoid! Optimizes out when all is well */
	if (sizeof(struct buffer_page) > sizeof(struct page))
		ring_buffer_page_too_big();

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;

	/* need at least two pages */
	if (buffer->pages == 1)
		buffer->pages++;

	/*
	 * In case of non-hotplug cpu, if the ring-buffer is allocated
	 * in early initcall, it will not be notified of secondary cpus.
	 * In that off case, we need to allocate for all possible cpus.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	get_online_cpus();
	cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
	cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

#ifdef CONFIG_HOTPLUG_CPU
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	register_cpu_notifier(&buffer->cpu_notify);
#endif

	put_online_cpus();
	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);
	put_online_cpus();

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc);
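
/*
 * Editor's sketch (not part of the original file): typical lifetime of a
 * buffer as seen by a tracer. One megabyte is an arbitrary example size;
 * ring_buffer_alloc() rounds it up to whole pages per cpu.
 */
static struct ring_buffer *example_setup_buffer(void)
{
	struct ring_buffer *buffer;

	buffer = ring_buffer_alloc(1024 * 1024, RB_FL_OVERWRITE);
	if (!buffer)
		return NULL;	/* allocation failed on some cpu */
	/* ... record events ..., then tear down with: */
	/* ring_buffer_free(buffer); */
	return buffer;
}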

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	get_online_cpus();

#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&buffer->cpu_notify);
#endif

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	put_online_cpus();

	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct ring_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
			return;
		p = cpu_buffer->pages.next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
		return;

	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
		struct list_head *pages, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
			return;
		p = pages->next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		list_add_tail(&bpage->list, &cpu_buffer->pages);
	}
	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * The tracer is responsible for making sure that the buffer is
 * not being used while changing the size.
 * Note: We may be able to change the above requirement by using
 *  RCU synchronizations.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns -1 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages, rm_pages, new_pages;
	struct buffer_page *bpage, *tmp;
	unsigned long buffer_size;
	unsigned long addr;
	LIST_HEAD(pages);
	int i, cpu;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return size;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;
	buffer_size = buffer->pages * BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	if (size == buffer_size)
		return size;

	mutex_lock(&buffer->mutex);
	get_online_cpus();

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	if (size < buffer_size) {

		/* easy case, just free pages */
		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
			goto out_fail;

		rm_pages = buffer->pages - nr_pages;

		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_remove_pages(cpu_buffer, rm_pages);
		}
		goto out;
	}

	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * linked list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM;
	 */
	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
		goto out_fail;

	new_pages = nr_pages - buffer->pages;

	for_each_buffer_cpu(buffer, cpu) {
		for (i = 0; i < new_pages; i++) {
			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
						   cache_line_size()),
					     GFP_KERNEL, cpu_to_node(cpu));
			if (!bpage)
				goto free_pages;
			list_add(&bpage->list, &pages);
			addr = __get_free_page(GFP_KERNEL);
			if (!addr)
				goto free_pages;
			bpage->page = (void *)addr;
			rb_init_page(bpage->page);
		}
	}

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		rb_insert_pages(cpu_buffer, &pages, new_pages);
	}

	if (RB_WARN_ON(buffer, !list_empty(&pages)))
		goto out_fail;

 out:
	buffer->pages = nr_pages;
	put_online_cpus();
	mutex_unlock(&buffer->mutex);

	return size;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	return -ENOMEM;

	/*
	 * Something went totally wrong, and we are too paranoid
	 * to even clean up the mess.
	 */
 out_fail:
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	return -1;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
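
/*
 * Editor's sketch (not part of the original file): a caller honoring the
 * "buffer must not be in use" contract of ring_buffer_resize(). Writers
 * are stopped first and only re-enabled after the resize completes.
 */
static int example_resize_quiesced(struct ring_buffer *buffer,
				   unsigned long new_size)
{
	int ret;

	ring_buffer_record_disable(buffer);
	synchronize_sched();	/* wait for in-flight writers to finish */
	ret = ring_buffer_resize(buffer, new_size);
	ring_buffer_record_enable(buffer);
	return ret < 0 ? ret : 0;
}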

static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
	return bpage->data + index;
}

static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->head_page,
			       cpu_buffer->head_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write);
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->head_page);
}

/*
 * When the tail hits the head and the buffer is in overwrite mode,
 * the head jumps to the next page and all content on the previous
 * page is discarded. But before doing so, we update the overrun
 * variable of the buffer.
 */
static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	unsigned long head;

	for (head = 0; head < rb_head_size(cpu_buffer);
	     head += rb_event_length(event)) {

		event = __rb_page_index(cpu_buffer->head_page, head);
		if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
			return;
		/* Only count data entries */
		if (event->type != RINGBUF_TYPE_DATA)
			continue;
		cpu_buffer->overrun++;
		cpu_buffer->entries--;
	}
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = (*bpage)->list.next;

	if (p == &cpu_buffer->pages)
		p = p->next;

	*bpage = list_entry(p, struct buffer_page, list);
}

static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
}

static int
rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
	     struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}

static void
rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
		    struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	while (cpu_buffer->commit_page->page != (void *)addr) {
		if (RB_WARN_ON(cpu_buffer,
			       cpu_buffer->commit_page == cpu_buffer->tail_page))
			return;
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
	}

	/* Now set the commit to the event's index */
	local_set(&cpu_buffer->commit_page->page->commit, index);
}

static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
 again:
	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		barrier();
	}

	/* again, keep gcc from optimizing */
	barrier();

	/*
	 * If an interrupt came in just after the first while loop
	 * and pushed the tail page forward, we will be left with
	 * a dangling commit that will never go forward.
	 */
	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
		goto again;
}

static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}

static void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = cpu_buffer->head_page;
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
}

/**
 * ring_buffer_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static void
rb_update_event(struct ring_buffer_event *event,
		unsigned type, unsigned length)
{
	event->type = type;

	switch (type) {

	case RINGBUF_TYPE_PADDING:
		break;

	case RINGBUF_TYPE_TIME_EXTEND:
		event->len = DIV_ROUND_UP(RB_LEN_TIME_EXTEND, RB_ALIGNMENT);
		break;

	case RINGBUF_TYPE_TIME_STAMP:
		event->len = DIV_ROUND_UP(RB_LEN_TIME_STAMP, RB_ALIGNMENT);
		break;

	case RINGBUF_TYPE_DATA:
		length -= RB_EVNT_HDR_SIZE;
		if (length > RB_MAX_SMALL_DATA) {
			event->len = 0;
			event->array[0] = length;
		} else
			event->len = DIV_ROUND_UP(length, RB_ALIGNMENT);
		break;
	default:
		BUG();
	}
}

static unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusions */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ALIGNMENT);

	return length;
}

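/*
 * Editor's sketch (not part of the original file): what
 * rb_calculate_event_length() yields for the two data encodings, assuming
 * the 32-bit event header described by ring_buffer_print_entry_header()
 * (so RB_EVNT_HDR_SIZE == 4). A payload up to RB_MAX_SMALL_DATA bytes fits
 * the len field; anything larger spends an extra array[0] word.
 */
static inline void example_event_lengths(void)
{
	/* 8-byte payload fits the len field: 4-byte header + 8 = 12 */
	WARN_ON(rb_calculate_event_length(8) != 12);
	/* 100 bytes exceeds RB_MAX_SMALL_DATA: header + array[0] + 100 = 108 */
	WARN_ON(rb_calculate_event_length(100) != 108);
}
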
static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  unsigned type, unsigned long length, u64 *ts)
{
	struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
	unsigned long tail, write;
	struct ring_buffer *buffer = cpu_buffer->buffer;
	struct ring_buffer_event *event;
	unsigned long flags;
	bool lock_taken = false;

	commit_page = cpu_buffer->commit_page;
	/* we just need to protect against interrupts */
	barrier();
	tail_page = cpu_buffer->tail_page;
	write = local_add_return(length, &tail_page->write);
	tail = write - length;

	/* See if we shot past the end of this buffer page */
	if (write > BUF_PAGE_SIZE) {
		struct buffer_page *next_page = tail_page;

		local_irq_save(flags);
		/*
		 * Since the write to the buffer is still not
		 * fully lockless, we must be careful with NMIs.
		 * The locks in the writers are taken when a write
		 * crosses to a new page. The locks protect against
		 * races with the readers (this will soon be fixed
		 * with a lockless solution).
		 *
		 * Because we can not protect against NMIs, and we
		 * want to keep traces reentrant, we need to manage
		 * what happens when we are in an NMI.
		 *
		 * NMIs can happen after we take the lock.
		 * If we are in an NMI, only take the lock
		 * if it is not already taken. Otherwise
		 * simply fail.
		 */
		if (unlikely(in_nmi())) {
			if (!__raw_spin_trylock(&cpu_buffer->lock))
				goto out_reset;
		} else
			__raw_spin_lock(&cpu_buffer->lock);

		lock_taken = true;

		rb_inc_page(cpu_buffer, &next_page);

		head_page = cpu_buffer->head_page;
		reader_page = cpu_buffer->reader_page;

		/* we grabbed the lock before incrementing */
		if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
			goto out_reset;

		/*
		 * If for some reason, we had an interrupt storm that made
		 * it all the way around the buffer, bail, and warn
		 * about it.
		 */
		if (unlikely(next_page == commit_page)) {
			/* This can easily happen on small ring buffers */
			WARN_ON_ONCE(buffer->pages > 2);
			goto out_reset;
		}

		if (next_page == head_page) {
			if (!(buffer->flags & RB_FL_OVERWRITE))
				goto out_reset;

			/* tail_page has not moved yet? */
			if (tail_page == cpu_buffer->tail_page) {
				/* count overflows */
				rb_update_overflow(cpu_buffer);

				rb_inc_page(cpu_buffer, &head_page);
				cpu_buffer->head_page = head_page;
				cpu_buffer->head_page->read = 0;
			}
		}

		/*
		 * If the tail page is still the same as what we think
		 * it is, then it is up to us to update the tail
		 * pointer.
		 */
		if (tail_page == cpu_buffer->tail_page) {
			local_set(&next_page->write, 0);
			local_set(&next_page->page->commit, 0);
			cpu_buffer->tail_page = next_page;

			/* reread the time stamp */
			*ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
			cpu_buffer->tail_page->page->time_stamp = *ts;
		}

		/*
		 * The actual tail page has moved forward.
		 */
		if (tail < BUF_PAGE_SIZE) {
			/* Mark the rest of the page with padding */
			event = __rb_page_index(tail_page, tail);
			rb_event_set_padding(event);
		}

		if (tail <= BUF_PAGE_SIZE)
			/* Set the write back to the previous setting */
			local_set(&tail_page->write, tail);

		/*
		 * If this was a commit entry that failed,
		 * increment that too
		 */
		if (tail_page == cpu_buffer->commit_page &&
		    tail == rb_commit_index(cpu_buffer)) {
			rb_set_commit_to_write(cpu_buffer);
		}

		__raw_spin_unlock(&cpu_buffer->lock);
		local_irq_restore(flags);

		/* fail and let the caller try again */
		return ERR_PTR(-EAGAIN);
	}

	/* We reserved something on the buffer */

	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
		return NULL;

	event = __rb_page_index(tail_page, tail);
	rb_update_event(event, type, length);

	/*
	 * If this is a commit and the tail is zero, then update
	 * this page's time stamp.
	 */
	if (!tail && rb_is_commit(cpu_buffer, event))
		cpu_buffer->commit_page->page->time_stamp = *ts;

	return event;

 out_reset:
	/* reset write */
	if (tail <= BUF_PAGE_SIZE)
		local_set(&tail_page->write, tail);

	if (likely(lock_taken))
		__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);
	return NULL;
}

static int
rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		  u64 *ts, u64 *delta)
{
	struct ring_buffer_event *event;
	static int once;
	int ret;

	if (unlikely(*delta > (1ULL << 59) && !once++)) {
		printk(KERN_WARNING "Delta way too big! %llu"
		       " ts=%llu write stamp = %llu\n",
		       (unsigned long long)*delta,
		       (unsigned long long)*ts,
		       (unsigned long long)cpu_buffer->write_stamp);
		WARN_ON(1);
	}

	/*
	 * The delta is too big, we need to add a
	 * new timestamp.
	 */
	event = __rb_reserve_next(cpu_buffer,
				  RINGBUF_TYPE_TIME_EXTEND,
				  RB_LEN_TIME_EXTEND,
				  ts);
	if (!event)
		return -EBUSY;

	if (PTR_ERR(event) == -EAGAIN)
		return -EAGAIN;

	/* Only a committed time event can update the write stamp */
	if (rb_is_commit(cpu_buffer, event)) {
		/*
		 * If this is the first on the page, then we need to
		 * update the page itself, and just put in a zero.
		 */
		if (rb_event_index(event)) {
			event->time_delta = *delta & TS_MASK;
			event->array[0] = *delta >> TS_SHIFT;
		} else {
			cpu_buffer->commit_page->page->time_stamp = *ts;
			event->time_delta = 0;
			event->array[0] = 0;
		}
		cpu_buffer->write_stamp = *ts;
		/* let the caller know this was the commit */
		ret = 1;
	} else {
		/* Darn, this is just wasted space */
		event->time_delta = 0;
		event->array[0] = 0;
		ret = 0;
	}

	*delta = 0;

	return ret;
}

static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
		      unsigned type, unsigned long length)
{
	struct ring_buffer_event *event;
	u64 ts, delta;
	int commit = 0;
	int nr_loops = 0;

 again:
	/*
	 * We allow for interrupts to reenter here and do a trace.
	 * If one does, it will cause this original code to loop
	 * back here. Even with heavy interrupts happening, this
	 * should only happen a few times in a row. If this happens
	 * 1000 times in a row, there must be either an interrupt
	 * storm or we have something buggy.
	 * Bail!
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
		return NULL;

	ts = ring_buffer_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);

	/*
	 * Only the first commit can update the timestamp.
	 * Yes there is a race here. If an interrupt comes in
	 * just after the conditional and it traces too, then it
	 * will also check the deltas. More than one timestamp may
	 * also be made. But only the entry that did the actual
	 * commit will be something other than zero.
	 */
	if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
	    rb_page_write(cpu_buffer->tail_page) ==
	    rb_commit_index(cpu_buffer)) {

		delta = ts - cpu_buffer->write_stamp;

		/* make sure this delta is calculated here */
		barrier();

		/* Did the write stamp get updated already? */
		if (unlikely(ts < cpu_buffer->write_stamp))
			delta = 0;

		if (test_time_stamp(delta)) {

			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);

			if (commit == -EBUSY)
				return NULL;

			if (commit == -EAGAIN)
				goto again;

			RB_WARN_ON(cpu_buffer, commit < 0);
		}
	} else
		/* Non commits have zero deltas */
		delta = 0;

	event = __rb_reserve_next(cpu_buffer, type, length, &ts);
	if (PTR_ERR(event) == -EAGAIN)
		goto again;

	if (!event) {
		if (unlikely(commit))
			/*
			 * Ouch! We needed a timestamp and it was committed.
			 * But we didn't get our event reserved.
			 */
			rb_set_commit_to_write(cpu_buffer);
		return NULL;
	}

	/*
	 * If the timestamp was committed, make the commit our entry
	 * now so that we will update it when needed.
	 */
	if (commit)
		rb_set_commit_event(cpu_buffer, event);
	else if (!rb_is_commit(cpu_buffer, event))
		delta = 0;

	event->time_delta = delta;

	return event;
}

#define TRACE_RECURSIVE_DEPTH 16

static int trace_recursive_lock(void)
{
	current->trace_recursion++;

	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
		return 0;

	/* Disable all tracing before we do anything else */
	tracing_off_permanent();

	printk_once(KERN_WARNING "Tracing recursion: depth[%d]:"
		    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
		    current->trace_recursion,
		    hardirq_count() >> HARDIRQ_SHIFT,
		    softirq_count() >> SOFTIRQ_SHIFT,
		    in_nmi());

	WARN_ON_ONCE(1);
	return -1;
}

static void trace_recursive_unlock(void)
{
	WARN_ON_ONCE(!current->trace_recursion);

	current->trace_recursion--;
}

static DEFINE_PER_CPU(int, rb_need_resched);

/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int cpu, resched;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return NULL;

	if (atomic_read(&buffer->record_disabled))
		return NULL;

	/* If we are tracing schedule, we don't want to recurse */
	resched = ftrace_preempt_disable();

	if (trace_recursive_lock())
		goto out_nocheck;

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	length = rb_calculate_event_length(length);
	if (length > BUF_PAGE_SIZE)
		goto out;

	event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
	if (!event)
		goto out;

	/*
	 * Need to store resched state on this cpu.
	 * Only the first needs to.
	 */

	if (preempt_count() == 1)
		per_cpu(rb_need_resched, cpu) = resched;

	return event;

 out:
	trace_recursive_unlock();

 out_nocheck:
	ftrace_preempt_enable(resched);
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);

static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	cpu_buffer->entries++;

	/* Only process further if we own the commit */
	if (!rb_is_commit(cpu_buffer, event))
		return;

	cpu_buffer->write_stamp += event->time_delta;

	rb_set_commit_to_write(cpu_buffer);
}

/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	trace_recursive_unlock();

	/*
	 * Only the last preempt count needs to restore preemption.
	 */
	if (preempt_count() == 1)
		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
	else
		preempt_enable_no_resched_notrace();

	return 0;
}
EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
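
/*
 * Editor's sketch (not part of the original file): the canonical
 * reserve/copy/commit sequence a tracer would use, per the kernel-doc
 * above. The u64 payload is an arbitrary example; any blob up to
 * BUF_PAGE_SIZE (minus the event header) works.
 */
static inline int example_record_u64(struct ring_buffer *buffer, u64 val)
{
	struct ring_buffer_event *event;
	u64 *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(val));
	if (!event)
		return -EBUSY;	/* recording disabled or buffer full */

	body = ring_buffer_event_data(event);
	*body = val;

	/* releases the implicit preempt-disable taken by the reserve */
	return ring_buffer_unlock_commit(buffer, event);
}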

static inline void rb_event_discard(struct ring_buffer_event *event)
{
	event->type = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	if (!event->time_delta)
		event->time_delta = 1;
}

/**
 * ring_buffer_event_discard - discard any event in the ring buffer
 * @event: the event to discard
 *
 * Sometimes an event that is in the ring buffer needs to be ignored.
 * This function lets the user discard an event in the ring buffer
 * and then that event will not be read later.
 *
 * Note, it is up to the user to be careful with this, and protect
 * against races. If the user discards an event that has been consumed
 * it is possible that it could corrupt the ring buffer.
 */
void ring_buffer_event_discard(struct ring_buffer_event *event)
{
	rb_event_discard(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_discard);

/**
 * ring_buffer_discard_commit - discard an event that has not been committed
 * @buffer: the ring buffer
 * @event: non committed event to discard
 *
 * This is similar to ring_buffer_event_discard but must only be
 * performed on an event that has not been committed yet. The difference
 * is that this will also try to free the event from the ring buffer
 * if another event has not been added behind it.
 *
 * If another event has been added behind it, it will set the event
 * up as discarded, and perform the commit.
 *
 * If this function is called, do not call ring_buffer_unlock_commit on
 * the event.
 */
void ring_buffer_discard_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long new_index, old_index;
	struct buffer_page *bpage;
	unsigned long index;
	unsigned long addr;
	int cpu;

	/* The event is discarded regardless */
	rb_event_discard(event);

	/*
	 * This must only be called if the event has not been
	 * committed yet. Thus we can assume that preemption
	 * is still disabled.
	 */
	RB_WARN_ON(buffer, !preempt_count());

	cpu = smp_processor_id();
	cpu_buffer = buffer->buffers[cpu];

	new_index = rb_event_index(event);
	old_index = new_index + rb_event_length(event);
	addr = (unsigned long)event;
	addr &= PAGE_MASK;

	bpage = cpu_buffer->tail_page;

	if (bpage == (void *)addr && rb_page_write(bpage) == old_index) {
		/*
		 * This is on the tail page. It is possible that
		 * a write could come in and move the tail page
		 * and write to the next page. That is fine
		 * because we just shorten what is on this page.
		 */
		index = local_cmpxchg(&bpage->write, old_index, new_index);
		if (index == old_index)
			goto out;
	}

	/*
	 * The commit is still visible by the reader, so we
	 * must increment entries.
	 */
	cpu_buffer->entries++;
 out:
	/*
	 * If a write came in and pushed the tail page
	 * we still need to update the commit pointer
	 * if we were the commit.
	 */
	if (rb_is_commit(cpu_buffer, event))
		rb_set_commit_to_write(cpu_buffer);

	trace_recursive_unlock();

	/*
	 * Only the last preempt count needs to restore preemption.
	 */
	if (preempt_count() == 1)
		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
	else
		preempt_enable_no_resched_notrace();

}
EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);

/**
 * ring_buffer_write - write data to the buffer without reserving
 * @buffer: The ring buffer to write to.
 * @length: The length of the data being written (excluding the event header)
 * @data: The data to write to the buffer.
 *
 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
 * one function. If you already have the data to write to the buffer, it
 * may be easier to simply call this function.
 *
 * Note, like ring_buffer_lock_reserve, the length is the length of the data
 * and not the length of the event which would hold the header.
 */
int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length,
		      void *data)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long event_length;
	void *body;
	int ret = -EBUSY;
	int cpu, resched;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return -EBUSY;

	if (atomic_read(&buffer->record_disabled))
		return -EBUSY;

	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	event_length = rb_calculate_event_length(length);
	event = rb_reserve_next_event(cpu_buffer,
				      RINGBUF_TYPE_DATA, event_length);
	if (!event)
		goto out;

	body = rb_event_data(event);

	memcpy(body, data, length);

	rb_commit(cpu_buffer, event);

	ret = 0;
 out:
	ftrace_preempt_enable(resched);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_write);
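
/*
 * Editor's sketch (not part of the original file): ring_buffer_write() as
 * a one-shot alternative to the reserve/commit pair, for data that is
 * already assembled. The struct is a hypothetical payload for illustration.
 */
struct example_payload {
	u64	when;
	int	what;
};

static inline int example_write_payload(struct ring_buffer *buffer,
					u64 when, int what)
{
	struct example_payload p = { .when = when, .what = what };

	/* copies the payload in and commits it in one call */
	return ring_buffer_write(buffer, sizeof(p), &p);
}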

static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = cpu_buffer->reader_page;
	struct buffer_page *head = cpu_buffer->head_page;
	struct buffer_page *commit = cpu_buffer->commit_page;

	return reader->read == rb_page_commit(reader) &&
		(commit == reader ||
		 (commit == head &&
		  head->read == rb_page_commit(commit)));
}

7a8e76a3
SR
1820/**
1821 * ring_buffer_record_disable - stop all writes into the buffer
1822 * @buffer: The ring buffer to stop writes to.
1823 *
1824 * This prevents all writes to the buffer. Any attempt to write
1825 * to the buffer after this will fail and return NULL.
1826 *
1827 * The caller should call synchronize_sched() after this.
1828 */
1829void ring_buffer_record_disable(struct ring_buffer *buffer)
1830{
1831 atomic_inc(&buffer->record_disabled);
1832}
c4f50183 1833EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
7a8e76a3
SR
1834
1835/**
1836 * ring_buffer_record_enable - enable writes to the buffer
1837 * @buffer: The ring buffer to enable writes
1838 *
1839 * Note, multiple disables will need the same number of enables
1840 * to truly enable the writing (much like preempt_disable).
1841 */
1842void ring_buffer_record_enable(struct ring_buffer *buffer)
1843{
1844 atomic_dec(&buffer->record_disabled);
1845}
c4f50183 1846EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
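
/*
 * Illustrative sketch (not part of the original file): record_disabled
 * is a counter, so disables nest and each must be balanced by an enable
 * before writers may proceed, much like preempt_disable().
 */
static void example_nested_disable(struct ring_buffer *buffer)
{
	ring_buffer_record_disable(buffer);	/* count 1: writes fail */
	ring_buffer_record_disable(buffer);	/* count 2 */
	ring_buffer_record_enable(buffer);	/* count 1: still disabled */
	ring_buffer_record_enable(buffer);	/* count 0: writes allowed */
}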
7a8e76a3
SR
1847
1848/**
1849 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1850 * @buffer: The ring buffer to stop writes to.
1851 * @cpu: The CPU buffer to stop
1852 *
1853 * This prevents all writes to the buffer. Any attempt to write
1854 * to the buffer after this will fail and return NULL.
1855 *
1856 * The caller should call synchronize_sched() after this.
1857 */
1858void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1859{
1860 struct ring_buffer_per_cpu *cpu_buffer;
1861
9e01c1b7 1862 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 1863 return;
7a8e76a3
SR
1864
1865 cpu_buffer = buffer->buffers[cpu];
1866 atomic_inc(&cpu_buffer->record_disabled);
1867}
c4f50183 1868EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
7a8e76a3
SR
1869
1870/**
1871 * ring_buffer_record_enable_cpu - enable writes to the buffer
1872 * @buffer: The ring buffer to enable writes
1873 * @cpu: The CPU to enable.
1874 *
1875 * Note, multiple disables will need the same number of enables
1876 * to truly enable the writing (much like preempt_disable).
1877 */
1878void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1879{
1880 struct ring_buffer_per_cpu *cpu_buffer;
1881
9e01c1b7 1882 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 1883 return;
7a8e76a3
SR
1884
1885 cpu_buffer = buffer->buffers[cpu];
1886 atomic_dec(&cpu_buffer->record_disabled);
1887}
c4f50183 1888EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
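
/*
 * Illustrative sketch (not part of the original file): quiescing a
 * single cpu's buffer. Per the kerneldoc above, synchronize_sched()
 * is needed after the disable so that writers already inside the
 * buffer on that cpu have finished before it is examined.
 */
static void example_quiesce_cpu(struct ring_buffer *buffer, int cpu)
{
	ring_buffer_record_disable_cpu(buffer, cpu);
	synchronize_sched();

	/* safe to examine that cpu's buffer here: no new writes */

	ring_buffer_record_enable_cpu(buffer, cpu);
}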
7a8e76a3
SR
1889
1890/**
1891 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1892 * @buffer: The ring buffer
1893 * @cpu: The per CPU buffer to get the entries from.
1894 */
1895unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1896{
1897 struct ring_buffer_per_cpu *cpu_buffer;
8aabee57 1898 unsigned long ret;
7a8e76a3 1899
9e01c1b7 1900 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 1901 return 0;
7a8e76a3
SR
1902
1903 cpu_buffer = buffer->buffers[cpu];
554f786e 1904 ret = cpu_buffer->entries;
554f786e
SR
1905
1906 return ret;
7a8e76a3 1907}
c4f50183 1908EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
7a8e76a3
SR
1909
1910/**
1911 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1912 * @buffer: The ring buffer
1913 * @cpu: The per CPU buffer to get the number of overruns from
1914 */
1915unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1916{
1917 struct ring_buffer_per_cpu *cpu_buffer;
8aabee57 1918 unsigned long ret;
7a8e76a3 1919
9e01c1b7 1920 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 1921 return 0;
7a8e76a3
SR
1922
1923 cpu_buffer = buffer->buffers[cpu];
554f786e 1924 ret = cpu_buffer->overrun;
554f786e
SR
1925
1926 return ret;
7a8e76a3 1927}
c4f50183 1928EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
7a8e76a3
SR
1929
1930/**
1931 * ring_buffer_entries - get the number of entries in a buffer
1932 * @buffer: The ring buffer
1933 *
1934 * Returns the total number of entries in the ring buffer
1935 * (all CPU entries)
1936 */
1937unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1938{
1939 struct ring_buffer_per_cpu *cpu_buffer;
1940 unsigned long entries = 0;
1941 int cpu;
1942
1943 /* if you care about this being correct, lock the buffer */
1944 for_each_buffer_cpu(buffer, cpu) {
1945 cpu_buffer = buffer->buffers[cpu];
1946 entries += cpu_buffer->entries;
1947 }
1948
1949 return entries;
1950}
c4f50183 1951EXPORT_SYMBOL_GPL(ring_buffer_entries);
7a8e76a3
SR
1952
1953/**
1954 * ring_buffer_overruns - get the number of overruns in the buffer
1955 * @buffer: The ring buffer
1956 *
1957 * Returns the total number of overruns in the ring buffer
1958 * (all CPU entries)
1959 */
1960unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1961{
1962 struct ring_buffer_per_cpu *cpu_buffer;
1963 unsigned long overruns = 0;
1964 int cpu;
1965
1966 /* if you care about this being correct, lock the buffer */
1967 for_each_buffer_cpu(buffer, cpu) {
1968 cpu_buffer = buffer->buffers[cpu];
1969 overruns += cpu_buffer->overrun;
1970 }
1971
1972 return overruns;
1973}
c4f50183 1974EXPORT_SYMBOL_GPL(ring_buffer_overruns);
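
/*
 * Illustrative sketch (not part of the original file): an unlocked,
 * possibly racy snapshot of the buffer statistics. Both helpers just
 * sum per-cpu counters, so concurrent writers can skew the totals,
 * as the comments above note.
 */
static void example_report_stats(struct ring_buffer *buffer)
{
	unsigned long entries = ring_buffer_entries(buffer);
	unsigned long overruns = ring_buffer_overruns(buffer);

	printk(KERN_INFO "ring buffer: %lu entries, %lu overruns\n",
	       entries, overruns);
}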
7a8e76a3 1975
642edba5 1976static void rb_iter_reset(struct ring_buffer_iter *iter)
7a8e76a3
SR
1977{
1978 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1979
d769041f
SR
1980 /* Iterator usage is expected to have record disabled */
1981 if (list_empty(&cpu_buffer->reader_page->list)) {
1982 iter->head_page = cpu_buffer->head_page;
6f807acd 1983 iter->head = cpu_buffer->head_page->read;
d769041f
SR
1984 } else {
1985 iter->head_page = cpu_buffer->reader_page;
6f807acd 1986 iter->head = cpu_buffer->reader_page->read;
d769041f
SR
1987 }
1988 if (iter->head)
1989 iter->read_stamp = cpu_buffer->read_stamp;
1990 else
abc9b56d 1991 iter->read_stamp = iter->head_page->page->time_stamp;
642edba5 1992}
f83c9d0f 1993
642edba5
SR
1994/**
1995 * ring_buffer_iter_reset - reset an iterator
1996 * @iter: The iterator to reset
1997 *
1998 * Resets the iterator, so that it will start from the beginning
1999 * again.
2000 */
2001void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
2002{
554f786e 2003 struct ring_buffer_per_cpu *cpu_buffer;
642edba5
SR
2004 unsigned long flags;
2005
554f786e
SR
2006 if (!iter)
2007 return;
2008
2009 cpu_buffer = iter->cpu_buffer;
2010
642edba5
SR
2011 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2012 rb_iter_reset(iter);
f83c9d0f 2013 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7a8e76a3 2014}
c4f50183 2015EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
7a8e76a3
SR
2016
2017/**
2018 * ring_buffer_iter_empty - check if an iterator has no more to read
2019 * @iter: The iterator to check
2020 */
2021int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
2022{
2023 struct ring_buffer_per_cpu *cpu_buffer;
2024
2025 cpu_buffer = iter->cpu_buffer;
2026
bf41a158
SR
2027 return iter->head_page == cpu_buffer->commit_page &&
2028 iter->head == rb_commit_index(cpu_buffer);
7a8e76a3 2029}
c4f50183 2030EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
7a8e76a3
SR
2031
2032static void
2033rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2034 struct ring_buffer_event *event)
2035{
2036 u64 delta;
2037
2038 switch (event->type) {
2039 case RINGBUF_TYPE_PADDING:
2040 return;
2041
2042 case RINGBUF_TYPE_TIME_EXTEND:
2043 delta = event->array[0];
2044 delta <<= TS_SHIFT;
2045 delta += event->time_delta;
2046 cpu_buffer->read_stamp += delta;
2047 return;
2048
2049 case RINGBUF_TYPE_TIME_STAMP:
2050 /* FIXME: not implemented */
2051 return;
2052
2053 case RINGBUF_TYPE_DATA:
2054 cpu_buffer->read_stamp += event->time_delta;
2055 return;
2056
2057 default:
2058 BUG();
2059 }
2060 return;
2061}
2062
2063static void
2064rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
2065 struct ring_buffer_event *event)
2066{
2067 u64 delta;
2068
2069 switch (event->type) {
2070 case RINGBUF_TYPE_PADDING:
2071 return;
2072
2073 case RINGBUF_TYPE_TIME_EXTEND:
2074 delta = event->array[0];
2075 delta <<= TS_SHIFT;
2076 delta += event->time_delta;
2077 iter->read_stamp += delta;
2078 return;
2079
2080 case RINGBUF_TYPE_TIME_STAMP:
2081 /* FIXME: not implemented */
2082 return;
2083
2084 case RINGBUF_TYPE_DATA:
2085 iter->read_stamp += event->time_delta;
2086 return;
2087
2088 default:
2089 BUG();
2090 }
2091 return;
2092}
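
/*
 * Illustrative sketch (not part of the original file): how the two
 * helpers above rebuild a large time delta. A time-extend event keeps
 * the upper bits in array[0] and the low TS_SHIFT (27) bits in the
 * time_delta field, so the full delta is
 * (array[0] << TS_SHIFT) + time_delta.
 */
static u64 example_rebuild_delta(struct ring_buffer_event *event)
{
	u64 delta = event->array[0];

	delta <<= TS_SHIFT;
	delta += event->time_delta;

	return delta;
}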
2093
d769041f
SR
2094static struct buffer_page *
2095rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
7a8e76a3 2096{
d769041f
SR
2097 struct buffer_page *reader = NULL;
2098 unsigned long flags;
818e3dd3 2099 int nr_loops = 0;
d769041f 2100
3e03fb7f
SR
2101 local_irq_save(flags);
2102 __raw_spin_lock(&cpu_buffer->lock);
d769041f
SR
2103
2104 again:
818e3dd3
SR
2105 /*
2106 * This should normally only loop twice. But because the
2107 * start of the reader inserts an empty page, it causes
2108 * a case where we will loop three times. There should be no
2109 * reason to loop four times (that I know of).
2110 */
3e89c7bb 2111 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
818e3dd3
SR
2112 reader = NULL;
2113 goto out;
2114 }
2115
d769041f
SR
2116 reader = cpu_buffer->reader_page;
2117
2118 /* If there's more to read, return this page */
bf41a158 2119 if (cpu_buffer->reader_page->read < rb_page_size(reader))
d769041f
SR
2120 goto out;
2121
2122 /* Never should we have an index greater than the size */
3e89c7bb
SR
2123 if (RB_WARN_ON(cpu_buffer,
2124 cpu_buffer->reader_page->read > rb_page_size(reader)))
2125 goto out;
d769041f
SR
2126
2127 /* check if we caught up to the tail */
2128 reader = NULL;
bf41a158 2129 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
d769041f 2130 goto out;
7a8e76a3
SR
2131
2132 /*
d769041f
SR
2133 * Splice the empty reader page into the list around the head.
2134 * Reset the reader page to size zero.
7a8e76a3 2135 */
7a8e76a3 2136
d769041f
SR
2137 reader = cpu_buffer->head_page;
2138 cpu_buffer->reader_page->list.next = reader->list.next;
2139 cpu_buffer->reader_page->list.prev = reader->list.prev;
bf41a158
SR
2140
2141 local_set(&cpu_buffer->reader_page->write, 0);
abc9b56d 2142 local_set(&cpu_buffer->reader_page->page->commit, 0);
7a8e76a3 2143
d769041f
SR
2144 /* Make the reader page now replace the head */
2145 reader->list.prev->next = &cpu_buffer->reader_page->list;
2146 reader->list.next->prev = &cpu_buffer->reader_page->list;
7a8e76a3
SR
2147
2148 /*
d769041f
SR
2149 * If the tail is on the reader, then we must set the head
2150 * to the inserted page, otherwise we set it one before.
7a8e76a3 2151 */
d769041f 2152 cpu_buffer->head_page = cpu_buffer->reader_page;
7a8e76a3 2153
bf41a158 2154 if (cpu_buffer->commit_page != reader)
d769041f
SR
2155 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2156
2157 /* Finally update the reader page to the new head */
2158 cpu_buffer->reader_page = reader;
2159 rb_reset_reader_page(cpu_buffer);
2160
2161 goto again;
2162
2163 out:
3e03fb7f
SR
2164 __raw_spin_unlock(&cpu_buffer->lock);
2165 local_irq_restore(flags);
d769041f
SR
2166
2167 return reader;
2168}
2169
2170static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2171{
2172 struct ring_buffer_event *event;
2173 struct buffer_page *reader;
2174 unsigned length;
2175
2176 reader = rb_get_reader_page(cpu_buffer);
7a8e76a3 2177
d769041f 2178 /* This function should not be called when buffer is empty */
3e89c7bb
SR
2179 if (RB_WARN_ON(cpu_buffer, !reader))
2180 return;
7a8e76a3 2181
d769041f
SR
2182 event = rb_reader_event(cpu_buffer);
2183
2d622719 2184 if (event->type == RINGBUF_TYPE_DATA || rb_discarded_event(event))
d769041f
SR
2185 cpu_buffer->entries--;
2186
2187 rb_update_read_stamp(cpu_buffer, event);
2188
2189 length = rb_event_length(event);
6f807acd 2190 cpu_buffer->reader_page->read += length;
7a8e76a3
SR
2191}
2192
2193static void rb_advance_iter(struct ring_buffer_iter *iter)
2194{
2195 struct ring_buffer *buffer;
2196 struct ring_buffer_per_cpu *cpu_buffer;
2197 struct ring_buffer_event *event;
2198 unsigned length;
2199
2200 cpu_buffer = iter->cpu_buffer;
2201 buffer = cpu_buffer->buffer;
2202
2203 /*
2204 * Check if we are at the end of the buffer.
2205 */
bf41a158 2206 if (iter->head >= rb_page_size(iter->head_page)) {
3e89c7bb
SR
2207 if (RB_WARN_ON(buffer,
2208 iter->head_page == cpu_buffer->commit_page))
2209 return;
d769041f 2210 rb_inc_iter(iter);
7a8e76a3
SR
2211 return;
2212 }
2213
2214 event = rb_iter_head_event(iter);
2215
2216 length = rb_event_length(event);
2217
2218 /*
2219 * This should not be called to advance the header if we are
2220 * at the tail of the buffer.
2221 */
3e89c7bb 2222 if (RB_WARN_ON(cpu_buffer,
f536aafc 2223 (iter->head_page == cpu_buffer->commit_page) &&
3e89c7bb
SR
2224 (iter->head + length > rb_commit_index(cpu_buffer))))
2225 return;
7a8e76a3
SR
2226
2227 rb_update_iter_read_stamp(iter, event);
2228
2229 iter->head += length;
2230
2231 /* check for end of page padding */
bf41a158
SR
2232 if ((iter->head >= rb_page_size(iter->head_page)) &&
2233 (iter->head_page != cpu_buffer->commit_page))
7a8e76a3
SR
2234 rb_advance_iter(iter);
2235}
2236
f83c9d0f
SR
2237static struct ring_buffer_event *
2238rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
7a8e76a3
SR
2239{
2240 struct ring_buffer_per_cpu *cpu_buffer;
2241 struct ring_buffer_event *event;
d769041f 2242 struct buffer_page *reader;
818e3dd3 2243 int nr_loops = 0;
7a8e76a3 2244
7a8e76a3
SR
2245 cpu_buffer = buffer->buffers[cpu];
2246
2247 again:
818e3dd3
SR
2248 /*
2249 * We repeat when a timestamp is encountered. It is possible
2250 * to get multiple timestamps from an interrupt entering just
2251 * as one timestamp is about to be written. The max times
2252 * that this can happen is the number of nested interrupts we
2253 * can have. Nesting 10 deep of interrupts is clearly
2254 * an anomaly.
2255 */
3e89c7bb 2256 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
818e3dd3 2257 return NULL;
818e3dd3 2258
d769041f
SR
2259 reader = rb_get_reader_page(cpu_buffer);
2260 if (!reader)
7a8e76a3
SR
2261 return NULL;
2262
d769041f 2263 event = rb_reader_event(cpu_buffer);
7a8e76a3
SR
2264
2265 switch (event->type) {
2266 case RINGBUF_TYPE_PADDING:
2d622719
TZ
2267 if (rb_null_event(event))
2268 RB_WARN_ON(cpu_buffer, 1);
2269 /*
2270 * Because the writer could be discarding every
2271 * event it creates (which would probably be bad),
2272 * if we were to go back to "again" then we may never
2273 * catch up, and will trigger the warn on, or lock
2274 * the box. Return the padding, and we will release
2275 * the current locks, and try again.
2276 */
d769041f 2277 rb_advance_reader(cpu_buffer);
2d622719 2278 return event;
7a8e76a3
SR
2279
2280 case RINGBUF_TYPE_TIME_EXTEND:
2281 /* Internal data, OK to advance */
d769041f 2282 rb_advance_reader(cpu_buffer);
7a8e76a3
SR
2283 goto again;
2284
2285 case RINGBUF_TYPE_TIME_STAMP:
2286 /* FIXME: not implemented */
d769041f 2287 rb_advance_reader(cpu_buffer);
7a8e76a3
SR
2288 goto again;
2289
2290 case RINGBUF_TYPE_DATA:
2291 if (ts) {
2292 *ts = cpu_buffer->read_stamp + event->time_delta;
37886f6a
SR
2293 ring_buffer_normalize_time_stamp(buffer,
2294 cpu_buffer->cpu, ts);
7a8e76a3
SR
2295 }
2296 return event;
2297
2298 default:
2299 BUG();
2300 }
2301
2302 return NULL;
2303}
c4f50183 2304EXPORT_SYMBOL_GPL(ring_buffer_peek);
7a8e76a3 2305
f83c9d0f
SR
2306static struct ring_buffer_event *
2307rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
7a8e76a3
SR
2308{
2309 struct ring_buffer *buffer;
2310 struct ring_buffer_per_cpu *cpu_buffer;
2311 struct ring_buffer_event *event;
818e3dd3 2312 int nr_loops = 0;
7a8e76a3
SR
2313
2314 if (ring_buffer_iter_empty(iter))
2315 return NULL;
2316
2317 cpu_buffer = iter->cpu_buffer;
2318 buffer = cpu_buffer->buffer;
2319
2320 again:
818e3dd3
SR
2321 /*
2322 * We repeat when a timestamp is encountered. It is possible
2323 * to get multiple timestamps from an interrupt entering just
2324 * as one timestamp is about to be written. The max times
2325 * that this can happen is the number of nested interrupts we
2326 * can have. Nesting 10 deep of interrupts is clearly
2327 * an anomaly.
2328 */
3e89c7bb 2329 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
818e3dd3 2330 return NULL;
818e3dd3 2331
7a8e76a3
SR
2332 if (rb_per_cpu_empty(cpu_buffer))
2333 return NULL;
2334
2335 event = rb_iter_head_event(iter);
2336
2337 switch (event->type) {
2338 case RINGBUF_TYPE_PADDING:
2d622719
TZ
2339 if (rb_null_event(event)) {
2340 rb_inc_iter(iter);
2341 goto again;
2342 }
2343 rb_advance_iter(iter);
2344 return event;
7a8e76a3
SR
2345
2346 case RINGBUF_TYPE_TIME_EXTEND:
2347 /* Internal data, OK to advance */
2348 rb_advance_iter(iter);
2349 goto again;
2350
2351 case RINGBUF_TYPE_TIME_STAMP:
2352 /* FIXME: not implemented */
2353 rb_advance_iter(iter);
2354 goto again;
2355
2356 case RINGBUF_TYPE_DATA:
2357 if (ts) {
2358 *ts = iter->read_stamp + event->time_delta;
37886f6a
SR
2359 ring_buffer_normalize_time_stamp(buffer,
2360 cpu_buffer->cpu, ts);
7a8e76a3
SR
2361 }
2362 return event;
2363
2364 default:
2365 BUG();
2366 }
2367
2368 return NULL;
2369}
c4f50183 2370EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
7a8e76a3 2371
f83c9d0f
SR
2372/**
2373 * ring_buffer_peek - peek at the next event to be read
2374 * @buffer: The ring buffer to read
2375 * @cpu: The cpu to peek at
2376 * @ts: The timestamp counter of this event.
2377 *
2378 * This will return the event that will be read next, but does
2379 * not consume the data.
2380 */
2381struct ring_buffer_event *
2382ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2383{
2384 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
8aabee57 2385 struct ring_buffer_event *event;
f83c9d0f
SR
2386 unsigned long flags;
2387
554f786e 2388 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 2389 return NULL;
554f786e 2390
2d622719 2391 again:
f83c9d0f
SR
2392 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2393 event = rb_buffer_peek(buffer, cpu, ts);
2394 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2395
2d622719
TZ
2396 if (event && event->type == RINGBUF_TYPE_PADDING) {
2397 cpu_relax();
2398 goto again;
2399 }
2400
f83c9d0f
SR
2401 return event;
2402}
2403
2404/**
2405 * ring_buffer_iter_peek - peek at the next event to be read
2406 * @iter: The ring buffer iterator
2407 * @ts: The timestamp counter of this event.
2408 *
2409 * This will return the event that will be read next, but does
2410 * not increment the iterator.
2411 */
2412struct ring_buffer_event *
2413ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2414{
2415 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2416 struct ring_buffer_event *event;
2417 unsigned long flags;
2418
2d622719 2419 again:
f83c9d0f
SR
2420 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2421 event = rb_iter_peek(iter, ts);
2422 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2423
2d622719
TZ
2424 if (event && event->type == RINGBUF_TYPE_PADDING) {
2425 cpu_relax();
2426 goto again;
2427 }
2428
f83c9d0f
SR
2429 return event;
2430}
2431
7a8e76a3
SR
2432/**
2433 * ring_buffer_consume - return an event and consume it
2434 * @buffer: The ring buffer to get the next event from
 * @cpu: the cpu to read the buffer from
 * @ts: a variable to store the event's timestamp
2435 *
2436 * Returns the next event in the ring buffer, and that event is consumed.
2437 * Meaning that sequential reads will keep returning a different event,
2438 * and eventually empty the ring buffer if the producer is slower.
2439 */
2440struct ring_buffer_event *
2441ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2442{
554f786e
SR
2443 struct ring_buffer_per_cpu *cpu_buffer;
2444 struct ring_buffer_event *event = NULL;
f83c9d0f 2445 unsigned long flags;
7a8e76a3 2446
2d622719 2447 again:
554f786e
SR
2448 /* might be called in atomic */
2449 preempt_disable();
2450
9e01c1b7 2451 if (!cpumask_test_cpu(cpu, buffer->cpumask))
554f786e 2452 goto out;
7a8e76a3 2453
554f786e 2454 cpu_buffer = buffer->buffers[cpu];
f83c9d0f
SR
2455 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2456
2457 event = rb_buffer_peek(buffer, cpu, ts);
7a8e76a3 2458 if (!event)
554f786e 2459 goto out_unlock;
7a8e76a3 2460
d769041f 2461 rb_advance_reader(cpu_buffer);
7a8e76a3 2462
554f786e 2463 out_unlock:
f83c9d0f
SR
2464 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2465
554f786e
SR
2466 out:
2467 preempt_enable();
2468
2d622719
TZ
2469 if (event && event->type == RINGBUF_TYPE_PADDING) {
2470 cpu_relax();
2471 goto again;
2472 }
2473
7a8e76a3
SR
2474 return event;
2475}
c4f50183 2476EXPORT_SYMBOL_GPL(ring_buffer_consume);
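
/*
 * Illustrative sketch (not part of the original file): draining one
 * cpu's buffer with the consuming read. Each successful call returns
 * the next event and advances the reader, so the loop terminates once
 * the reader catches up with the producer. ring_buffer_event_data()
 * is the accessor from <linux/ring_buffer.h>.
 */
static void example_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL) {
		void *data = ring_buffer_event_data(event);

		/* process data and ts here */
		(void)data;
	}
}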
7a8e76a3
SR
2477
2478/**
2479 * ring_buffer_read_start - start a non consuming read of the buffer
2480 * @buffer: The ring buffer to read from
2481 * @cpu: The cpu buffer to iterate over
2482 *
2483 * This starts up an iteration through the buffer. It also disables
2484 * the recording to the buffer until the reading is finished.
2485 * This prevents the reading from being corrupted. This is not
2486 * a consuming read, so a producer is not expected.
2487 *
2488 * Must be paired with ring_buffer_read_finish.
2489 */
2490struct ring_buffer_iter *
2491ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2492{
2493 struct ring_buffer_per_cpu *cpu_buffer;
8aabee57 2494 struct ring_buffer_iter *iter;
d769041f 2495 unsigned long flags;
7a8e76a3 2496
9e01c1b7 2497 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 2498 return NULL;
7a8e76a3
SR
2499
2500 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
2501 if (!iter)
8aabee57 2502 return NULL;
7a8e76a3
SR
2503
2504 cpu_buffer = buffer->buffers[cpu];
2505
2506 iter->cpu_buffer = cpu_buffer;
2507
2508 atomic_inc(&cpu_buffer->record_disabled);
2509 synchronize_sched();
2510
f83c9d0f 2511 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3e03fb7f 2512 __raw_spin_lock(&cpu_buffer->lock);
642edba5 2513 rb_iter_reset(iter);
3e03fb7f 2514 __raw_spin_unlock(&cpu_buffer->lock);
f83c9d0f 2515 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7a8e76a3
SR
2516
2517 return iter;
2518}
c4f50183 2519EXPORT_SYMBOL_GPL(ring_buffer_read_start);
7a8e76a3
SR
2520
2521/**
2522 * ring_buffer_finish - finish reading the iterator of the buffer
2523 * @iter: The iterator retrieved by ring_buffer_read_start
2524 *
2525 * This re-enables the recording to the buffer, and frees the
2526 * iterator.
2527 */
2528void
2529ring_buffer_read_finish(struct ring_buffer_iter *iter)
2530{
2531 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2532
2533 atomic_dec(&cpu_buffer->record_disabled);
2534 kfree(iter);
2535}
c4f50183 2536EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
7a8e76a3
SR
2537
2538/**
2539 * ring_buffer_read - read the next item in the ring buffer by the iterator
2540 * @iter: The ring buffer iterator
2541 * @ts: The time stamp of the event read.
2542 *
2543 * This reads the next event in the ring buffer and increments the iterator.
2544 */
2545struct ring_buffer_event *
2546ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2547{
2548 struct ring_buffer_event *event;
f83c9d0f
SR
2549 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2550 unsigned long flags;
7a8e76a3 2551
2d622719 2552 again:
f83c9d0f
SR
2553 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2554 event = rb_iter_peek(iter, ts);
7a8e76a3 2555 if (!event)
f83c9d0f 2556 goto out;
7a8e76a3
SR
2557
2558 rb_advance_iter(iter);
f83c9d0f
SR
2559 out:
2560 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7a8e76a3 2561
2d622719
TZ
2562 if (event && event->type == RINGBUF_TYPE_PADDING) {
2563 cpu_relax();
2564 goto again;
2565 }
2566
7a8e76a3
SR
2567 return event;
2568}
c4f50183 2569EXPORT_SYMBOL_GPL(ring_buffer_read);
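
/*
 * Illustrative sketch (not part of the original file): a non-consuming
 * walk over one cpu buffer with the iterator API. Recording stays
 * disabled between read_start and read_finish, so the events are
 * stable while they are examined.
 */
static void example_iterate_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return;

	while ((event = ring_buffer_read(iter, &ts)) != NULL) {
		/* inspect the event; nothing is consumed */
	}

	ring_buffer_read_finish(iter);
}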
7a8e76a3
SR
2570
2571/**
2572 * ring_buffer_size - return the size of the ring buffer (in bytes)
2573 * @buffer: The ring buffer.
2574 */
2575unsigned long ring_buffer_size(struct ring_buffer *buffer)
2576{
2577 return BUF_PAGE_SIZE * buffer->pages;
2578}
c4f50183 2579EXPORT_SYMBOL_GPL(ring_buffer_size);
7a8e76a3
SR
2580
2581static void
2582rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2583{
2584 cpu_buffer->head_page
2585 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
bf41a158 2586 local_set(&cpu_buffer->head_page->write, 0);
abc9b56d 2587 local_set(&cpu_buffer->head_page->page->commit, 0);
d769041f 2588
6f807acd 2589 cpu_buffer->head_page->read = 0;
bf41a158
SR
2590
2591 cpu_buffer->tail_page = cpu_buffer->head_page;
2592 cpu_buffer->commit_page = cpu_buffer->head_page;
2593
2594 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2595 local_set(&cpu_buffer->reader_page->write, 0);
abc9b56d 2596 local_set(&cpu_buffer->reader_page->page->commit, 0);
6f807acd 2597 cpu_buffer->reader_page->read = 0;
7a8e76a3 2598
7a8e76a3
SR
2599 cpu_buffer->overrun = 0;
2600 cpu_buffer->entries = 0;
69507c06
SR
2601
2602 cpu_buffer->write_stamp = 0;
2603 cpu_buffer->read_stamp = 0;
7a8e76a3
SR
2604}
2605
2606/**
2607 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2608 * @buffer: The ring buffer to reset a per cpu buffer of
2609 * @cpu: The CPU buffer to be reset
2610 */
2611void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2612{
2613 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2614 unsigned long flags;
2615
9e01c1b7 2616 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 2617 return;
7a8e76a3 2618
f83c9d0f
SR
2619 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2620
3e03fb7f 2621 __raw_spin_lock(&cpu_buffer->lock);
7a8e76a3
SR
2622
2623 rb_reset_cpu(cpu_buffer);
2624
3e03fb7f 2625 __raw_spin_unlock(&cpu_buffer->lock);
f83c9d0f
SR
2626
2627 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7a8e76a3 2628}
c4f50183 2629EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
7a8e76a3
SR
2630
2631/**
2632 * ring_buffer_reset - reset a ring buffer
2633 * @buffer: The ring buffer to reset all cpu buffers
2634 */
2635void ring_buffer_reset(struct ring_buffer *buffer)
2636{
7a8e76a3
SR
2637 int cpu;
2638
7a8e76a3 2639 for_each_buffer_cpu(buffer, cpu)
d769041f 2640 ring_buffer_reset_cpu(buffer, cpu);
7a8e76a3 2641}
c4f50183 2642EXPORT_SYMBOL_GPL(ring_buffer_reset);
7a8e76a3
SR
2643
2644/**
2645 * ring_buffer_empty - is the ring buffer empty?
2646 * @buffer: The ring buffer to test
2647 */
2648int ring_buffer_empty(struct ring_buffer *buffer)
2649{
2650 struct ring_buffer_per_cpu *cpu_buffer;
2651 int cpu;
2652
2653 /* yes this is racy, but if you don't like the race, lock the buffer */
2654 for_each_buffer_cpu(buffer, cpu) {
2655 cpu_buffer = buffer->buffers[cpu];
2656 if (!rb_per_cpu_empty(cpu_buffer))
2657 return 0;
2658 }
554f786e 2659
7a8e76a3
SR
2660 return 1;
2661}
c4f50183 2662EXPORT_SYMBOL_GPL(ring_buffer_empty);
7a8e76a3
SR
2663
2664/**
2665 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2666 * @buffer: The ring buffer
2667 * @cpu: The CPU buffer to test
2668 */
2669int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2670{
2671 struct ring_buffer_per_cpu *cpu_buffer;
8aabee57 2672 int ret;
7a8e76a3 2673
9e01c1b7 2674 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 2675 return 1;
7a8e76a3
SR
2676
2677 cpu_buffer = buffer->buffers[cpu];
554f786e
SR
2678 ret = rb_per_cpu_empty(cpu_buffer);
2679
554f786e
SR
2680
2681 return ret;
7a8e76a3 2682}
c4f50183 2683EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
7a8e76a3
SR
2684
2685/**
2686 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2687 * @buffer_a: One buffer to swap with
2688 * @buffer_b: The other buffer to swap with
2689 *
2690 * This function is useful for tracers that want to take a "snapshot"
2691 * of a CPU buffer and have another backup buffer lying around.
2692 * It is expected that the tracer handles the cpu buffer not being
2693 * used at the moment.
2694 */
2695int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2696 struct ring_buffer *buffer_b, int cpu)
2697{
2698 struct ring_buffer_per_cpu *cpu_buffer_a;
2699 struct ring_buffer_per_cpu *cpu_buffer_b;
554f786e
SR
2700 int ret = -EINVAL;
2701
9e01c1b7
RR
2702 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
2703 !cpumask_test_cpu(cpu, buffer_b->cpumask))
554f786e 2704 goto out;
7a8e76a3
SR
2705
2706 /* At least make sure the two buffers are somewhat the same */
6d102bc6 2707 if (buffer_a->pages != buffer_b->pages)
554f786e
SR
2708 goto out;
2709
2710 ret = -EAGAIN;
7a8e76a3 2711
97b17efe 2712 if (ring_buffer_flags != RB_BUFFERS_ON)
554f786e 2713 goto out;
97b17efe
SR
2714
2715 if (atomic_read(&buffer_a->record_disabled))
554f786e 2716 goto out;
97b17efe
SR
2717
2718 if (atomic_read(&buffer_b->record_disabled))
554f786e 2719 goto out;
97b17efe 2720
7a8e76a3
SR
2721 cpu_buffer_a = buffer_a->buffers[cpu];
2722 cpu_buffer_b = buffer_b->buffers[cpu];
2723
97b17efe 2724 if (atomic_read(&cpu_buffer_a->record_disabled))
554f786e 2725 goto out;
97b17efe
SR
2726
2727 if (atomic_read(&cpu_buffer_b->record_disabled))
554f786e 2728 goto out;
97b17efe 2729
7a8e76a3
SR
2730 /*
2731 * We can't do a synchronize_sched here because this
2732 * function can be called in atomic context.
2733 * Normally this will be called from the same CPU as cpu.
2734 * If not, it's up to the caller to protect this.
2735 */
2736 atomic_inc(&cpu_buffer_a->record_disabled);
2737 atomic_inc(&cpu_buffer_b->record_disabled);
2738
2739 buffer_a->buffers[cpu] = cpu_buffer_b;
2740 buffer_b->buffers[cpu] = cpu_buffer_a;
2741
2742 cpu_buffer_b->buffer = buffer_a;
2743 cpu_buffer_a->buffer = buffer_b;
2744
2745 atomic_dec(&cpu_buffer_a->record_disabled);
2746 atomic_dec(&cpu_buffer_b->record_disabled);
2747
554f786e
SR
2748 ret = 0;
2749out:
554f786e 2750 return ret;
7a8e76a3 2751}
c4f50183 2752EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
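
/*
 * Illustrative sketch (not part of the original file): the "snapshot"
 * pattern described above. A tracer keeps a spare buffer of the same
 * size (spare here is hypothetical) and swaps the live cpu buffer with
 * it, then reads the snapshot at leisure. Failure modes are the
 * -EINVAL/-EAGAIN returns above.
 */
static int example_snapshot_cpu(struct ring_buffer *live,
				struct ring_buffer *spare)
{
	int cpu = raw_smp_processor_id();
	int ret;

	ret = ring_buffer_swap_cpu(spare, live, cpu);
	if (ret)
		return ret;

	/* spare now holds what was in the live buffer for this cpu */
	return 0;
}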
7a8e76a3 2753
8789a9e7 2754static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
667d2412
LJ
2755 struct buffer_data_page *bpage,
2756 unsigned int offset)
8789a9e7
SR
2757{
2758 struct ring_buffer_event *event;
2759 unsigned long head;
2760
2761 __raw_spin_lock(&cpu_buffer->lock);
667d2412 2762 for (head = offset; head < local_read(&bpage->commit);
8789a9e7
SR
2763 head += rb_event_length(event)) {
2764
044fa782 2765 event = __rb_data_page_index(bpage, head);
8789a9e7
SR
2766 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
2767 return;
2768 /* Only count data entries */
2769 if (event->type != RINGBUF_TYPE_DATA)
2770 continue;
2771 cpu_buffer->entries--;
2772 }
2773 __raw_spin_unlock(&cpu_buffer->lock);
2774}
2775
2776/**
2777 * ring_buffer_alloc_read_page - allocate a page to read from buffer
2778 * @buffer: the buffer to allocate for.
2779 *
2780 * This function is used in conjunction with ring_buffer_read_page.
2781 * When reading a full page from the ring buffer, these functions
2782 * can be used to speed up the process. The calling function should
2783 * allocate a few pages first with this function. Then when it
2784 * needs to get pages from the ring buffer, it passes the result
2785 * of this function into ring_buffer_read_page, which will swap
2786 * the page that was allocated with the read page of the buffer.
2787 *
2788 * Returns:
2789 * The page allocated, or NULL on error.
2790 */
2791void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2792{
044fa782 2793 struct buffer_data_page *bpage;
ef7a4a16 2794 unsigned long addr;
8789a9e7
SR
2795
2796 addr = __get_free_page(GFP_KERNEL);
2797 if (!addr)
2798 return NULL;
2799
044fa782 2800 bpage = (void *)addr;
8789a9e7 2801
ef7a4a16
SR
2802 rb_init_page(bpage);
2803
044fa782 2804 return bpage;
8789a9e7
SR
2805}
2806
2807/**
2808 * ring_buffer_free_read_page - free an allocated read page
2810 * @buffer: the buffer the page was allocated for
2810 * @data: the page to free
2811 *
2812 * Free a page allocated from ring_buffer_alloc_read_page.
2813 */
2814void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2815{
2816 free_page((unsigned long)data);
2817}
2818
2819/**
2820 * ring_buffer_read_page - extract a page from the ring buffer
2821 * @buffer: buffer to extract from
2822 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
ef7a4a16 2823 * @len: amount to extract
8789a9e7
SR
2824 * @cpu: the cpu of the buffer to extract
2825 * @full: should the extraction only happen when the page is full.
2826 *
2827 * This function will pull out a page from the ring buffer and consume it.
2828 * @data_page must be the address of the variable that was returned
2829 * from ring_buffer_alloc_read_page. This is because the page might be used
2830 * to swap with a page in the ring buffer.
2831 *
2832 * for example:
b85fa01e 2833 * rpage = ring_buffer_alloc_read_page(buffer);
8789a9e7
SR
2834 * if (!rpage)
2835 * return error;
ef7a4a16 2836 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
667d2412
LJ
2837 * if (ret >= 0)
2838 * process_page(rpage, ret);
8789a9e7
SR
2839 *
2840 * When @full is set, the function will not return data unless
2841 * the writer is off the reader page.
2842 *
2843 * Note: it is up to the calling functions to handle sleeps and wakeups.
2844 * The ring buffer can be used anywhere in the kernel and can not
2845 * blindly call wake_up. The layer that uses the ring buffer must be
2846 * responsible for that.
2847 *
2848 * Returns:
667d2412
LJ
2849 * >=0 if data has been transferred, returns the offset of consumed data.
2850 * <0 if no data has been transferred.
8789a9e7
SR
2851 */
2852int ring_buffer_read_page(struct ring_buffer *buffer,
ef7a4a16 2853 void **data_page, size_t len, int cpu, int full)
8789a9e7
SR
2854{
2855 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2856 struct ring_buffer_event *event;
044fa782 2857 struct buffer_data_page *bpage;
ef7a4a16 2858 struct buffer_page *reader;
8789a9e7 2859 unsigned long flags;
ef7a4a16 2860 unsigned int commit;
667d2412 2861 unsigned int read;
4f3640f8 2862 u64 save_timestamp;
667d2412 2863 int ret = -1;
8789a9e7 2864
554f786e
SR
2865 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2866 goto out;
2867
474d32b6
SR
2868 /*
2869 * If len is not big enough to hold the page header, then
2870 * we can not copy anything.
2871 */
2872 if (len <= BUF_PAGE_HDR_SIZE)
554f786e 2873 goto out;
474d32b6
SR
2874
2875 len -= BUF_PAGE_HDR_SIZE;
2876
8789a9e7 2877 if (!data_page)
554f786e 2878 goto out;
8789a9e7 2879
044fa782
SR
2880 bpage = *data_page;
2881 if (!bpage)
554f786e 2882 goto out;
8789a9e7
SR
2883
2884 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2885
ef7a4a16
SR
2886 reader = rb_get_reader_page(cpu_buffer);
2887 if (!reader)
554f786e 2888 goto out_unlock;
8789a9e7 2889
ef7a4a16
SR
2890 event = rb_reader_event(cpu_buffer);
2891
2892 read = reader->read;
2893 commit = rb_page_commit(reader);
667d2412 2894
8789a9e7 2895 /*
474d32b6
SR
2896 * If this page has been partially read or
2897 * if len is not big enough to read the rest of the page or
2898 * a writer is still on the page, then
2899 * we must copy the data from the page to the buffer.
2900 * Otherwise, we can simply swap the page with the one passed in.
8789a9e7 2901 */
474d32b6 2902 if (read || (len < (commit - read)) ||
ef7a4a16 2903 cpu_buffer->reader_page == cpu_buffer->commit_page) {
667d2412 2904 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
474d32b6
SR
2905 unsigned int rpos = read;
2906 unsigned int pos = 0;
ef7a4a16 2907 unsigned int size;
8789a9e7
SR
2908
2909 if (full)
554f786e 2910 goto out_unlock;
8789a9e7 2911
ef7a4a16
SR
2912 if (len > (commit - read))
2913 len = (commit - read);
2914
2915 size = rb_event_length(event);
2916
2917 if (len < size)
554f786e 2918 goto out_unlock;
ef7a4a16 2919
4f3640f8
SR
2920 /* save the current timestamp, since the user will need it */
2921 save_timestamp = cpu_buffer->read_stamp;
2922
ef7a4a16
SR
2923 /* Need to copy one event at a time */
2924 do {
474d32b6 2925 memcpy(bpage->data + pos, rpage->data + rpos, size);
ef7a4a16
SR
2926
2927 len -= size;
2928
2929 rb_advance_reader(cpu_buffer);
474d32b6
SR
2930 rpos = reader->read;
2931 pos += size;
ef7a4a16
SR
2932
2933 event = rb_reader_event(cpu_buffer);
2934 size = rb_event_length(event);
2935 } while (len > size);
667d2412
LJ
2936
2937 /* update bpage */
ef7a4a16 2938 local_set(&bpage->commit, pos);
4f3640f8 2939 bpage->time_stamp = save_timestamp;
ef7a4a16 2940
474d32b6
SR
2941 /* we copied everything to the beginning */
2942 read = 0;
8789a9e7
SR
2943 } else {
2944 /* swap the pages */
044fa782 2945 rb_init_page(bpage);
ef7a4a16
SR
2946 bpage = reader->page;
2947 reader->page = *data_page;
2948 local_set(&reader->write, 0);
2949 reader->read = 0;
044fa782 2950 *data_page = bpage;
ef7a4a16
SR
2951
2952 /* update the entry counter */
2953 rb_remove_entries(cpu_buffer, bpage, read);
8789a9e7 2954 }
667d2412 2955 ret = read;
8789a9e7 2956
554f786e 2957 out_unlock:
8789a9e7
SR
2958 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2959
554f786e 2960 out:
8789a9e7
SR
2961 return ret;
2962}
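
/*
 * Illustrative sketch (not part of the original file): a fuller version
 * of the usage outlined in the kerneldoc above. The page pointer must
 * come from ring_buffer_alloc_read_page(), since a successful call may
 * swap it with the reader page rather than copy into it.
 */
static void example_read_full_page(struct ring_buffer *buffer, int cpu)
{
	void *page;
	int ret;

	page = ring_buffer_alloc_read_page(buffer);
	if (!page)
		return;

	ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
	if (ret >= 0) {
		/* events start at offset ret within the returned page */
	}

	ring_buffer_free_read_page(buffer, page);
}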
2963
a3583244
SR
2964static ssize_t
2965rb_simple_read(struct file *filp, char __user *ubuf,
2966 size_t cnt, loff_t *ppos)
2967{
5e39841c 2968 unsigned long *p = filp->private_data;
a3583244
SR
2969 char buf[64];
2970 int r;
2971
033601a3
SR
2972 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
2973 r = sprintf(buf, "permanently disabled\n");
2974 else
2975 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
a3583244
SR
2976
2977 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2978}
2979
2980static ssize_t
2981rb_simple_write(struct file *filp, const char __user *ubuf,
2982 size_t cnt, loff_t *ppos)
2983{
5e39841c 2984 unsigned long *p = filp->private_data;
a3583244 2985 char buf[64];
5e39841c 2986 unsigned long val;
a3583244
SR
2987 int ret;
2988
2989 if (cnt >= sizeof(buf))
2990 return -EINVAL;
2991
2992 if (copy_from_user(&buf, ubuf, cnt))
2993 return -EFAULT;
2994
2995 buf[cnt] = 0;
2996
2997 ret = strict_strtoul(buf, 10, &val);
2998 if (ret < 0)
2999 return ret;
3000
033601a3
SR
3001 if (val)
3002 set_bit(RB_BUFFERS_ON_BIT, p);
3003 else
3004 clear_bit(RB_BUFFERS_ON_BIT, p);
a3583244
SR
3005
3006 (*ppos)++;
3007
3008 return cnt;
3009}
3010
5e2336a0 3011static const struct file_operations rb_simple_fops = {
a3583244
SR
3012 .open = tracing_open_generic,
3013 .read = rb_simple_read,
3014 .write = rb_simple_write,
3015};
3016
3017
3018static __init int rb_init_debugfs(void)
3019{
3020 struct dentry *d_tracer;
a3583244
SR
3021
3022 d_tracer = tracing_init_dentry();
3023
5452af66
FW
3024 trace_create_file("tracing_on", 0644, d_tracer,
3025 &ring_buffer_flags, &rb_simple_fops);
a3583244
SR
3026
3027 return 0;
3028}
3029
3030fs_initcall(rb_init_debugfs);
554f786e 3031
59222efe 3032#ifdef CONFIG_HOTPLUG_CPU
09c9e84d
FW
3033static int rb_cpu_notify(struct notifier_block *self,
3034 unsigned long action, void *hcpu)
554f786e
SR
3035{
3036 struct ring_buffer *buffer =
3037 container_of(self, struct ring_buffer, cpu_notify);
3038 long cpu = (long)hcpu;
3039
3040 switch (action) {
3041 case CPU_UP_PREPARE:
3042 case CPU_UP_PREPARE_FROZEN:
3043 if (cpu_isset(cpu, *buffer->cpumask))
3044 return NOTIFY_OK;
3045
3046 buffer->buffers[cpu] =
3047 rb_allocate_cpu_buffer(buffer, cpu);
3048 if (!buffer->buffers[cpu]) {
3049 WARN(1, "failed to allocate ring buffer on CPU %ld\n",
3050 cpu);
3051 return NOTIFY_OK;
3052 }
3053 smp_wmb();
3054 cpu_set(cpu, *buffer->cpumask);
3055 break;
3056 case CPU_DOWN_PREPARE:
3057 case CPU_DOWN_PREPARE_FROZEN:
3058 /*
3059 * Do nothing.
3060 * If we were to free the buffer, then the user would
3061 * lose any trace that was in the buffer.
3062 */
3063 break;
3064 default:
3065 break;
3066 }
3067 return NOTIFY_OK;
3068}
3069#endif