1/*
2 * Generic ring buffer
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6#include <linux/ring_buffer.h>
7#include <linux/spinlock.h>
8#include <linux/debugfs.h>
9#include <linux/uaccess.h>
10#include <linux/module.h>
11#include <linux/percpu.h>
12#include <linux/mutex.h>
13#include <linux/sched.h> /* used for sched_clock() (for now) */
14#include <linux/init.h>
15#include <linux/hash.h>
16#include <linux/list.h>
17#include <linux/fs.h>
18
19#include "trace.h"
20
21/* Global flag to disable all recording to ring buffers */
22static int ring_buffers_off __read_mostly;
23
24/**
25 * tracing_on - enable all tracing buffers
26 *
27 * This function enables all tracing buffers that may have been
28 * disabled with tracing_off.
29 */
30void tracing_on(void)
31{
32 ring_buffers_off = 0;
33}
34
35/**
36 * tracing_off - turn off all tracing buffers
37 *
38 * This function stops all tracing buffers from recording data.
39 * It does not disable any overhead the tracers themselves may
40 * be causing. This function simply causes all recording to
41 * the ring buffers to fail.
42 */
43void tracing_off(void)
44{
45 ring_buffers_off = 1;
46}
47
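/*
 * Illustrative sketch (not part of the original file): how a caller might
 * bracket a dump of the buffers with tracing_off()/tracing_on(). The
 * example_* name is hypothetical.
 */
static inline void example_freeze_buffers_for_dump(void)
{
        tracing_off();          /* all further ring buffer writes fail */
        /* ... inspect or dump the frozen buffers here ... */
        tracing_on();           /* recording resumes */
}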
48/* Up this if you want to test the TIME_EXTENTS and normalization */
49#define DEBUG_SHIFT 0
50
51/* FIXME!!! */
52u64 ring_buffer_time_stamp(int cpu)
53{
54 u64 time;
55
56 preempt_disable_notrace();
57 /* shift to debug/test normalization and TIME_EXTENTS */
58 time = sched_clock() << DEBUG_SHIFT;
59 preempt_enable_notrace();
60
61 return time;
62}
63
64void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
65{
66 /* Just stupid testing the normalize function and deltas */
67 *ts >>= DEBUG_SHIFT;
68}
69
70#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
71#define RB_ALIGNMENT_SHIFT 2
72#define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT)
73#define RB_MAX_SMALL_DATA 28
74
75enum {
76 RB_LEN_TIME_EXTEND = 8,
77 RB_LEN_TIME_STAMP = 16,
78};
79
80/* inline for ring buffer fast paths */
81static inline unsigned
82rb_event_length(struct ring_buffer_event *event)
83{
84 unsigned length;
85
86 switch (event->type) {
87 case RINGBUF_TYPE_PADDING:
88 /* undefined */
89 return -1;
90
91 case RINGBUF_TYPE_TIME_EXTEND:
92 return RB_LEN_TIME_EXTEND;
93
94 case RINGBUF_TYPE_TIME_STAMP:
95 return RB_LEN_TIME_STAMP;
96
97 case RINGBUF_TYPE_DATA:
98 if (event->len)
99 length = event->len << RB_ALIGNMENT_SHIFT;
100 else
101 length = event->array[0];
102 return length + RB_EVNT_HDR_SIZE;
103 default:
104 BUG();
105 }
106 /* not hit */
107 return 0;
108}
109
110/**
111 * ring_buffer_event_length - return the length of the event
112 * @event: the event to get the length of
113 */
114unsigned ring_buffer_event_length(struct ring_buffer_event *event)
115{
116 return rb_event_length(event);
117}
118
119/* inline for ring buffer fast paths */
120static inline void *
121rb_event_data(struct ring_buffer_event *event)
122{
123 BUG_ON(event->type != RINGBUF_TYPE_DATA);
124 /* If length is in len field, then array[0] has the data */
125 if (event->len)
126 return (void *)&event->array[0];
127 /* Otherwise length is in array[0] and array[1] has the data */
128 return (void *)&event->array[1];
129}
130
131/**
132 * ring_buffer_event_data - return the data of the event
133 * @event: the event to get the data from
134 */
135void *ring_buffer_event_data(struct ring_buffer_event *event)
136{
137 return rb_event_data(event);
138}
139
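/*
 * Illustrative sketch (hypothetical example_* helper): how a consumer can
 * look at an event once it has a pointer to it, e.g. from
 * ring_buffer_consume() further below. Assumes the producer stored a
 * NUL-terminated string; the length returned includes the event header.
 */
static inline void example_print_event(struct ring_buffer_event *event)
{
        void *body = ring_buffer_event_data(event);
        unsigned len = ring_buffer_event_length(event);

        printk(KERN_DEBUG "event: %u bytes total, payload \"%s\"\n",
               len, (char *)body);
}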
140#define for_each_buffer_cpu(buffer, cpu) \
141 for_each_cpu_mask(cpu, buffer->cpumask)
142
143#define TS_SHIFT 27
144#define TS_MASK ((1ULL << TS_SHIFT) - 1)
145#define TS_DELTA_TEST (~TS_MASK)
146
147/*
148 * This hack stolen from mm/slob.c.
149 * We can store per page timing information in the page frame of the page.
150 * Thanks to Peter Zijlstra for suggesting this idea.
151 */
152struct buffer_page {
153 u64 time_stamp; /* page time stamp */
154 local_t write; /* index for next write */
155 local_t commit; /* write committed index */
156 unsigned read; /* index for next read */
157 struct list_head list; /* list of free pages */
158 void *page; /* Actual data page */
159};
160
161/*
162 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
163 * this issue out.
164 */
165static inline void free_buffer_page(struct buffer_page *bpage)
166{
167 if (bpage->page)
168 free_page((unsigned long)bpage->page);
169 kfree(bpage);
170}
171
172/*
173 * We need to fit the time_stamp delta into 27 bits.
174 */
175static inline int test_time_stamp(u64 delta)
176{
177 if (delta & TS_DELTA_TEST)
178 return 1;
179 return 0;
180}
181
182#define BUF_PAGE_SIZE PAGE_SIZE
183
184/*
185 * head_page == tail_page && head == tail then buffer is empty.
186 */
187struct ring_buffer_per_cpu {
188 int cpu;
189 struct ring_buffer *buffer;
190 spinlock_t lock;
191 struct lock_class_key lock_key;
192 struct list_head pages;
193 struct buffer_page *head_page; /* read from head */
194 struct buffer_page *tail_page; /* write to tail */
195 struct buffer_page *commit_page; /* committed pages */
196 struct buffer_page *reader_page;
197 unsigned long overrun;
198 unsigned long entries;
199 u64 write_stamp;
200 u64 read_stamp;
201 atomic_t record_disabled;
202};
203
204struct ring_buffer {
205 unsigned long size;
206 unsigned pages;
207 unsigned flags;
208 int cpus;
209 cpumask_t cpumask;
210 atomic_t record_disabled;
211
212 struct mutex mutex;
213
214 struct ring_buffer_per_cpu **buffers;
215};
216
217struct ring_buffer_iter {
218 struct ring_buffer_per_cpu *cpu_buffer;
219 unsigned long head;
220 struct buffer_page *head_page;
221 u64 read_stamp;
222};
223
224#define RB_WARN_ON(buffer, cond) \
225 do { \
226 if (unlikely(cond)) { \
227 atomic_inc(&buffer->record_disabled); \
228 WARN_ON(1); \
229 } \
230 } while (0)
231
232#define RB_WARN_ON_RET(buffer, cond) \
233 do { \
234 if (unlikely(cond)) { \
235 atomic_inc(&buffer->record_disabled); \
236 WARN_ON(1); \
237 return -1; \
238 } \
239 } while (0)
240
241#define RB_WARN_ON_ONCE(buffer, cond) \
242 do { \
243 static int once; \
244 if (unlikely(cond) && !once) { \
245 once++; \
246 atomic_inc(&buffer->record_disabled); \
247 WARN_ON(1); \
248 } \
249 } while (0)
250
251/**
252 * rb_check_pages - integrity check of buffer pages
253 * @cpu_buffer: CPU buffer with pages to test
254 *
255 * As a safety measure we check to make sure the data pages have not
256 * been corrupted.
257 */
258static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
259{
260 struct list_head *head = &cpu_buffer->pages;
261 struct buffer_page *page, *tmp;
262
263 RB_WARN_ON_RET(cpu_buffer, head->next->prev != head);
264 RB_WARN_ON_RET(cpu_buffer, head->prev->next != head);
265
266 list_for_each_entry_safe(page, tmp, head, list) {
267 RB_WARN_ON_RET(cpu_buffer,
268 page->list.next->prev != &page->list);
269 RB_WARN_ON_RET(cpu_buffer,
270 page->list.prev->next != &page->list);
271 }
272
273 return 0;
274}
275
276static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
277 unsigned nr_pages)
278{
279 struct list_head *head = &cpu_buffer->pages;
280 struct buffer_page *page, *tmp;
281 unsigned long addr;
282 LIST_HEAD(pages);
283 unsigned i;
284
285 for (i = 0; i < nr_pages; i++) {
286 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
287 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
288 if (!page)
289 goto free_pages;
290 list_add(&page->list, &pages);
291
292 addr = __get_free_page(GFP_KERNEL);
293 if (!addr)
294 goto free_pages;
295 page->page = (void *)addr;
296 }
297
298 list_splice(&pages, head);
299
300 rb_check_pages(cpu_buffer);
301
302 return 0;
303
304 free_pages:
305 list_for_each_entry_safe(page, tmp, &pages, list) {
306 list_del_init(&page->list);
307 free_buffer_page(page);
308 }
309 return -ENOMEM;
310}
311
312static struct ring_buffer_per_cpu *
313rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
314{
315 struct ring_buffer_per_cpu *cpu_buffer;
316 struct buffer_page *page;
317 unsigned long addr;
318 int ret;
319
320 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
321 GFP_KERNEL, cpu_to_node(cpu));
322 if (!cpu_buffer)
323 return NULL;
324
325 cpu_buffer->cpu = cpu;
326 cpu_buffer->buffer = buffer;
327 spin_lock_init(&cpu_buffer->lock);
328 INIT_LIST_HEAD(&cpu_buffer->pages);
329
330 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
331 GFP_KERNEL, cpu_to_node(cpu));
332 if (!page)
333 goto fail_free_buffer;
334
335 cpu_buffer->reader_page = page;
336 addr = __get_free_page(GFP_KERNEL);
337 if (!addr)
338 goto fail_free_reader;
339 page->page = (void *)addr;
340
341 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
342
343 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
344 if (ret < 0)
345 goto fail_free_reader;
346
347 cpu_buffer->head_page
348 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
349 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
350
351 return cpu_buffer;
352
353 fail_free_reader:
354 free_buffer_page(cpu_buffer->reader_page);
355
356 fail_free_buffer:
357 kfree(cpu_buffer);
358 return NULL;
359}
360
361static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
362{
363 struct list_head *head = &cpu_buffer->pages;
364 struct buffer_page *page, *tmp;
365
366 list_del_init(&cpu_buffer->reader_page->list);
367 free_buffer_page(cpu_buffer->reader_page);
368
369 list_for_each_entry_safe(page, tmp, head, list) {
370 list_del_init(&page->list);
371 free_buffer_page(page);
372 }
373 kfree(cpu_buffer);
374}
375
376/*
377 * Causes compile errors if the struct buffer_page gets bigger
378 * than the struct page.
379 */
380extern int ring_buffer_page_too_big(void);
381
382/**
383 * ring_buffer_alloc - allocate a new ring_buffer
384 * @size: the size in bytes that is needed.
385 * @flags: attributes to set for the ring buffer.
386 *
387 * Currently the only flag that is available is the RB_FL_OVERWRITE
388 * flag. This flag means that the buffer will overwrite old data
389 * when the buffer wraps. If this flag is not set, the buffer will
390 * drop data when the tail hits the head.
391 */
392struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
393{
394 struct ring_buffer *buffer;
395 int bsize;
396 int cpu;
397
398 /* Paranoid! Optimizes out when all is well */
399 if (sizeof(struct buffer_page) > sizeof(struct page))
400 ring_buffer_page_too_big();
401
402
403 /* keep it in its own cache line */
404 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
405 GFP_KERNEL);
406 if (!buffer)
407 return NULL;
408
409 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
410 buffer->flags = flags;
411
412 /* need at least two pages */
413 if (buffer->pages == 1)
414 buffer->pages++;
415
416 buffer->cpumask = cpu_possible_map;
417 buffer->cpus = nr_cpu_ids;
418
419 bsize = sizeof(void *) * nr_cpu_ids;
420 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
421 GFP_KERNEL);
422 if (!buffer->buffers)
423 goto fail_free_buffer;
424
425 for_each_buffer_cpu(buffer, cpu) {
426 buffer->buffers[cpu] =
427 rb_allocate_cpu_buffer(buffer, cpu);
428 if (!buffer->buffers[cpu])
429 goto fail_free_buffers;
430 }
431
432 mutex_init(&buffer->mutex);
433
434 return buffer;
435
436 fail_free_buffers:
437 for_each_buffer_cpu(buffer, cpu) {
438 if (buffer->buffers[cpu])
439 rb_free_cpu_buffer(buffer->buffers[cpu]);
440 }
441 kfree(buffer->buffers);
442
443 fail_free_buffer:
444 kfree(buffer);
445 return NULL;
446}
447
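/*
 * Illustrative allocation sketch (hypothetical example_* helper): a 1 MB
 * request is rounded up to whole pages (and to at least two pages);
 * RB_FL_OVERWRITE makes the buffer overwrite the oldest data instead of
 * dropping new writes when it is full.
 */
static inline struct ring_buffer *example_alloc_overwrite_buffer(void)
{
        struct ring_buffer *buffer;

        buffer = ring_buffer_alloc(1024 * 1024, RB_FL_OVERWRITE);
        if (!buffer)
                return NULL;    /* page or per-cpu buffer allocation failed */
        return buffer;
}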
448/**
449 * ring_buffer_free - free a ring buffer.
450 * @buffer: the buffer to free.
451 */
452void
453ring_buffer_free(struct ring_buffer *buffer)
454{
455 int cpu;
456
457 for_each_buffer_cpu(buffer, cpu)
458 rb_free_cpu_buffer(buffer->buffers[cpu]);
459
460 kfree(buffer);
461}
462
463static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
464
465static void
466rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
467{
468 struct buffer_page *page;
469 struct list_head *p;
470 unsigned i;
471
472 atomic_inc(&cpu_buffer->record_disabled);
473 synchronize_sched();
474
475 for (i = 0; i < nr_pages; i++) {
476 BUG_ON(list_empty(&cpu_buffer->pages));
477 p = cpu_buffer->pages.next;
478 page = list_entry(p, struct buffer_page, list);
479 list_del_init(&page->list);
480 free_buffer_page(page);
481 }
482 BUG_ON(list_empty(&cpu_buffer->pages));
483
484 rb_reset_cpu(cpu_buffer);
485
486 rb_check_pages(cpu_buffer);
487
488 atomic_dec(&cpu_buffer->record_disabled);
489
490}
491
492static void
493rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
494 struct list_head *pages, unsigned nr_pages)
495{
496 struct buffer_page *page;
497 struct list_head *p;
498 unsigned i;
499
500 atomic_inc(&cpu_buffer->record_disabled);
501 synchronize_sched();
502
503 for (i = 0; i < nr_pages; i++) {
504 BUG_ON(list_empty(pages));
505 p = pages->next;
506 page = list_entry(p, struct buffer_page, list);
507 list_del_init(&page->list);
508 list_add_tail(&page->list, &cpu_buffer->pages);
509 }
510 rb_reset_cpu(cpu_buffer);
511
512 rb_check_pages(cpu_buffer);
513
514 atomic_dec(&cpu_buffer->record_disabled);
515}
516
517/**
518 * ring_buffer_resize - resize the ring buffer
519 * @buffer: the buffer to resize.
520 * @size: the new size.
521 *
522 * The tracer is responsible for making sure that the buffer is
523 * not being used while changing the size.
524 * Note: We may be able to change the above requirement by using
525 * RCU synchronizations.
526 *
527 * Minimum size is 2 * BUF_PAGE_SIZE.
528 *
529 * Returns -1 on failure.
530 */
531int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
532{
533 struct ring_buffer_per_cpu *cpu_buffer;
534 unsigned nr_pages, rm_pages, new_pages;
535 struct buffer_page *page, *tmp;
536 unsigned long buffer_size;
537 unsigned long addr;
538 LIST_HEAD(pages);
539 int i, cpu;
540
541 /*
542 * Always succeed at resizing a non-existent buffer:
543 */
544 if (!buffer)
545 return size;
546
547 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
548 size *= BUF_PAGE_SIZE;
549 buffer_size = buffer->pages * BUF_PAGE_SIZE;
550
551 /* we need a minimum of two pages */
552 if (size < BUF_PAGE_SIZE * 2)
553 size = BUF_PAGE_SIZE * 2;
554
555 if (size == buffer_size)
556 return size;
557
558 mutex_lock(&buffer->mutex);
559
560 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
561
562 if (size < buffer_size) {
563
564 /* easy case, just free pages */
565 BUG_ON(nr_pages >= buffer->pages);
566
567 rm_pages = buffer->pages - nr_pages;
568
569 for_each_buffer_cpu(buffer, cpu) {
570 cpu_buffer = buffer->buffers[cpu];
571 rb_remove_pages(cpu_buffer, rm_pages);
572 }
573 goto out;
574 }
575
576 /*
577 * This is a bit more difficult. We only want to add pages
578 * when we can allocate enough for all CPUs. We do this
579 * by allocating all the pages and storing them on a local
580 * link list. If we succeed in our allocation, then we
581 * add these pages to the cpu_buffers. Otherwise we just free
582 * them all and return -ENOMEM;
583 */
584 BUG_ON(nr_pages <= buffer->pages);
585 new_pages = nr_pages - buffer->pages;
586
587 for_each_buffer_cpu(buffer, cpu) {
588 for (i = 0; i < new_pages; i++) {
589 page = kzalloc_node(ALIGN(sizeof(*page),
590 cache_line_size()),
591 GFP_KERNEL, cpu_to_node(cpu));
592 if (!page)
593 goto free_pages;
594 list_add(&page->list, &pages);
595 addr = __get_free_page(GFP_KERNEL);
596 if (!addr)
597 goto free_pages;
598 page->page = (void *)addr;
599 }
600 }
601
602 for_each_buffer_cpu(buffer, cpu) {
603 cpu_buffer = buffer->buffers[cpu];
604 rb_insert_pages(cpu_buffer, &pages, new_pages);
605 }
606
607 BUG_ON(!list_empty(&pages));
608
609 out:
610 buffer->pages = nr_pages;
611 mutex_unlock(&buffer->mutex);
612
613 return size;
614
615 free_pages:
616 list_for_each_entry_safe(page, tmp, &pages, list) {
617 list_del_init(&page->list);
618 free_buffer_page(page);
619 }
620 return -ENOMEM;
621}
622
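/*
 * Illustrative resize sketch (hypothetical example_* helper): the new size
 * is given in bytes and rounded to BUF_PAGE_SIZE internally. The caller is
 * responsible for making sure nothing is writing to the buffer meanwhile.
 */
static inline int example_double_buffer(struct ring_buffer *buffer)
{
        unsigned long new_size = ring_buffer_size(buffer) * 2;

        if (ring_buffer_resize(buffer, new_size) < 0)
                return -ENOMEM; /* allocation failed, old size is kept */
        return 0;
}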
623static inline int rb_null_event(struct ring_buffer_event *event)
624{
625 return event->type == RINGBUF_TYPE_PADDING;
626}
627
628static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
629{
630 return page->page + index;
631}
632
633static inline struct ring_buffer_event *
634rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
635{
636 return __rb_page_index(cpu_buffer->reader_page,
637 cpu_buffer->reader_page->read);
638}
639
640static inline struct ring_buffer_event *
641rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
642{
643 return __rb_page_index(cpu_buffer->head_page,
644 cpu_buffer->head_page->read);
645}
646
647static inline struct ring_buffer_event *
648rb_iter_head_event(struct ring_buffer_iter *iter)
649{
650 return __rb_page_index(iter->head_page, iter->head);
651}
652
653static inline unsigned rb_page_write(struct buffer_page *bpage)
654{
655 return local_read(&bpage->write);
656}
657
658static inline unsigned rb_page_commit(struct buffer_page *bpage)
659{
660 return local_read(&bpage->commit);
661}
662
663/* Size is determined by what has been committed */
664static inline unsigned rb_page_size(struct buffer_page *bpage)
665{
666 return rb_page_commit(bpage);
667}
668
669static inline unsigned
670rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
671{
672 return rb_page_commit(cpu_buffer->commit_page);
673}
674
675static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
676{
677 return rb_page_commit(cpu_buffer->head_page);
678}
679
680/*
681 * When the tail hits the head and the buffer is in overwrite mode,
682 * the head jumps to the next page and all content on the previous
683 * page is discarded. But before doing so, we update the overrun
684 * variable of the buffer.
685 */
686static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
687{
688 struct ring_buffer_event *event;
689 unsigned long head;
690
691 for (head = 0; head < rb_head_size(cpu_buffer);
692 head += rb_event_length(event)) {
693
694 event = __rb_page_index(cpu_buffer->head_page, head);
695 BUG_ON(rb_null_event(event));
696 /* Only count data entries */
697 if (event->type != RINGBUF_TYPE_DATA)
698 continue;
699 cpu_buffer->overrun++;
700 cpu_buffer->entries--;
701 }
702}
703
704static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
705 struct buffer_page **page)
706{
707 struct list_head *p = (*page)->list.next;
708
709 if (p == &cpu_buffer->pages)
710 p = p->next;
711
712 *page = list_entry(p, struct buffer_page, list);
713}
714
715static inline unsigned
716rb_event_index(struct ring_buffer_event *event)
717{
718 unsigned long addr = (unsigned long)event;
719
720 return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
721}
722
723static inline int
724rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
725 struct ring_buffer_event *event)
726{
727 unsigned long addr = (unsigned long)event;
728 unsigned long index;
729
730 index = rb_event_index(event);
731 addr &= PAGE_MASK;
732
733 return cpu_buffer->commit_page->page == (void *)addr &&
734 rb_commit_index(cpu_buffer) == index;
735}
736
737static inline void
738rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
739 struct ring_buffer_event *event)
740{
741 unsigned long addr = (unsigned long)event;
742 unsigned long index;
743
744 index = rb_event_index(event);
745 addr &= PAGE_MASK;
746
747 while (cpu_buffer->commit_page->page != (void *)addr) {
748 RB_WARN_ON(cpu_buffer,
749 cpu_buffer->commit_page == cpu_buffer->tail_page);
750 cpu_buffer->commit_page->commit =
751 cpu_buffer->commit_page->write;
752 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
753 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
754 }
755
756 /* Now set the commit to the event's index */
757 local_set(&cpu_buffer->commit_page->commit, index);
758}
759
760static inline void
761rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
762{
763 /*
764 * We only race with interrupts and NMIs on this CPU.
765 * If we own the commit event, then we can commit
766 * all others that interrupted us, since the interruptions
767 * are in stack format (they finish before they come
768 * back to us). This allows us to do a simple loop to
769 * assign the commit to the tail.
770 */
771 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
772 cpu_buffer->commit_page->commit =
773 cpu_buffer->commit_page->write;
774 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
775 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
776 /* add barrier to keep gcc from optimizing too much */
777 barrier();
778 }
779 while (rb_commit_index(cpu_buffer) !=
780 rb_page_write(cpu_buffer->commit_page)) {
781 cpu_buffer->commit_page->commit =
782 cpu_buffer->commit_page->write;
783 barrier();
784 }
785}
786
787static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
788{
789 cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
790 cpu_buffer->reader_page->read = 0;
791}
792
793static inline void rb_inc_iter(struct ring_buffer_iter *iter)
794{
795 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
796
797 /*
798 * The iterator could be on the reader page (it starts there).
799 * But the head could have moved, since the reader was
800 * found. Check for this case and assign the iterator
801 * to the head page instead of next.
802 */
803 if (iter->head_page == cpu_buffer->reader_page)
804 iter->head_page = cpu_buffer->head_page;
805 else
806 rb_inc_page(cpu_buffer, &iter->head_page);
807
808 iter->read_stamp = iter->head_page->time_stamp;
809 iter->head = 0;
810}
811
812/**
813 * ring_buffer_update_event - update event type and data
814 * @event: the event to update
815 * @type: the type of event
816 * @length: the size of the event field in the ring buffer
817 *
818 * Update the type and data fields of the event. The length
819 * is the actual size that is written to the ring buffer,
820 * and with this, we can determine what to place into the
821 * data field.
822 */
823static inline void
824rb_update_event(struct ring_buffer_event *event,
825 unsigned type, unsigned length)
826{
827 event->type = type;
828
829 switch (type) {
830
831 case RINGBUF_TYPE_PADDING:
832 break;
833
834 case RINGBUF_TYPE_TIME_EXTEND:
835 event->len =
836 (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
837 >> RB_ALIGNMENT_SHIFT;
838 break;
839
840 case RINGBUF_TYPE_TIME_STAMP:
841 event->len =
842 (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
843 >> RB_ALIGNMENT_SHIFT;
844 break;
845
846 case RINGBUF_TYPE_DATA:
847 length -= RB_EVNT_HDR_SIZE;
848 if (length > RB_MAX_SMALL_DATA) {
849 event->len = 0;
850 event->array[0] = length;
851 } else
852 event->len =
853 (length + (RB_ALIGNMENT-1))
854 >> RB_ALIGNMENT_SHIFT;
855 break;
856 default:
857 BUG();
858 }
859}
860
861static inline unsigned rb_calculate_event_length(unsigned length)
862{
863 struct ring_buffer_event event; /* Used only for sizeof array */
864
865 /* zero length can cause confusions */
866 if (!length)
867 length = 1;
868
869 if (length > RB_MAX_SMALL_DATA)
870 length += sizeof(event.array[0]);
871
872 length += RB_EVNT_HDR_SIZE;
873 length = ALIGN(length, RB_ALIGNMENT);
874
875 return length;
876}
877
878static struct ring_buffer_event *
879__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
880 unsigned type, unsigned long length, u64 *ts)
881{
882 struct buffer_page *tail_page, *head_page, *reader_page;
883 unsigned long tail, write;
884 struct ring_buffer *buffer = cpu_buffer->buffer;
885 struct ring_buffer_event *event;
886 unsigned long flags;
887
888 tail_page = cpu_buffer->tail_page;
889 write = local_add_return(length, &tail_page->write);
890 tail = write - length;
891
892 /* See if we shot past the end of this buffer page */
893 if (write > BUF_PAGE_SIZE) {
7a8e76a3
SR
894 struct buffer_page *next_page = tail_page;
895
896 spin_lock_irqsave(&cpu_buffer->lock, flags);
897
898 rb_inc_page(cpu_buffer, &next_page);
899
900 head_page = cpu_buffer->head_page;
901 reader_page = cpu_buffer->reader_page;
902
903 /* we grabbed the lock before incrementing */
904 RB_WARN_ON(cpu_buffer, next_page == reader_page);
905
906 /*
907 * If for some reason, we had an interrupt storm that made
908 * it all the way around the buffer, bail, and warn
909 * about it.
910 */
911 if (unlikely(next_page == cpu_buffer->commit_page)) {
912 WARN_ON_ONCE(1);
913 goto out_unlock;
914 }
915
916 if (next_page == head_page) {
917 if (!(buffer->flags & RB_FL_OVERWRITE)) {
918 /* reset write */
919 if (tail <= BUF_PAGE_SIZE)
920 local_set(&tail_page->write, tail);
921 goto out_unlock;
922 }
923
924 /* tail_page has not moved yet? */
925 if (tail_page == cpu_buffer->tail_page) {
926 /* count overflows */
927 rb_update_overflow(cpu_buffer);
928
929 rb_inc_page(cpu_buffer, &head_page);
930 cpu_buffer->head_page = head_page;
931 cpu_buffer->head_page->read = 0;
932 }
933 }
934
935 /*
936 * If the tail page is still the same as what we think
937 * it is, then it is up to us to update the tail
938 * pointer.
939 */
940 if (tail_page == cpu_buffer->tail_page) {
941 local_set(&next_page->write, 0);
942 local_set(&next_page->commit, 0);
943 cpu_buffer->tail_page = next_page;
944
945 /* reread the time stamp */
946 *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
947 cpu_buffer->tail_page->time_stamp = *ts;
948 }
949
950 /*
951 * The actual tail page has moved forward.
952 */
953 if (tail < BUF_PAGE_SIZE) {
954 /* Mark the rest of the page with padding */
955 event = __rb_page_index(tail_page, tail);
956 event->type = RINGBUF_TYPE_PADDING;
957 }
958
959 if (tail <= BUF_PAGE_SIZE)
960 /* Set the write back to the previous setting */
961 local_set(&tail_page->write, tail);
962
963 /*
964 * If this was a commit entry that failed,
965 * increment that too
966 */
967 if (tail_page == cpu_buffer->commit_page &&
968 tail == rb_commit_index(cpu_buffer)) {
969 rb_set_commit_to_write(cpu_buffer);
970 }
971
972 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
973
974 /* fail and let the caller try again */
975 return ERR_PTR(-EAGAIN);
976 }
977
978 /* We reserved something on the buffer */
979
980 BUG_ON(write > BUF_PAGE_SIZE);
981
982 event = __rb_page_index(tail_page, tail);
983 rb_update_event(event, type, length);
984
985 /*
986 * If this is a commit and the tail is zero, then update
987 * this page's time stamp.
988 */
989 if (!tail && rb_is_commit(cpu_buffer, event))
990 cpu_buffer->commit_page->time_stamp = *ts;
991
992 return event;
993
994 out_unlock:
995 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
996 return NULL;
997}
998
999static int
1000rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1001 u64 *ts, u64 *delta)
1002{
1003 struct ring_buffer_event *event;
1004 static int once;
1005 int ret;
1006
1007 if (unlikely(*delta > (1ULL << 59) && !once++)) {
1008 printk(KERN_WARNING "Delta way too big! %llu"
1009 " ts=%llu write stamp = %llu\n",
1010 (unsigned long long)*delta,
1011 (unsigned long long)*ts,
1012 (unsigned long long)cpu_buffer->write_stamp);
1013 WARN_ON(1);
1014 }
1015
1016 /*
1017 * The delta is too big, we need to add a
1018 * new timestamp.
1019 */
1020 event = __rb_reserve_next(cpu_buffer,
1021 RINGBUF_TYPE_TIME_EXTEND,
1022 RB_LEN_TIME_EXTEND,
1023 ts);
1024 if (!event)
1025 return -EBUSY;
1026
1027 if (PTR_ERR(event) == -EAGAIN)
1028 return -EAGAIN;
1029
1030 /* Only a committed time event can update the write stamp */
1031 if (rb_is_commit(cpu_buffer, event)) {
1032 /*
1033 * If this is the first on the page, then we need to
1034 * update the page itself, and just put in a zero.
1035 */
1036 if (rb_event_index(event)) {
1037 event->time_delta = *delta & TS_MASK;
1038 event->array[0] = *delta >> TS_SHIFT;
1039 } else {
1040 cpu_buffer->commit_page->time_stamp = *ts;
1041 event->time_delta = 0;
1042 event->array[0] = 0;
1043 }
1044 cpu_buffer->write_stamp = *ts;
1045 /* let the caller know this was the commit */
1046 ret = 1;
1047 } else {
1048 /* Darn, this is just wasted space */
1049 event->time_delta = 0;
1050 event->array[0] = 0;
1051 ret = 0;
1052 }
1053
1054 *delta = 0;
1055
1056 return ret;
1057}
1058
1059static struct ring_buffer_event *
1060rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1061 unsigned type, unsigned long length)
1062{
1063 struct ring_buffer_event *event;
1064 u64 ts, delta;
1065 int commit = 0;
1066 int nr_loops = 0;
1067
1068 again:
1069 /*
1070 * We allow for interrupts to reenter here and do a trace.
1071 * If one does, it will cause this original code to loop
1072 * back here. Even with heavy interrupts happening, this
1073 * should only happen a few times in a row. If this happens
1074 * 1000 times in a row, there must be either an interrupt
1075 * storm or we have something buggy.
1076 * Bail!
1077 */
1078 if (unlikely(++nr_loops > 1000)) {
1079 RB_WARN_ON(cpu_buffer, 1);
1080 return NULL;
1081 }
1082
1083 ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1084
1085 /*
1086 * Only the first commit can update the timestamp.
1087 * Yes there is a race here. If an interrupt comes in
1088 * just after the conditional and it traces too, then it
1089 * will also check the deltas. More than one timestamp may
1090 * also be made. But only the entry that did the actual
1091 * commit will be something other than zero.
1092 */
1093 if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1094 rb_page_write(cpu_buffer->tail_page) ==
1095 rb_commit_index(cpu_buffer)) {
1096
1097 delta = ts - cpu_buffer->write_stamp;
1098
1099 /* make sure this delta is calculated here */
1100 barrier();
1101
1102 /* Did the write stamp get updated already? */
1103 if (unlikely(ts < cpu_buffer->write_stamp))
1104 delta = 0;
1105
1106 if (test_time_stamp(delta)) {
1107
1108 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1109
1110 if (commit == -EBUSY)
1111 return NULL;
1112
1113 if (commit == -EAGAIN)
1114 goto again;
1115
1116 RB_WARN_ON(cpu_buffer, commit < 0);
1117 }
1118 } else
1119 /* Non commits have zero deltas */
1120 delta = 0;
1121
1122 event = __rb_reserve_next(cpu_buffer, type, length, &ts);
1123 if (PTR_ERR(event) == -EAGAIN)
1124 goto again;
1125
1126 if (!event) {
1127 if (unlikely(commit))
1128 /*
1129 * Ouch! We needed a timestamp and it was committed. But
1130 * we didn't get our event reserved.
1131 */
1132 rb_set_commit_to_write(cpu_buffer);
1133 return NULL;
1134 }
1135
1136 /*
1137 * If the timestamp was committed, make the commit our entry
1138 * now so that we will update it when needed.
1139 */
1140 if (commit)
1141 rb_set_commit_event(cpu_buffer, event);
1142 else if (!rb_is_commit(cpu_buffer, event))
1143 delta = 0;
1144
1145 event->time_delta = delta;
1146
1147 return event;
1148}
1149
1150static DEFINE_PER_CPU(int, rb_need_resched);
1151
1152/**
1153 * ring_buffer_lock_reserve - reserve a part of the buffer
1154 * @buffer: the ring buffer to reserve from
1155 * @length: the length of the data to reserve (excluding event header)
1156 * @flags: a pointer to save the interrupt flags
1157 *
1158 * Returns a reserved event on the ring buffer to copy directly to.
1159 * The user of this interface will need to get the body to write into
1160 * and can use the ring_buffer_event_data() interface.
1161 *
1162 * The length is the length of the data needed, not the event length
1163 * which also includes the event header.
1164 *
1165 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1166 * If NULL is returned, then nothing has been allocated or locked.
1167 */
1168struct ring_buffer_event *
1169ring_buffer_lock_reserve(struct ring_buffer *buffer,
1170 unsigned long length,
1171 unsigned long *flags)
1172{
1173 struct ring_buffer_per_cpu *cpu_buffer;
1174 struct ring_buffer_event *event;
1175 int cpu, resched;
1176
1177 if (ring_buffers_off)
1178 return NULL;
1179
1180 if (atomic_read(&buffer->record_disabled))
1181 return NULL;
1182
1183 /* If we are tracing schedule, we don't want to recurse */
1184 resched = need_resched();
1185 preempt_disable_notrace();
1186
1187 cpu = raw_smp_processor_id();
1188
1189 if (!cpu_isset(cpu, buffer->cpumask))
1190 goto out;
1191
1192 cpu_buffer = buffer->buffers[cpu];
1193
1194 if (atomic_read(&cpu_buffer->record_disabled))
1195 goto out;
1196
1197 length = rb_calculate_event_length(length);
1198 if (length > BUF_PAGE_SIZE)
1199 goto out;
1200
1201 event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1202 if (!event)
1203 goto out;
1204
1205 /*
1206 * Need to store resched state on this cpu.
1207 * Only the first needs to.
1208 */
1209
1210 if (preempt_count() == 1)
1211 per_cpu(rb_need_resched, cpu) = resched;
1212
1213 return event;
1214
1215 out:
1216 if (resched)
1217 preempt_enable_no_resched_notrace();
1218 else
1219 preempt_enable_notrace();
1220 return NULL;
1221}
1222
1223static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1224 struct ring_buffer_event *event)
1225{
1226 cpu_buffer->entries++;
1227
1228 /* Only process further if we own the commit */
1229 if (!rb_is_commit(cpu_buffer, event))
1230 return;
1231
1232 cpu_buffer->write_stamp += event->time_delta;
1233
1234 rb_set_commit_to_write(cpu_buffer);
1235}
1236
1237/**
1238 * ring_buffer_unlock_commit - commit a reserved event
1239 * @buffer: The buffer to commit to
1240 * @event: The event pointer to commit.
1241 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
1242 *
1243 * This commits the data to the ring buffer, and releases any locks held.
1244 *
1245 * Must be paired with ring_buffer_lock_reserve.
1246 */
1247int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1248 struct ring_buffer_event *event,
1249 unsigned long flags)
1250{
1251 struct ring_buffer_per_cpu *cpu_buffer;
1252 int cpu = raw_smp_processor_id();
1253
1254 cpu_buffer = buffer->buffers[cpu];
1255
1256 rb_commit(cpu_buffer, event);
1257
1258 /*
1259 * Only the last preempt count needs to restore preemption.
1260 */
1261 if (preempt_count() == 1) {
1262 if (per_cpu(rb_need_resched, cpu))
1263 preempt_enable_no_resched_notrace();
1264 else
1265 preempt_enable_notrace();
1266 } else
1267 preempt_enable_no_resched_notrace();
1268
1269 return 0;
1270}
1271
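/*
 * Illustrative reserve/commit sketch (hypothetical example_* helper): the
 * length passed to ring_buffer_lock_reserve() is the payload size only,
 * the event header is added internally. On NULL nothing was reserved and
 * nothing needs to be undone.
 */
static inline int example_record_bytes(struct ring_buffer *buffer,
                                       const void *data, unsigned long len)
{
        struct ring_buffer_event *event;
        unsigned long flags;
        void *body;

        event = ring_buffer_lock_reserve(buffer, len, &flags);
        if (!event)
                return -EBUSY;  /* buffer full, disabled or cpu not tracked */

        body = ring_buffer_event_data(event);
        memcpy(body, data, len);

        return ring_buffer_unlock_commit(buffer, event, flags);
}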
1272/**
1273 * ring_buffer_write - write data to the buffer without reserving
1274 * @buffer: The ring buffer to write to.
1275 * @length: The length of the data being written (excluding the event header)
1276 * @data: The data to write to the buffer.
1277 *
1278 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1279 * one function. If you already have the data to write to the buffer, it
1280 * may be easier to simply call this function.
1281 *
1282 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1283 * and not the length of the event which would hold the header.
1284 */
1285int ring_buffer_write(struct ring_buffer *buffer,
1286 unsigned long length,
1287 void *data)
1288{
1289 struct ring_buffer_per_cpu *cpu_buffer;
1290 struct ring_buffer_event *event;
1291 unsigned long event_length;
1292 void *body;
1293 int ret = -EBUSY;
1294 int cpu, resched;
1295
1296 if (ring_buffers_off)
1297 return -EBUSY;
1298
1299 if (atomic_read(&buffer->record_disabled))
1300 return -EBUSY;
1301
1302 resched = need_resched();
1303 preempt_disable_notrace();
1304
1305 cpu = raw_smp_processor_id();
1306
1307 if (!cpu_isset(cpu, buffer->cpumask))
1308 goto out;
1309
1310 cpu_buffer = buffer->buffers[cpu];
1311
1312 if (atomic_read(&cpu_buffer->record_disabled))
1313 goto out;
1314
1315 event_length = rb_calculate_event_length(length);
1316 event = rb_reserve_next_event(cpu_buffer,
1317 RINGBUF_TYPE_DATA, event_length);
1318 if (!event)
1319 goto out;
1320
1321 body = rb_event_data(event);
1322
1323 memcpy(body, data, length);
1324
1325 rb_commit(cpu_buffer, event);
1326
1327 ret = 0;
1328 out:
1329 if (resched)
1330 preempt_enable_no_resched_notrace();
1331 else
1332 preempt_enable_notrace();
1333
1334 return ret;
1335}
1336
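/*
 * Illustrative sketch of the one-shot form above (hypothetical example_*
 * helper): ring_buffer_write() does the reserve, copy and commit in one
 * call when the caller already has the data in hand.
 */
static inline int example_write_struct(struct ring_buffer *buffer)
{
        struct {
                unsigned long val1, val2;
        } payload = { 1, 2 };

        /* returns 0 on success, -EBUSY if recording is currently disabled */
        return ring_buffer_write(buffer, sizeof(payload), &payload);
}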
1337static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1338{
1339 struct buffer_page *reader = cpu_buffer->reader_page;
1340 struct buffer_page *head = cpu_buffer->head_page;
1341 struct buffer_page *commit = cpu_buffer->commit_page;
1342
1343 return reader->read == rb_page_commit(reader) &&
1344 (commit == reader ||
1345 (commit == head &&
1346 head->read == rb_page_commit(commit)));
1347}
1348
1349/**
1350 * ring_buffer_record_disable - stop all writes into the buffer
1351 * @buffer: The ring buffer to stop writes to.
1352 *
1353 * This prevents all writes to the buffer. Any attempt to write
1354 * to the buffer after this will fail and return NULL.
1355 *
1356 * The caller should call synchronize_sched() after this.
1357 */
1358void ring_buffer_record_disable(struct ring_buffer *buffer)
1359{
1360 atomic_inc(&buffer->record_disabled);
1361}
1362
1363/**
1364 * ring_buffer_record_enable - enable writes to the buffer
1365 * @buffer: The ring buffer to enable writes
1366 *
1367 * Note, multiple disables will need the same number of enables
1368 * to truly enable the writing (much like preempt_disable).
1369 */
1370void ring_buffer_record_enable(struct ring_buffer *buffer)
1371{
1372 atomic_dec(&buffer->record_disabled);
1373}
1374
1375/**
1376 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1377 * @buffer: The ring buffer to stop writes to.
1378 * @cpu: The CPU buffer to stop
1379 *
1380 * This prevents all writes to the buffer. Any attempt to write
1381 * to the buffer after this will fail and return NULL.
1382 *
1383 * The caller should call synchronize_sched() after this.
1384 */
1385void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1386{
1387 struct ring_buffer_per_cpu *cpu_buffer;
1388
1389 if (!cpu_isset(cpu, buffer->cpumask))
1390 return;
1391
1392 cpu_buffer = buffer->buffers[cpu];
1393 atomic_inc(&cpu_buffer->record_disabled);
1394}
1395
1396/**
1397 * ring_buffer_record_enable_cpu - enable writes to the buffer
1398 * @buffer: The ring buffer to enable writes
1399 * @cpu: The CPU to enable.
1400 *
1401 * Note, multiple disables will need the same number of enables
1402 * to truly enable the writing (much like preempt_disable).
1403 */
1404void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1405{
1406 struct ring_buffer_per_cpu *cpu_buffer;
1407
1408 if (!cpu_isset(cpu, buffer->cpumask))
1409 return;
1410
1411 cpu_buffer = buffer->buffers[cpu];
1412 atomic_dec(&cpu_buffer->record_disabled);
1413}
1414
1415/**
1416 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1417 * @buffer: The ring buffer
1418 * @cpu: The per CPU buffer to get the entries from.
1419 */
1420unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1421{
1422 struct ring_buffer_per_cpu *cpu_buffer;
1423
1424 if (!cpu_isset(cpu, buffer->cpumask))
1425 return 0;
1426
1427 cpu_buffer = buffer->buffers[cpu];
1428 return cpu_buffer->entries;
1429}
1430
1431/**
1432 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1433 * @buffer: The ring buffer
1434 * @cpu: The per CPU buffer to get the number of overruns from
1435 */
1436unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1437{
1438 struct ring_buffer_per_cpu *cpu_buffer;
1439
1440 if (!cpu_isset(cpu, buffer->cpumask))
1441 return 0;
1442
1443 cpu_buffer = buffer->buffers[cpu];
1444 return cpu_buffer->overrun;
1445}
1446
1447/**
1448 * ring_buffer_entries - get the number of entries in a buffer
1449 * @buffer: The ring buffer
1450 *
1451 * Returns the total number of entries in the ring buffer
1452 * (all CPU entries)
1453 */
1454unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1455{
1456 struct ring_buffer_per_cpu *cpu_buffer;
1457 unsigned long entries = 0;
1458 int cpu;
1459
1460 /* if you care about this being correct, lock the buffer */
1461 for_each_buffer_cpu(buffer, cpu) {
1462 cpu_buffer = buffer->buffers[cpu];
1463 entries += cpu_buffer->entries;
1464 }
1465
1466 return entries;
1467}
1468
1469/**
1470 * ring_buffer_overruns - get the number of overruns in the buffer
1471 * @buffer: The ring buffer
1472 *
1473 * Returns the total number of overruns in the ring buffer
1474 * (all CPU entries)
1475 */
1476unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1477{
1478 struct ring_buffer_per_cpu *cpu_buffer;
1479 unsigned long overruns = 0;
1480 int cpu;
1481
1482 /* if you care about this being correct, lock the buffer */
1483 for_each_buffer_cpu(buffer, cpu) {
1484 cpu_buffer = buffer->buffers[cpu];
1485 overruns += cpu_buffer->overrun;
1486 }
1487
1488 return overruns;
1489}
1490
1491/**
1492 * ring_buffer_iter_reset - reset an iterator
1493 * @iter: The iterator to reset
1494 *
1495 * Resets the iterator, so that it will start from the beginning
1496 * again.
1497 */
1498void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1499{
1500 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1501
1502 /* Iterator usage is expected to have record disabled */
1503 if (list_empty(&cpu_buffer->reader_page->list)) {
1504 iter->head_page = cpu_buffer->head_page;
1505 iter->head = cpu_buffer->head_page->read;
1506 } else {
1507 iter->head_page = cpu_buffer->reader_page;
1508 iter->head = cpu_buffer->reader_page->read;
1509 }
1510 if (iter->head)
1511 iter->read_stamp = cpu_buffer->read_stamp;
1512 else
1513 iter->read_stamp = iter->head_page->time_stamp;
1514}
1515
1516/**
1517 * ring_buffer_iter_empty - check if an iterator has no more to read
1518 * @iter: The iterator to check
1519 */
1520int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1521{
1522 struct ring_buffer_per_cpu *cpu_buffer;
1523
1524 cpu_buffer = iter->cpu_buffer;
1525
1526 return iter->head_page == cpu_buffer->commit_page &&
1527 iter->head == rb_commit_index(cpu_buffer);
1528}
1529
1530static void
1531rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1532 struct ring_buffer_event *event)
1533{
1534 u64 delta;
1535
1536 switch (event->type) {
1537 case RINGBUF_TYPE_PADDING:
1538 return;
1539
1540 case RINGBUF_TYPE_TIME_EXTEND:
1541 delta = event->array[0];
1542 delta <<= TS_SHIFT;
1543 delta += event->time_delta;
1544 cpu_buffer->read_stamp += delta;
1545 return;
1546
1547 case RINGBUF_TYPE_TIME_STAMP:
1548 /* FIXME: not implemented */
1549 return;
1550
1551 case RINGBUF_TYPE_DATA:
1552 cpu_buffer->read_stamp += event->time_delta;
1553 return;
1554
1555 default:
1556 BUG();
1557 }
1558 return;
1559}
1560
1561static void
1562rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1563 struct ring_buffer_event *event)
1564{
1565 u64 delta;
1566
1567 switch (event->type) {
1568 case RINGBUF_TYPE_PADDING:
1569 return;
1570
1571 case RINGBUF_TYPE_TIME_EXTEND:
1572 delta = event->array[0];
1573 delta <<= TS_SHIFT;
1574 delta += event->time_delta;
1575 iter->read_stamp += delta;
1576 return;
1577
1578 case RINGBUF_TYPE_TIME_STAMP:
1579 /* FIXME: not implemented */
1580 return;
1581
1582 case RINGBUF_TYPE_DATA:
1583 iter->read_stamp += event->time_delta;
1584 return;
1585
1586 default:
1587 BUG();
1588 }
1589 return;
1590}
1591
1592static struct buffer_page *
1593rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1594{
1595 struct buffer_page *reader = NULL;
1596 unsigned long flags;
1597 int nr_loops = 0;
1598
1599 spin_lock_irqsave(&cpu_buffer->lock, flags);
1600
1601 again:
1602 /*
1603 * This should normally only loop twice. But because the
1604 * start of the reader inserts an empty page, it causes
1605 * a case where we will loop three times. There should be no
1606 * reason to loop four times (that I know of).
1607 */
1608 if (unlikely(++nr_loops > 3)) {
1609 RB_WARN_ON(cpu_buffer, 1);
1610 reader = NULL;
1611 goto out;
1612 }
1613
1614 reader = cpu_buffer->reader_page;
1615
1616 /* If there's more to read, return this page */
1617 if (cpu_buffer->reader_page->read < rb_page_size(reader))
1618 goto out;
1619
1620 /* Never should we have an index greater than the size */
1621 RB_WARN_ON(cpu_buffer,
1622 cpu_buffer->reader_page->read > rb_page_size(reader));
1623
1624 /* check if we caught up to the tail */
1625 reader = NULL;
1626 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
1627 goto out;
1628
1629 /*
1630 * Splice the empty reader page into the list around the head.
1631 * Reset the reader page to size zero.
7a8e76a3 1632 */
1633
1634 reader = cpu_buffer->head_page;
1635 cpu_buffer->reader_page->list.next = reader->list.next;
1636 cpu_buffer->reader_page->list.prev = reader->list.prev;
1637
1638 local_set(&cpu_buffer->reader_page->write, 0);
1639 local_set(&cpu_buffer->reader_page->commit, 0);
1640
1641 /* Make the reader page now replace the head */
1642 reader->list.prev->next = &cpu_buffer->reader_page->list;
1643 reader->list.next->prev = &cpu_buffer->reader_page->list;
1644
1645 /*
1646 * If the tail is on the reader, then we must set the head
1647 * to the inserted page, otherwise we set it one before.
7a8e76a3 1648 */
1649 cpu_buffer->head_page = cpu_buffer->reader_page;
1650
1651 if (cpu_buffer->commit_page != reader)
1652 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1653
1654 /* Finally update the reader page to the new head */
1655 cpu_buffer->reader_page = reader;
1656 rb_reset_reader_page(cpu_buffer);
1657
1658 goto again;
1659
1660 out:
1661 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1662
1663 return reader;
1664}
1665
1666static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1667{
1668 struct ring_buffer_event *event;
1669 struct buffer_page *reader;
1670 unsigned length;
1671
1672 reader = rb_get_reader_page(cpu_buffer);
1673
1674 /* This function should not be called when buffer is empty */
1675 BUG_ON(!reader);
1676
1677 event = rb_reader_event(cpu_buffer);
1678
1679 if (event->type == RINGBUF_TYPE_DATA)
1680 cpu_buffer->entries--;
1681
1682 rb_update_read_stamp(cpu_buffer, event);
1683
1684 length = rb_event_length(event);
1685 cpu_buffer->reader_page->read += length;
1686}
1687
1688static void rb_advance_iter(struct ring_buffer_iter *iter)
1689{
1690 struct ring_buffer *buffer;
1691 struct ring_buffer_per_cpu *cpu_buffer;
1692 struct ring_buffer_event *event;
1693 unsigned length;
1694
1695 cpu_buffer = iter->cpu_buffer;
1696 buffer = cpu_buffer->buffer;
1697
1698 /*
1699 * Check if we are at the end of the buffer.
1700 */
1701 if (iter->head >= rb_page_size(iter->head_page)) {
1702 BUG_ON(iter->head_page == cpu_buffer->commit_page);
1703 rb_inc_iter(iter);
1704 return;
1705 }
1706
1707 event = rb_iter_head_event(iter);
1708
1709 length = rb_event_length(event);
1710
1711 /*
1712 * This should not be called to advance the header if we are
1713 * at the tail of the buffer.
1714 */
1715 BUG_ON((iter->head_page == cpu_buffer->commit_page) &&
1716 (iter->head + length > rb_commit_index(cpu_buffer)));
1717
1718 rb_update_iter_read_stamp(iter, event);
1719
1720 iter->head += length;
1721
1722 /* check for end of page padding */
1723 if ((iter->head >= rb_page_size(iter->head_page)) &&
1724 (iter->head_page != cpu_buffer->commit_page))
1725 rb_advance_iter(iter);
1726}
1727
1728/**
1729 * ring_buffer_peek - peek at the next event to be read
1730 * @buffer: The ring buffer to read
1731 * @cpu: The cpu to peek at
1732 * @ts: The timestamp counter of this event.
1733 *
1734 * This will return the event that will be read next, but does
1735 * not consume the data.
1736 */
1737struct ring_buffer_event *
1738ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1739{
1740 struct ring_buffer_per_cpu *cpu_buffer;
1741 struct ring_buffer_event *event;
1742 struct buffer_page *reader;
1743 int nr_loops = 0;
1744
1745 if (!cpu_isset(cpu, buffer->cpumask))
1746 return NULL;
1747
1748 cpu_buffer = buffer->buffers[cpu];
1749
1750 again:
1751 /*
1752 * We repeat when a timestamp is encountered. It is possible
1753 * to get multiple timestamps from an interrupt entering just
1754 * as one timestamp is about to be written. The max times
1755 * that this can happen is the number of nested interrupts we
1756 * can have. Nesting 10 deep of interrupts is clearly
1757 * an anomaly.
1758 */
1759 if (unlikely(++nr_loops > 10)) {
1760 RB_WARN_ON(cpu_buffer, 1);
1761 return NULL;
1762 }
1763
1764 reader = rb_get_reader_page(cpu_buffer);
1765 if (!reader)
1766 return NULL;
1767
1768 event = rb_reader_event(cpu_buffer);
1769
1770 switch (event->type) {
1771 case RINGBUF_TYPE_PADDING:
1772 RB_WARN_ON(cpu_buffer, 1);
1773 rb_advance_reader(cpu_buffer);
1774 return NULL;
1775
1776 case RINGBUF_TYPE_TIME_EXTEND:
1777 /* Internal data, OK to advance */
1778 rb_advance_reader(cpu_buffer);
1779 goto again;
1780
1781 case RINGBUF_TYPE_TIME_STAMP:
1782 /* FIXME: not implemented */
1783 rb_advance_reader(cpu_buffer);
1784 goto again;
1785
1786 case RINGBUF_TYPE_DATA:
1787 if (ts) {
1788 *ts = cpu_buffer->read_stamp + event->time_delta;
1789 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1790 }
1791 return event;
1792
1793 default:
1794 BUG();
1795 }
1796
1797 return NULL;
1798}
1799
1800/**
1801 * ring_buffer_iter_peek - peek at the next event to be read
1802 * @iter: The ring buffer iterator
1803 * @ts: The timestamp counter of this event.
1804 *
1805 * This will return the event that will be read next, but does
1806 * not increment the iterator.
1807 */
1808struct ring_buffer_event *
1809ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1810{
1811 struct ring_buffer *buffer;
1812 struct ring_buffer_per_cpu *cpu_buffer;
1813 struct ring_buffer_event *event;
1814 int nr_loops = 0;
1815
1816 if (ring_buffer_iter_empty(iter))
1817 return NULL;
1818
1819 cpu_buffer = iter->cpu_buffer;
1820 buffer = cpu_buffer->buffer;
1821
1822 again:
1823 /*
1824 * We repeat when a timestamp is encountered. It is possible
1825 * to get multiple timestamps from an interrupt entering just
1826 * as one timestamp is about to be written. The max times
1827 * that this can happen is the number of nested interrupts we
1828 * can have. Nesting 10 deep of interrupts is clearly
1829 * an anomaly.
1830 */
1831 if (unlikely(++nr_loops > 10)) {
1832 RB_WARN_ON(cpu_buffer, 1);
1833 return NULL;
1834 }
1835
1836 if (rb_per_cpu_empty(cpu_buffer))
1837 return NULL;
1838
1839 event = rb_iter_head_event(iter);
1840
1841 switch (event->type) {
1842 case RINGBUF_TYPE_PADDING:
1843 rb_inc_iter(iter);
1844 goto again;
1845
1846 case RINGBUF_TYPE_TIME_EXTEND:
1847 /* Internal data, OK to advance */
1848 rb_advance_iter(iter);
1849 goto again;
1850
1851 case RINGBUF_TYPE_TIME_STAMP:
1852 /* FIXME: not implemented */
1853 rb_advance_iter(iter);
1854 goto again;
1855
1856 case RINGBUF_TYPE_DATA:
1857 if (ts) {
1858 *ts = iter->read_stamp + event->time_delta;
1859 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1860 }
1861 return event;
1862
1863 default:
1864 BUG();
1865 }
1866
1867 return NULL;
1868}
1869
1870/**
1871 * ring_buffer_consume - return an event and consume it
1872 * @buffer: The ring buffer to get the next event from
1873 *
1874 * Returns the next event in the ring buffer, and that event is consumed.
1875 * Meaning, that sequential reads will keep returning a different event,
1876 * and eventually empty the ring buffer if the producer is slower.
1877 */
1878struct ring_buffer_event *
1879ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
1880{
1881 struct ring_buffer_per_cpu *cpu_buffer;
1882 struct ring_buffer_event *event;
1883
1884 if (!cpu_isset(cpu, buffer->cpumask))
1885 return NULL;
1886
1887 event = ring_buffer_peek(buffer, cpu, ts);
1888 if (!event)
1889 return NULL;
1890
1891 cpu_buffer = buffer->buffers[cpu];
1892 rb_advance_reader(cpu_buffer);
1893
1894 return event;
1895}
1896
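/*
 * Illustrative consuming-read sketch (hypothetical example_* helper): each
 * ring_buffer_consume() call returns the next event on that cpu and
 * advances the reader, so the loop ends when the cpu buffer is empty.
 */
static inline void example_drain_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        u64 ts;

        while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL) {
                /* ts has already been normalized by ring_buffer_peek() */
                printk(KERN_DEBUG "ts=%llu len=%u\n",
                       (unsigned long long)ts,
                       ring_buffer_event_length(event));
        }
}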
1897/**
1898 * ring_buffer_read_start - start a non consuming read of the buffer
1899 * @buffer: The ring buffer to read from
1900 * @cpu: The cpu buffer to iterate over
1901 *
1902 * This starts up an iteration through the buffer. It also disables
1903 * the recording to the buffer until the reading is finished.
1904 * This prevents the reading from being corrupted. This is not
1905 * a consuming read, so a producer is not expected.
1906 *
1908 * Must be paired with ring_buffer_read_finish.
1908 */
1909struct ring_buffer_iter *
1910ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
1911{
1912 struct ring_buffer_per_cpu *cpu_buffer;
1913 struct ring_buffer_iter *iter;
1914 unsigned long flags;
1915
1916 if (!cpu_isset(cpu, buffer->cpumask))
1917 return NULL;
1918
1919 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
1920 if (!iter)
1921 return NULL;
1922
1923 cpu_buffer = buffer->buffers[cpu];
1924
1925 iter->cpu_buffer = cpu_buffer;
1926
1927 atomic_inc(&cpu_buffer->record_disabled);
1928 synchronize_sched();
1929
1930 spin_lock_irqsave(&cpu_buffer->lock, flags);
1931 ring_buffer_iter_reset(iter);
1932 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1933
1934 return iter;
1935}
1936
1937/**
1938 * ring_buffer_finish - finish reading the iterator of the buffer
1939 * @iter: The iterator retrieved by ring_buffer_start
1940 *
1941 * This re-enables the recording to the buffer, and frees the
1942 * iterator.
1943 */
1944void
1945ring_buffer_read_finish(struct ring_buffer_iter *iter)
1946{
1947 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1948
1949 atomic_dec(&cpu_buffer->record_disabled);
1950 kfree(iter);
1951}
1952
1953/**
1954 * ring_buffer_read - read the next item in the ring buffer by the iterator
1955 * @iter: The ring buffer iterator
1956 * @ts: The time stamp of the event read.
1957 *
1958 * This reads the next event in the ring buffer and increments the iterator.
1959 */
1960struct ring_buffer_event *
1961ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
1962{
1963 struct ring_buffer_event *event;
1964
1965 event = ring_buffer_iter_peek(iter, ts);
1966 if (!event)
1967 return NULL;
1968
1969 rb_advance_iter(iter);
1970
1971 return event;
1972}
1973
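/*
 * Illustrative non-consuming read sketch (hypothetical example_* helper):
 * the iterator disables recording on that cpu buffer for its lifetime, so
 * no producer is expected while it is held.
 */
static inline void example_iterate_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_iter *iter;
        struct ring_buffer_event *event;
        u64 ts;

        iter = ring_buffer_read_start(buffer, cpu);
        if (!iter)
                return;

        while ((event = ring_buffer_read(iter, &ts)) != NULL)
                ;       /* inspect the event here; nothing is consumed */

        ring_buffer_read_finish(iter);
}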
1974/**
1975 * ring_buffer_size - return the size of the ring buffer (in bytes)
1976 * @buffer: The ring buffer.
1977 */
1978unsigned long ring_buffer_size(struct ring_buffer *buffer)
1979{
1980 return BUF_PAGE_SIZE * buffer->pages;
1981}
1982
1983static void
1984rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
1985{
1986 cpu_buffer->head_page
1987 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
1988 local_set(&cpu_buffer->head_page->write, 0);
1989 local_set(&cpu_buffer->head_page->commit, 0);
1990
1991 cpu_buffer->head_page->read = 0;
1992
1993 cpu_buffer->tail_page = cpu_buffer->head_page;
1994 cpu_buffer->commit_page = cpu_buffer->head_page;
1995
1996 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1997 local_set(&cpu_buffer->reader_page->write, 0);
1998 local_set(&cpu_buffer->reader_page->commit, 0);
1999 cpu_buffer->reader_page->read = 0;
2000
2001 cpu_buffer->overrun = 0;
2002 cpu_buffer->entries = 0;
2003}
2004
2005/**
2006 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2007 * @buffer: The ring buffer to reset a per cpu buffer of
2008 * @cpu: The CPU buffer to be reset
2009 */
2010void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2011{
2012 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2013 unsigned long flags;
2014
2015 if (!cpu_isset(cpu, buffer->cpumask))
2016 return;
2017
2018 spin_lock_irqsave(&cpu_buffer->lock, flags);
2019
2020 rb_reset_cpu(cpu_buffer);
2021
2022 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
2023}
2024
2025/**
2026 * ring_buffer_reset - reset a ring buffer
2027 * @buffer: The ring buffer to reset all cpu buffers
2028 */
2029void ring_buffer_reset(struct ring_buffer *buffer)
2030{
2031 int cpu;
2032
2033 for_each_buffer_cpu(buffer, cpu)
2034 ring_buffer_reset_cpu(buffer, cpu);
2035}
2036
2037/**
2038 * ring_buffer_empty - is the ring buffer empty?
2039 * @buffer: The ring buffer to test
2040 */
2041int ring_buffer_empty(struct ring_buffer *buffer)
2042{
2043 struct ring_buffer_per_cpu *cpu_buffer;
2044 int cpu;
2045
2046 /* yes this is racy, but if you don't like the race, lock the buffer */
2047 for_each_buffer_cpu(buffer, cpu) {
2048 cpu_buffer = buffer->buffers[cpu];
2049 if (!rb_per_cpu_empty(cpu_buffer))
2050 return 0;
2051 }
2052 return 1;
2053}
2054
2055/**
2056 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2057 * @buffer: The ring buffer
2058 * @cpu: The CPU buffer to test
2059 */
2060int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2061{
2062 struct ring_buffer_per_cpu *cpu_buffer;
2063
2064 if (!cpu_isset(cpu, buffer->cpumask))
2065 return 1;
2066
2067 cpu_buffer = buffer->buffers[cpu];
2068 return rb_per_cpu_empty(cpu_buffer);
2069}
2070
2071/**
2072 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2073 * @buffer_a: One buffer to swap with
2074 * @buffer_b: The other buffer to swap with
2075 *
2076 * This function is useful for tracers that want to take a "snapshot"
2077 * of a CPU buffer and has another back up buffer lying around.
2078 * it is expected that the tracer handles the cpu buffer not being
2079 * used at the moment.
2080 */
2081int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2082 struct ring_buffer *buffer_b, int cpu)
2083{
2084 struct ring_buffer_per_cpu *cpu_buffer_a;
2085 struct ring_buffer_per_cpu *cpu_buffer_b;
2086
2087 if (!cpu_isset(cpu, buffer_a->cpumask) ||
2088 !cpu_isset(cpu, buffer_b->cpumask))
2089 return -EINVAL;
2090
2091 /* At least make sure the two buffers are somewhat the same */
2092 if (buffer_a->size != buffer_b->size ||
2093 buffer_a->pages != buffer_b->pages)
2094 return -EINVAL;
2095
2096 cpu_buffer_a = buffer_a->buffers[cpu];
2097 cpu_buffer_b = buffer_b->buffers[cpu];
2098
2099 /*
2100 * We can't do a synchronize_sched here because this
2101 * function can be called in atomic context.
2102 * Normally this will be called from the same CPU as cpu.
2103 * If not it's up to the caller to protect this.
2104 */
2105 atomic_inc(&cpu_buffer_a->record_disabled);
2106 atomic_inc(&cpu_buffer_b->record_disabled);
2107
2108 buffer_a->buffers[cpu] = cpu_buffer_b;
2109 buffer_b->buffers[cpu] = cpu_buffer_a;
2110
2111 cpu_buffer_b->buffer = buffer_a;
2112 cpu_buffer_a->buffer = buffer_b;
2113
2114 atomic_dec(&cpu_buffer_a->record_disabled);
2115 atomic_dec(&cpu_buffer_b->record_disabled);
2116
2117 return 0;
2118}
2119
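/*
 * Illustrative "snapshot" sketch in the spirit described above
 * (hypothetical example_* helper): a tracer swaps the live cpu buffer with
 * a spare one and then reads the now-idle spare at leisure. The spare is
 * assumed to have been allocated with the same size and page count.
 */
static inline int example_snapshot_cpu(struct ring_buffer *live,
                                       struct ring_buffer *spare, int cpu)
{
        int ret = ring_buffer_swap_cpu(live, spare, cpu);

        if (ret < 0)
                return ret;     /* cpu not tracked or buffer sizes differ */
        /* events recorded so far on this cpu are now readable from spare */
        return 0;
}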
2120static ssize_t
2121rb_simple_read(struct file *filp, char __user *ubuf,
2122 size_t cnt, loff_t *ppos)
2123{
2124 int *p = filp->private_data;
2125 char buf[64];
2126 int r;
2127
2128 /* !ring_buffers_off == tracing_on */
2129 r = sprintf(buf, "%d\n", !*p);
2130
2131 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2132}
2133
2134static ssize_t
2135rb_simple_write(struct file *filp, const char __user *ubuf,
2136 size_t cnt, loff_t *ppos)
2137{
2138 int *p = filp->private_data;
2139 char buf[64];
2140 long val;
2141 int ret;
2142
2143 if (cnt >= sizeof(buf))
2144 return -EINVAL;
2145
2146 if (copy_from_user(&buf, ubuf, cnt))
2147 return -EFAULT;
2148
2149 buf[cnt] = 0;
2150
2151 ret = strict_strtoul(buf, 10, &val);
2152 if (ret < 0)
2153 return ret;
2154
2155 /* !ring_buffers_off == tracing_on */
2156 *p = !val;
2157
2158 (*ppos)++;
2159
2160 return cnt;
2161}
2162
2163static struct file_operations rb_simple_fops = {
2164 .open = tracing_open_generic,
2165 .read = rb_simple_read,
2166 .write = rb_simple_write,
2167};
2168
2169
2170static __init int rb_init_debugfs(void)
2171{
2172 struct dentry *d_tracer;
2173 struct dentry *entry;
2174
2175 d_tracer = tracing_init_dentry();
2176
2177 entry = debugfs_create_file("tracing_on", 0644, d_tracer,
2178 &ring_buffers_off, &rb_simple_fops);
2179 if (!entry)
2180 pr_warning("Could not create debugfs 'tracing_on' entry\n");
2181
2182 return 0;
2183}
2184
2185fs_initcall(rb_init_debugfs);