7a8e76a3
SR
1/*
2 * Generic ring buffer
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6#include <linux/ring_buffer.h>
14131f2f 7#include <linux/trace_clock.h>
78d904b4 8#include <linux/ftrace_irq.h>
7a8e76a3
SR
9#include <linux/spinlock.h>
10#include <linux/debugfs.h>
11#include <linux/uaccess.h>
a81bd80a 12#include <linux/hardirq.h>
1744a21d 13#include <linux/kmemcheck.h>
7a8e76a3
SR
14#include <linux/module.h>
15#include <linux/percpu.h>
16#include <linux/mutex.h>
5a0e3ad6 17#include <linux/slab.h>
7a8e76a3
SR
18#include <linux/init.h>
19#include <linux/hash.h>
20#include <linux/list.h>
554f786e 21#include <linux/cpu.h>
7a8e76a3
SR
22#include <linux/fs.h>
23
79615760 24#include <asm/local.h>
182e9f5f
SR
25#include "trace.h"
26
d1b182a8
SR
27/*
 28 * The ring buffer header is special. We must manually keep it up to date.
29 */
30int ring_buffer_print_entry_header(struct trace_seq *s)
31{
32 int ret;
33
334d4169
LJ
34 ret = trace_seq_printf(s, "# compressed entry header\n");
35 ret = trace_seq_printf(s, "\ttype_len : 5 bits\n");
d1b182a8
SR
36 ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
37 ret = trace_seq_printf(s, "\tarray : 32 bits\n");
38 ret = trace_seq_printf(s, "\n");
39 ret = trace_seq_printf(s, "\tpadding : type == %d\n",
40 RINGBUF_TYPE_PADDING);
41 ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
42 RINGBUF_TYPE_TIME_EXTEND);
334d4169
LJ
43 ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
44 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
d1b182a8
SR
45
46 return ret;
47}
48
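/*
 * A rough illustration of that encoding (the real decode is in
 * rb_event_data_length() below): the 5-bit type_len counts the payload
 * in RB_ALIGNMENT (4 byte) units when it fits, otherwise the size is
 * stored in the first array word.
 *
 *	if (event->type_len)			// 1..28: small data event
 *		size = event->type_len * RB_ALIGNMENT;	// e.g. 3 -> 12 bytes
 *	else					// 0: size did not fit in 5 bits
 *		size = event->array[0];
 */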
5cc98548
SR
49/*
50 * The ring buffer is made up of a list of pages. A separate list of pages is
51 * allocated for each CPU. A writer may only write to a buffer that is
52 * associated with the CPU it is currently executing on. A reader may read
53 * from any per cpu buffer.
54 *
55 * The reader is special. For each per cpu buffer, the reader has its own
56 * reader page. When a reader has read the entire reader page, this reader
57 * page is swapped with another page in the ring buffer.
58 *
 59 * Now, as long as the writer is off the reader page, the reader can do
 60 * whatever it wants with that page. The writer will never write to that page
61 * again (as long as it is out of the ring buffer).
62 *
63 * Here's some silly ASCII art.
64 *
65 * +------+
66 * |reader| RING BUFFER
67 * |page |
68 * +------+ +---+ +---+ +---+
69 * | |-->| |-->| |
70 * +---+ +---+ +---+
71 * ^ |
72 * | |
73 * +---------------+
74 *
75 *
76 * +------+
77 * |reader| RING BUFFER
78 * |page |------------------v
79 * +------+ +---+ +---+ +---+
80 * | |-->| |-->| |
81 * +---+ +---+ +---+
82 * ^ |
83 * | |
84 * +---------------+
85 *
86 *
87 * +------+
88 * |reader| RING BUFFER
89 * |page |------------------v
90 * +------+ +---+ +---+ +---+
91 * ^ | |-->| |-->| |
92 * | +---+ +---+ +---+
93 * | |
94 * | |
95 * +------------------------------+
96 *
97 *
98 * +------+
99 * |buffer| RING BUFFER
100 * |page |------------------v
101 * +------+ +---+ +---+ +---+
102 * ^ | | | |-->| |
103 * | New +---+ +---+ +---+
104 * | Reader------^ |
105 * | page |
106 * +------------------------------+
107 *
108 *
109 * After we make this swap, the reader can hand this page off to the splice
110 * code and be done with it. It can even allocate a new page if it needs to
111 * and swap that into the ring buffer.
112 *
113 * We will be using cmpxchg soon to make all this lockless.
114 *
115 */
116
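/*
 * A sketch of the swap itself, with names simplified (the real code is
 * rb_head_page_replace() below and rb_get_reader_page() further down in
 * this file):
 *
 *	reader->list.next = rb_list_head(head->list.next);
 *	reader->list.prev = head->list.prev;
 *	// splice the reader page in, but only if the head page
 *	// has not been moved underneath us by the writer:
 *	cmpxchg(&head->list.prev->next,
 *		(unsigned long)&head->list | RB_PAGE_HEAD,
 *		(unsigned long)&reader->list);
 */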
033601a3
SR
117/*
118 * A fast way to enable or disable all ring buffers is to
119 * call tracing_on or tracing_off. Turning off the ring buffers
120 * prevents all ring buffers from being recorded to.
 121 * Turning this switch on makes it OK to write to the
 122 * ring buffer, provided the ring buffer itself is enabled.
123 *
 124 * There are three layers that must be on in order to write
125 * to the ring buffer.
126 *
127 * 1) This global flag must be set.
128 * 2) The ring buffer must be enabled for recording.
129 * 3) The per cpu buffer must be enabled for recording.
130 *
131 * In case of an anomaly, this global flag has a bit set that
 132 * will permanently disable all ring buffers.
133 */
134
135/*
136 * Global flag to disable all recording to ring buffers
137 * This has two bits: ON, DISABLED
138 *
139 * ON DISABLED
140 * ---- ----------
141 * 0 0 : ring buffers are off
142 * 1 0 : ring buffers are on
143 * X 1 : ring buffers are permanently disabled
144 */
145
146enum {
147 RB_BUFFERS_ON_BIT = 0,
148 RB_BUFFERS_DISABLED_BIT = 1,
149};
150
151enum {
152 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
153 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
154};
155
5e39841c 156static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
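/*
 * For example, tracing_is_on() below reduces to the test
 *
 *	ring_buffer_flags == RB_BUFFERS_ON
 *
 * which only holds while the ON bit is set and the DISABLED bit is
 * clear.  Once tracing_off_permanent() sets RB_BUFFERS_DISABLED_BIT,
 * that test can never succeed again, which is what makes the disable
 * permanent.
 */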
a3583244 157
474d32b6
SR
158#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
159
a3583244
SR
160/**
161 * tracing_on - enable all tracing buffers
162 *
163 * This function enables all tracing buffers that may have been
164 * disabled with tracing_off.
165 */
166void tracing_on(void)
167{
033601a3 168 set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
a3583244 169}
c4f50183 170EXPORT_SYMBOL_GPL(tracing_on);
a3583244
SR
171
172/**
173 * tracing_off - turn off all tracing buffers
174 *
175 * This function stops all tracing buffers from recording data.
176 * It does not disable any overhead the tracers themselves may
177 * be causing. This function simply causes all recording to
178 * the ring buffers to fail.
179 */
180void tracing_off(void)
181{
033601a3
SR
182 clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
183}
c4f50183 184EXPORT_SYMBOL_GPL(tracing_off);
033601a3
SR
185
186/**
187 * tracing_off_permanent - permanently disable ring buffers
188 *
189 * This function, once called, will disable all ring buffers
c3706f00 190 * permanently.
033601a3
SR
191 */
192void tracing_off_permanent(void)
193{
194 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
a3583244
SR
195}
196
988ae9d6
SR
197/**
198 * tracing_is_on - show state of ring buffers enabled
199 */
200int tracing_is_on(void)
201{
202 return ring_buffer_flags == RB_BUFFERS_ON;
203}
204EXPORT_SYMBOL_GPL(tracing_is_on);
205
e3d6bf0a 206#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
67d34724 207#define RB_ALIGNMENT 4U
334d4169 208#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
c7b09308 209#define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
334d4169 210
2271048d
SR
211#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
212# define RB_FORCE_8BYTE_ALIGNMENT 0
213# define RB_ARCH_ALIGNMENT RB_ALIGNMENT
214#else
215# define RB_FORCE_8BYTE_ALIGNMENT 1
216# define RB_ARCH_ALIGNMENT 8U
217#endif
218
334d4169
LJ
219/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
220#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
7a8e76a3
SR
221
222enum {
223 RB_LEN_TIME_EXTEND = 8,
224 RB_LEN_TIME_STAMP = 16,
225};
226
69d1b839
SR
227#define skip_time_extend(event) \
228 ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
229
2d622719
TZ
230static inline int rb_null_event(struct ring_buffer_event *event)
231{
a1863c21 232 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
2d622719
TZ
233}
234
235static void rb_event_set_padding(struct ring_buffer_event *event)
236{
a1863c21 237 /* padding has a NULL time_delta */
334d4169 238 event->type_len = RINGBUF_TYPE_PADDING;
2d622719
TZ
239 event->time_delta = 0;
240}
241
34a148bf 242static unsigned
2d622719 243rb_event_data_length(struct ring_buffer_event *event)
7a8e76a3
SR
244{
245 unsigned length;
246
334d4169
LJ
247 if (event->type_len)
248 length = event->type_len * RB_ALIGNMENT;
2d622719
TZ
249 else
250 length = event->array[0];
251 return length + RB_EVNT_HDR_SIZE;
252}
253
69d1b839
SR
254/*
255 * Return the length of the given event. Will return
256 * the length of the time extend if the event is a
257 * time extend.
258 */
259static inline unsigned
2d622719
TZ
260rb_event_length(struct ring_buffer_event *event)
261{
334d4169 262 switch (event->type_len) {
7a8e76a3 263 case RINGBUF_TYPE_PADDING:
2d622719
TZ
264 if (rb_null_event(event))
265 /* undefined */
266 return -1;
334d4169 267 return event->array[0] + RB_EVNT_HDR_SIZE;
7a8e76a3
SR
268
269 case RINGBUF_TYPE_TIME_EXTEND:
270 return RB_LEN_TIME_EXTEND;
271
272 case RINGBUF_TYPE_TIME_STAMP:
273 return RB_LEN_TIME_STAMP;
274
275 case RINGBUF_TYPE_DATA:
2d622719 276 return rb_event_data_length(event);
7a8e76a3
SR
277 default:
278 BUG();
279 }
280 /* not hit */
281 return 0;
282}
283
69d1b839
SR
284/*
285 * Return total length of time extend and data,
286 * or just the event length for all other events.
287 */
288static inline unsigned
289rb_event_ts_length(struct ring_buffer_event *event)
290{
291 unsigned len = 0;
292
293 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
294 /* time extends include the data event after it */
295 len = RB_LEN_TIME_EXTEND;
296 event = skip_time_extend(event);
297 }
298 return len + rb_event_length(event);
299}
300
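/*
 * For example, a time extend immediately followed by a data event with a
 * 12 byte payload gives:
 *
 *	rb_event_ts_length() == RB_LEN_TIME_EXTEND + rb_event_length(data)
 *	                     == 8 + (RB_EVNT_HDR_SIZE + 12) == 24 bytes
 */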
7a8e76a3
SR
301/**
302 * ring_buffer_event_length - return the length of the event
303 * @event: the event to get the length of
69d1b839
SR
304 *
305 * Returns the size of the data load of a data event.
306 * If the event is something other than a data event, it
307 * returns the size of the event itself. With the exception
308 * of a TIME EXTEND, where it still returns the size of the
309 * data load of the data event after it.
7a8e76a3
SR
310 */
311unsigned ring_buffer_event_length(struct ring_buffer_event *event)
312{
69d1b839
SR
313 unsigned length;
314
315 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
316 event = skip_time_extend(event);
317
318 length = rb_event_length(event);
334d4169 319 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
465634ad
RR
320 return length;
321 length -= RB_EVNT_HDR_SIZE;
322 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
323 length -= sizeof(event->array[0]);
324 return length;
7a8e76a3 325}
c4f50183 326EXPORT_SYMBOL_GPL(ring_buffer_event_length);
7a8e76a3
SR
327
328/* inline for ring buffer fast paths */
34a148bf 329static void *
7a8e76a3
SR
330rb_event_data(struct ring_buffer_event *event)
331{
69d1b839
SR
332 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
333 event = skip_time_extend(event);
334d4169 334 BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
7a8e76a3 335 /* If length is in len field, then array[0] has the data */
334d4169 336 if (event->type_len)
7a8e76a3
SR
337 return (void *)&event->array[0];
338 /* Otherwise length is in array[0] and array[1] has the data */
339 return (void *)&event->array[1];
340}
341
342/**
343 * ring_buffer_event_data - return the data of the event
344 * @event: the event to get the data from
345 */
346void *ring_buffer_event_data(struct ring_buffer_event *event)
347{
348 return rb_event_data(event);
349}
c4f50183 350EXPORT_SYMBOL_GPL(ring_buffer_event_data);
7a8e76a3
SR
351
352#define for_each_buffer_cpu(buffer, cpu) \
9e01c1b7 353 for_each_cpu(cpu, buffer->cpumask)
7a8e76a3
SR
354
355#define TS_SHIFT 27
356#define TS_MASK ((1ULL << TS_SHIFT) - 1)
357#define TS_DELTA_TEST (~TS_MASK)
358
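/*
 * A delta that does not fit in TS_MASK (test_time_stamp() below returns
 * true for it) is written as a separate time-extend event.  The split,
 * as done by rb_add_time_stamp() later in this file:
 *
 *	event->time_delta = delta & TS_MASK;	// low 27 bits
 *	event->array[0]   = delta >> TS_SHIFT;	// remaining high bits
 */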
66a8cb95
SR
359/* Flag when events were overwritten */
360#define RB_MISSED_EVENTS (1 << 31)
ff0ff84a
SR
361/* Missed count stored at end */
362#define RB_MISSED_STORED (1 << 30)
66a8cb95 363
abc9b56d 364struct buffer_data_page {
e4c2ce82 365 u64 time_stamp; /* page time stamp */
c3706f00 366 local_t commit; /* write committed index */
abc9b56d
SR
367 unsigned char data[]; /* data of buffer page */
368};
369
77ae365e
SR
370/*
371 * Note, the buffer_page list must be first. The buffer pages
372 * are allocated in cache lines, which means that each buffer
373 * page will be at the beginning of a cache line, and thus
374 * the least significant bits will be zero. We use this to
375 * add flags in the list struct pointers, to make the ring buffer
376 * lockless.
377 */
abc9b56d 378struct buffer_page {
778c55d4 379 struct list_head list; /* list of buffer pages */
abc9b56d 380 local_t write; /* index for next write */
6f807acd 381 unsigned read; /* index for next read */
778c55d4 382 local_t entries; /* entries on this page */
ff0ff84a 383 unsigned long real_end; /* real end of data */
abc9b56d 384 struct buffer_data_page *page; /* Actual data page */
7a8e76a3
SR
385};
386
77ae365e
SR
387/*
388 * The buffer page counters, write and entries, must be reset
389 * atomically when crossing page boundaries. To synchronize this
 390 * update, two counters are packed into one number. One is
391 * the actual counter for the write position or count on the page.
392 *
 393 * The other is a counter of updaters. Before an update happens,
 394 * the updater part of the counter is incremented. This will
 395 * allow the updater to update the counter atomically.
396 *
397 * The counter is 20 bits, and the state data is 12.
398 */
399#define RB_WRITE_MASK 0xfffff
400#define RB_WRITE_INTCNT (1 << 20)
401
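/*
 * Example of the split in use (see rb_tail_page_update() below): an
 * updater first bumps the updater count held in the high bits,
 *
 *	old_write = local_add_return(RB_WRITE_INTCNT, &bpage->write);
 *
 * while the real write index is always recovered with the mask,
 *
 *	index = local_read(&bpage->write) & RB_WRITE_MASK;
 *
 * so a nested interrupt that also updates the page can be detected.
 */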
044fa782 402static void rb_init_page(struct buffer_data_page *bpage)
abc9b56d 403{
044fa782 404 local_set(&bpage->commit, 0);
abc9b56d
SR
405}
406
474d32b6
SR
407/**
408 * ring_buffer_page_len - the size of data on the page.
409 * @page: The page to read
410 *
411 * Returns the amount of data on the page, including buffer page header.
412 */
ef7a4a16
SR
413size_t ring_buffer_page_len(void *page)
414{
474d32b6
SR
415 return local_read(&((struct buffer_data_page *)page)->commit)
416 + BUF_PAGE_HDR_SIZE;
ef7a4a16
SR
417}
418
ed56829c
SR
419/*
420 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
421 * this issue out.
422 */
34a148bf 423static void free_buffer_page(struct buffer_page *bpage)
ed56829c 424{
34a148bf 425 free_page((unsigned long)bpage->page);
e4c2ce82 426 kfree(bpage);
ed56829c
SR
427}
428
7a8e76a3
SR
429/*
430 * We need to fit the time_stamp delta into 27 bits.
431 */
432static inline int test_time_stamp(u64 delta)
433{
434 if (delta & TS_DELTA_TEST)
435 return 1;
436 return 0;
437}
438
474d32b6 439#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
7a8e76a3 440
be957c44
SR
441/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
442#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
443
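/*
 * Rough numbers, assuming 4096-byte pages and a 16-byte buffer_data_page
 * header (u64 time_stamp plus local_t commit on a 64-bit build):
 *
 *	BUF_PAGE_SIZE     = 4096 - 16 = 4080	// usable bytes per page
 *	BUF_MAX_DATA_SIZE = 4080 -  8 = 4072	// largest single payload
 */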
d1b182a8
SR
444int ring_buffer_print_page_header(struct trace_seq *s)
445{
446 struct buffer_data_page field;
447 int ret;
448
449 ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
26a50744
TZ
450 "offset:0;\tsize:%u;\tsigned:%u;\n",
451 (unsigned int)sizeof(field.time_stamp),
452 (unsigned int)is_signed_type(u64));
d1b182a8
SR
453
454 ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
26a50744 455 "offset:%u;\tsize:%u;\tsigned:%u;\n",
d1b182a8 456 (unsigned int)offsetof(typeof(field), commit),
26a50744
TZ
457 (unsigned int)sizeof(field.commit),
458 (unsigned int)is_signed_type(long));
d1b182a8 459
66a8cb95
SR
460 ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
461 "offset:%u;\tsize:%u;\tsigned:%u;\n",
462 (unsigned int)offsetof(typeof(field), commit),
463 1,
464 (unsigned int)is_signed_type(long));
465
d1b182a8 466 ret = trace_seq_printf(s, "\tfield: char data;\t"
26a50744 467 "offset:%u;\tsize:%u;\tsigned:%u;\n",
d1b182a8 468 (unsigned int)offsetof(typeof(field), data),
26a50744
TZ
469 (unsigned int)BUF_PAGE_SIZE,
470 (unsigned int)is_signed_type(char));
d1b182a8
SR
471
472 return ret;
473}
474
7a8e76a3
SR
475/*
476 * head_page == tail_page && head == tail then buffer is empty.
477 */
478struct ring_buffer_per_cpu {
479 int cpu;
985023de 480 atomic_t record_disabled;
7a8e76a3 481 struct ring_buffer *buffer;
77ae365e 482 spinlock_t reader_lock; /* serialize readers */
445c8951 483 arch_spinlock_t lock;
7a8e76a3 484 struct lock_class_key lock_key;
3adc54fa 485 struct list_head *pages;
6f807acd
SR
486 struct buffer_page *head_page; /* read from head */
487 struct buffer_page *tail_page; /* write to tail */
c3706f00 488 struct buffer_page *commit_page; /* committed pages */
d769041f 489 struct buffer_page *reader_page;
66a8cb95
SR
490 unsigned long lost_events;
491 unsigned long last_overrun;
77ae365e
SR
492 local_t commit_overrun;
493 local_t overrun;
e4906eff 494 local_t entries;
fa743953
SR
495 local_t committing;
496 local_t commits;
77ae365e 497 unsigned long read;
7a8e76a3
SR
498 u64 write_stamp;
499 u64 read_stamp;
7a8e76a3
SR
500};
501
502struct ring_buffer {
7a8e76a3
SR
503 unsigned pages;
504 unsigned flags;
505 int cpus;
7a8e76a3 506 atomic_t record_disabled;
00f62f61 507 cpumask_var_t cpumask;
7a8e76a3 508
1f8a6a10
PZ
509 struct lock_class_key *reader_lock_key;
510
7a8e76a3
SR
511 struct mutex mutex;
512
513 struct ring_buffer_per_cpu **buffers;
554f786e 514
59222efe 515#ifdef CONFIG_HOTPLUG_CPU
554f786e
SR
516 struct notifier_block cpu_notify;
517#endif
37886f6a 518 u64 (*clock)(void);
7a8e76a3
SR
519};
520
521struct ring_buffer_iter {
522 struct ring_buffer_per_cpu *cpu_buffer;
523 unsigned long head;
524 struct buffer_page *head_page;
492a74f4
SR
525 struct buffer_page *cache_reader_page;
526 unsigned long cache_read;
7a8e76a3
SR
527 u64 read_stamp;
528};
529
f536aafc 530/* buffer may be either ring_buffer or ring_buffer_per_cpu */
077c5407
SR
531#define RB_WARN_ON(b, cond) \
532 ({ \
533 int _____ret = unlikely(cond); \
534 if (_____ret) { \
535 if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
536 struct ring_buffer_per_cpu *__b = \
537 (void *)b; \
538 atomic_inc(&__b->buffer->record_disabled); \
539 } else \
540 atomic_inc(&b->record_disabled); \
541 WARN_ON(1); \
542 } \
543 _____ret; \
3e89c7bb 544 })
f536aafc 545
37886f6a
SR
546/* Up this if you want to test the TIME_EXTENTS and normalization */
547#define DEBUG_SHIFT 0
548
6d3f1e12 549static inline u64 rb_time_stamp(struct ring_buffer *buffer)
88eb0125
SR
550{
551 /* shift to debug/test normalization and TIME_EXTENTS */
552 return buffer->clock() << DEBUG_SHIFT;
553}
554
37886f6a
SR
555u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
556{
557 u64 time;
558
559 preempt_disable_notrace();
6d3f1e12 560 time = rb_time_stamp(buffer);
37886f6a
SR
561 preempt_enable_no_resched_notrace();
562
563 return time;
564}
565EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
566
567void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
568 int cpu, u64 *ts)
569{
570 /* Just stupid testing the normalize function and deltas */
571 *ts >>= DEBUG_SHIFT;
572}
573EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
574
77ae365e
SR
575/*
576 * Making the ring buffer lockless makes things tricky.
 577 * Writes only happen on the CPU that they are on, so they
 578 * only need to worry about interrupts. Reads, however, can
 579 * happen on any CPU.
580 *
581 * The reader page is always off the ring buffer, but when the
582 * reader finishes with a page, it needs to swap its page with
583 * a new one from the buffer. The reader needs to take from
584 * the head (writes go to the tail). But if a writer is in overwrite
585 * mode and wraps, it must push the head page forward.
586 *
587 * Here lies the problem.
588 *
589 * The reader must be careful to replace only the head page, and
590 * not another one. As described at the top of the file in the
591 * ASCII art, the reader sets its old page to point to the next
592 * page after head. It then sets the page after head to point to
593 * the old reader page. But if the writer moves the head page
594 * during this operation, the reader could end up with the tail.
595 *
596 * We use cmpxchg to help prevent this race. We also do something
597 * special with the page before head. We set the LSB to 1.
598 *
599 * When the writer must push the page forward, it will clear the
600 * bit that points to the head page, move the head, and then set
601 * the bit that points to the new head page.
602 *
603 * We also don't want an interrupt coming in and moving the head
604 * page on another writer. Thus we use the second LSB to catch
605 * that too. Thus:
606 *
607 * head->list->prev->next bit 1 bit 0
608 * ------- -------
609 * Normal page 0 0
610 * Points to head page 0 1
611 * New head page 1 0
612 *
613 * Note we can not trust the prev pointer of the head page, because:
614 *
615 * +----+ +-----+ +-----+
616 * | |------>| T |---X--->| N |
617 * | |<------| | | |
618 * +----+ +-----+ +-----+
619 * ^ ^ |
620 * | +-----+ | |
621 * +----------| R |----------+ |
622 * | |<-----------+
623 * +-----+
624 *
625 * Key: ---X--> HEAD flag set in pointer
626 * T Tail page
627 * R Reader page
628 * N Next page
629 *
630 * (see __rb_reserve_next() to see where this happens)
631 *
632 * What the above shows is that the reader just swapped out
633 * the reader page with a page in the buffer, but before it
634 * could make the new header point back to the new page added
635 * it was preempted by a writer. The writer moved forward onto
636 * the new page added by the reader and is about to move forward
637 * again.
638 *
639 * You can see, it is legitimate for the previous pointer of
640 * the head (or any page) not to point back to itself. But only
 641 * temporarily.
642 */
643
644#define RB_PAGE_NORMAL 0UL
645#define RB_PAGE_HEAD 1UL
646#define RB_PAGE_UPDATE 2UL
647
648
649#define RB_FLAG_MASK 3UL
650
651/* PAGE_MOVED is not part of the mask */
652#define RB_PAGE_MOVED 4UL
653
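/*
 * Because buffer pages are allocated cache-line aligned, the two low
 * bits of a ->next pointer are free to carry the state above.  For
 * example:
 *
 *	next = (struct list_head *)((unsigned long)&head->list | RB_PAGE_HEAD);
 *	page = rb_list_head(next);			// strips RB_FLAG_MASK
 *	flag = (unsigned long)next & RB_FLAG_MASK;	// RB_PAGE_HEAD here
 */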
654/*
655 * rb_list_head - remove any bit
656 */
657static struct list_head *rb_list_head(struct list_head *list)
658{
659 unsigned long val = (unsigned long)list;
660
661 return (struct list_head *)(val & ~RB_FLAG_MASK);
662}
663
664/*
6d3f1e12 665 * rb_is_head_page - test if the given page is the head page
77ae365e
SR
666 *
667 * Because the reader may move the head_page pointer, we can
668 * not trust what the head page is (it may be pointing to
669 * the reader page). But if the next page is a header page,
670 * its flags will be non zero.
671 */
 672static inline int
673rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
674 struct buffer_page *page, struct list_head *list)
675{
676 unsigned long val;
677
678 val = (unsigned long)list->next;
679
680 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
681 return RB_PAGE_MOVED;
682
683 return val & RB_FLAG_MASK;
684}
685
686/*
687 * rb_is_reader_page
688 *
 689 * The unique thing about the reader page is that, if the
690 * writer is ever on it, the previous pointer never points
691 * back to the reader page.
692 */
693static int rb_is_reader_page(struct buffer_page *page)
694{
695 struct list_head *list = page->list.prev;
696
697 return rb_list_head(list->next) != &page->list;
698}
699
700/*
701 * rb_set_list_to_head - set a list_head to be pointing to head.
702 */
703static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
704 struct list_head *list)
705{
706 unsigned long *ptr;
707
708 ptr = (unsigned long *)&list->next;
709 *ptr |= RB_PAGE_HEAD;
710 *ptr &= ~RB_PAGE_UPDATE;
711}
712
713/*
714 * rb_head_page_activate - sets up head page
715 */
716static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
717{
718 struct buffer_page *head;
719
720 head = cpu_buffer->head_page;
721 if (!head)
722 return;
723
724 /*
725 * Set the previous list pointer to have the HEAD flag.
726 */
727 rb_set_list_to_head(cpu_buffer, head->list.prev);
728}
729
730static void rb_list_head_clear(struct list_head *list)
731{
732 unsigned long *ptr = (unsigned long *)&list->next;
733
734 *ptr &= ~RB_FLAG_MASK;
735}
736
737/*
 738 * rb_head_page_deactivate - clears head page ptr (for free list)
739 */
740static void
741rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
742{
743 struct list_head *hd;
744
745 /* Go through the whole list and clear any pointers found. */
746 rb_list_head_clear(cpu_buffer->pages);
747
748 list_for_each(hd, cpu_buffer->pages)
749 rb_list_head_clear(hd);
750}
751
752static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
753 struct buffer_page *head,
754 struct buffer_page *prev,
755 int old_flag, int new_flag)
756{
757 struct list_head *list;
758 unsigned long val = (unsigned long)&head->list;
759 unsigned long ret;
760
761 list = &prev->list;
762
763 val &= ~RB_FLAG_MASK;
764
08a40816
SR
765 ret = cmpxchg((unsigned long *)&list->next,
766 val | old_flag, val | new_flag);
77ae365e
SR
767
768 /* check if the reader took the page */
769 if ((ret & ~RB_FLAG_MASK) != val)
770 return RB_PAGE_MOVED;
771
772 return ret & RB_FLAG_MASK;
773}
774
775static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
776 struct buffer_page *head,
777 struct buffer_page *prev,
778 int old_flag)
779{
780 return rb_head_page_set(cpu_buffer, head, prev,
781 old_flag, RB_PAGE_UPDATE);
782}
783
784static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
785 struct buffer_page *head,
786 struct buffer_page *prev,
787 int old_flag)
788{
789 return rb_head_page_set(cpu_buffer, head, prev,
790 old_flag, RB_PAGE_HEAD);
791}
792
793static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
794 struct buffer_page *head,
795 struct buffer_page *prev,
796 int old_flag)
797{
798 return rb_head_page_set(cpu_buffer, head, prev,
799 old_flag, RB_PAGE_NORMAL);
800}
801
802static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
803 struct buffer_page **bpage)
804{
805 struct list_head *p = rb_list_head((*bpage)->list.next);
806
807 *bpage = list_entry(p, struct buffer_page, list);
808}
809
810static struct buffer_page *
811rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
812{
813 struct buffer_page *head;
814 struct buffer_page *page;
815 struct list_head *list;
816 int i;
817
818 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
819 return NULL;
820
821 /* sanity check */
822 list = cpu_buffer->pages;
823 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
824 return NULL;
825
826 page = head = cpu_buffer->head_page;
827 /*
828 * It is possible that the writer moves the header behind
829 * where we started, and we miss in one loop.
830 * A second loop should grab the header, but we'll do
831 * three loops just because I'm paranoid.
832 */
833 for (i = 0; i < 3; i++) {
834 do {
835 if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
836 cpu_buffer->head_page = page;
837 return page;
838 }
839 rb_inc_page(cpu_buffer, &page);
840 } while (page != head);
841 }
842
843 RB_WARN_ON(cpu_buffer, 1);
844
845 return NULL;
846}
847
848static int rb_head_page_replace(struct buffer_page *old,
849 struct buffer_page *new)
850{
851 unsigned long *ptr = (unsigned long *)&old->list.prev->next;
852 unsigned long val;
853 unsigned long ret;
854
855 val = *ptr & ~RB_FLAG_MASK;
856 val |= RB_PAGE_HEAD;
857
08a40816 858 ret = cmpxchg(ptr, val, (unsigned long)&new->list);
77ae365e
SR
859
860 return ret == val;
861}
862
863/*
864 * rb_tail_page_update - move the tail page forward
865 *
866 * Returns 1 if moved tail page, 0 if someone else did.
867 */
868static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
869 struct buffer_page *tail_page,
870 struct buffer_page *next_page)
871{
872 struct buffer_page *old_tail;
873 unsigned long old_entries;
874 unsigned long old_write;
875 int ret = 0;
876
877 /*
878 * The tail page now needs to be moved forward.
879 *
880 * We need to reset the tail page, but without messing
881 * with possible erasing of data brought in by interrupts
882 * that have moved the tail page and are currently on it.
883 *
884 * We add a counter to the write field to denote this.
885 */
886 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
887 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
888
889 /*
890 * Just make sure we have seen our old_write and synchronize
891 * with any interrupts that come in.
892 */
893 barrier();
894
895 /*
896 * If the tail page is still the same as what we think
897 * it is, then it is up to us to update the tail
898 * pointer.
899 */
900 if (tail_page == cpu_buffer->tail_page) {
901 /* Zero the write counter */
902 unsigned long val = old_write & ~RB_WRITE_MASK;
903 unsigned long eval = old_entries & ~RB_WRITE_MASK;
904
905 /*
906 * This will only succeed if an interrupt did
 907 * not come in and change it, in which case we
908 * do not want to modify it.
da706d8b
LJ
909 *
910 * We add (void) to let the compiler know that we do not care
911 * about the return value of these functions. We use the
912 * cmpxchg to only update if an interrupt did not already
913 * do it for us. If the cmpxchg fails, we don't care.
77ae365e 914 */
da706d8b
LJ
915 (void)local_cmpxchg(&next_page->write, old_write, val);
916 (void)local_cmpxchg(&next_page->entries, old_entries, eval);
77ae365e
SR
917
918 /*
919 * No need to worry about races with clearing out the commit.
 920 * It can only increment when a commit takes place. But that
 921 * only happens in the outermost nested commit.
922 */
923 local_set(&next_page->page->commit, 0);
924
925 old_tail = cmpxchg(&cpu_buffer->tail_page,
926 tail_page, next_page);
927
928 if (old_tail == tail_page)
929 ret = 1;
930 }
931
932 return ret;
933}
934
935static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
936 struct buffer_page *bpage)
937{
938 unsigned long val = (unsigned long)bpage;
939
940 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
941 return 1;
942
943 return 0;
944}
945
946/**
947 * rb_check_list - make sure a pointer to a list has the last bits zero
948 */
949static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
950 struct list_head *list)
951{
952 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
953 return 1;
954 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
955 return 1;
956 return 0;
957}
958
7a8e76a3
SR
959/**
 960 * rb_check_pages - integrity check of buffer pages
961 * @cpu_buffer: CPU buffer with pages to test
962 *
c3706f00 963 * As a safety measure we check to make sure the data pages have not
7a8e76a3
SR
964 * been corrupted.
965 */
966static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
967{
3adc54fa 968 struct list_head *head = cpu_buffer->pages;
044fa782 969 struct buffer_page *bpage, *tmp;
7a8e76a3 970
77ae365e
SR
971 rb_head_page_deactivate(cpu_buffer);
972
3e89c7bb
SR
973 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
974 return -1;
975 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
976 return -1;
7a8e76a3 977
77ae365e
SR
978 if (rb_check_list(cpu_buffer, head))
979 return -1;
980
044fa782 981 list_for_each_entry_safe(bpage, tmp, head, list) {
3e89c7bb 982 if (RB_WARN_ON(cpu_buffer,
044fa782 983 bpage->list.next->prev != &bpage->list))
3e89c7bb
SR
984 return -1;
985 if (RB_WARN_ON(cpu_buffer,
044fa782 986 bpage->list.prev->next != &bpage->list))
3e89c7bb 987 return -1;
77ae365e
SR
988 if (rb_check_list(cpu_buffer, &bpage->list))
989 return -1;
7a8e76a3
SR
990 }
991
77ae365e
SR
992 rb_head_page_activate(cpu_buffer);
993
7a8e76a3
SR
994 return 0;
995}
996
7a8e76a3
SR
997static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
998 unsigned nr_pages)
999{
044fa782 1000 struct buffer_page *bpage, *tmp;
7a8e76a3
SR
1001 unsigned long addr;
1002 LIST_HEAD(pages);
1003 unsigned i;
1004
3adc54fa
SR
1005 WARN_ON(!nr_pages);
1006
7a8e76a3 1007 for (i = 0; i < nr_pages; i++) {
044fa782 1008 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
aa1e0e3b 1009 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
044fa782 1010 if (!bpage)
e4c2ce82 1011 goto free_pages;
77ae365e
SR
1012
1013 rb_check_bpage(cpu_buffer, bpage);
1014
044fa782 1015 list_add(&bpage->list, &pages);
e4c2ce82 1016
7a8e76a3
SR
1017 addr = __get_free_page(GFP_KERNEL);
1018 if (!addr)
1019 goto free_pages;
044fa782
SR
1020 bpage->page = (void *)addr;
1021 rb_init_page(bpage->page);
7a8e76a3
SR
1022 }
1023
3adc54fa
SR
1024 /*
1025 * The ring buffer page list is a circular list that does not
1026 * start and end with a list head. All page list items point to
1027 * other pages.
1028 */
1029 cpu_buffer->pages = pages.next;
1030 list_del(&pages);
7a8e76a3
SR
1031
1032 rb_check_pages(cpu_buffer);
1033
1034 return 0;
1035
1036 free_pages:
044fa782
SR
1037 list_for_each_entry_safe(bpage, tmp, &pages, list) {
1038 list_del_init(&bpage->list);
1039 free_buffer_page(bpage);
7a8e76a3
SR
1040 }
1041 return -ENOMEM;
1042}
1043
1044static struct ring_buffer_per_cpu *
1045rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
1046{
1047 struct ring_buffer_per_cpu *cpu_buffer;
044fa782 1048 struct buffer_page *bpage;
d769041f 1049 unsigned long addr;
7a8e76a3
SR
1050 int ret;
1051
1052 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1053 GFP_KERNEL, cpu_to_node(cpu));
1054 if (!cpu_buffer)
1055 return NULL;
1056
1057 cpu_buffer->cpu = cpu;
1058 cpu_buffer->buffer = buffer;
f83c9d0f 1059 spin_lock_init(&cpu_buffer->reader_lock);
1f8a6a10 1060 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
edc35bd7 1061 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7a8e76a3 1062
044fa782 1063 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
e4c2ce82 1064 GFP_KERNEL, cpu_to_node(cpu));
044fa782 1065 if (!bpage)
e4c2ce82
SR
1066 goto fail_free_buffer;
1067
77ae365e
SR
1068 rb_check_bpage(cpu_buffer, bpage);
1069
044fa782 1070 cpu_buffer->reader_page = bpage;
d769041f
SR
1071 addr = __get_free_page(GFP_KERNEL);
1072 if (!addr)
e4c2ce82 1073 goto fail_free_reader;
044fa782
SR
1074 bpage->page = (void *)addr;
1075 rb_init_page(bpage->page);
e4c2ce82 1076
d769041f 1077 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
d769041f 1078
7a8e76a3
SR
1079 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
1080 if (ret < 0)
d769041f 1081 goto fail_free_reader;
7a8e76a3
SR
1082
1083 cpu_buffer->head_page
3adc54fa 1084 = list_entry(cpu_buffer->pages, struct buffer_page, list);
bf41a158 1085 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
7a8e76a3 1086
77ae365e
SR
1087 rb_head_page_activate(cpu_buffer);
1088
7a8e76a3
SR
1089 return cpu_buffer;
1090
d769041f
SR
1091 fail_free_reader:
1092 free_buffer_page(cpu_buffer->reader_page);
1093
7a8e76a3
SR
1094 fail_free_buffer:
1095 kfree(cpu_buffer);
1096 return NULL;
1097}
1098
1099static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1100{
3adc54fa 1101 struct list_head *head = cpu_buffer->pages;
044fa782 1102 struct buffer_page *bpage, *tmp;
7a8e76a3 1103
d769041f
SR
1104 free_buffer_page(cpu_buffer->reader_page);
1105
77ae365e
SR
1106 rb_head_page_deactivate(cpu_buffer);
1107
3adc54fa
SR
1108 if (head) {
1109 list_for_each_entry_safe(bpage, tmp, head, list) {
1110 list_del_init(&bpage->list);
1111 free_buffer_page(bpage);
1112 }
1113 bpage = list_entry(head, struct buffer_page, list);
044fa782 1114 free_buffer_page(bpage);
7a8e76a3 1115 }
3adc54fa 1116
7a8e76a3
SR
1117 kfree(cpu_buffer);
1118}
1119
59222efe 1120#ifdef CONFIG_HOTPLUG_CPU
09c9e84d
FW
1121static int rb_cpu_notify(struct notifier_block *self,
1122 unsigned long action, void *hcpu);
554f786e
SR
1123#endif
1124
7a8e76a3
SR
1125/**
1126 * ring_buffer_alloc - allocate a new ring_buffer
68814b58 1127 * @size: the size in bytes per cpu that is needed.
7a8e76a3
SR
1128 * @flags: attributes to set for the ring buffer.
1129 *
1130 * Currently the only flag that is available is the RB_FL_OVERWRITE
1131 * flag. This flag means that the buffer will overwrite old data
1132 * when the buffer wraps. If this flag is not set, the buffer will
1133 * drop data when the tail hits the head.
1134 */
1f8a6a10
PZ
1135struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1136 struct lock_class_key *key)
7a8e76a3
SR
1137{
1138 struct ring_buffer *buffer;
1139 int bsize;
1140 int cpu;
1141
1142 /* keep it in its own cache line */
1143 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1144 GFP_KERNEL);
1145 if (!buffer)
1146 return NULL;
1147
9e01c1b7
RR
1148 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1149 goto fail_free_buffer;
1150
7a8e76a3
SR
1151 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1152 buffer->flags = flags;
37886f6a 1153 buffer->clock = trace_clock_local;
1f8a6a10 1154 buffer->reader_lock_key = key;
7a8e76a3
SR
1155
1156 /* need at least two pages */
5f78abee
SR
1157 if (buffer->pages < 2)
1158 buffer->pages = 2;
7a8e76a3 1159
3bf832ce
FW
1160 /*
 1161 * In the non-hotplug-CPU case, if the ring buffer is allocated
 1162 * in an early initcall, it will not be notified of secondary CPUs.
 1163 * In that case, we need to allocate for all possible CPUs.
1164 */
1165#ifdef CONFIG_HOTPLUG_CPU
554f786e
SR
1166 get_online_cpus();
1167 cpumask_copy(buffer->cpumask, cpu_online_mask);
3bf832ce
FW
1168#else
1169 cpumask_copy(buffer->cpumask, cpu_possible_mask);
1170#endif
7a8e76a3
SR
1171 buffer->cpus = nr_cpu_ids;
1172
1173 bsize = sizeof(void *) * nr_cpu_ids;
1174 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1175 GFP_KERNEL);
1176 if (!buffer->buffers)
9e01c1b7 1177 goto fail_free_cpumask;
7a8e76a3
SR
1178
1179 for_each_buffer_cpu(buffer, cpu) {
1180 buffer->buffers[cpu] =
1181 rb_allocate_cpu_buffer(buffer, cpu);
1182 if (!buffer->buffers[cpu])
1183 goto fail_free_buffers;
1184 }
1185
59222efe 1186#ifdef CONFIG_HOTPLUG_CPU
554f786e
SR
1187 buffer->cpu_notify.notifier_call = rb_cpu_notify;
1188 buffer->cpu_notify.priority = 0;
1189 register_cpu_notifier(&buffer->cpu_notify);
1190#endif
1191
1192 put_online_cpus();
7a8e76a3
SR
1193 mutex_init(&buffer->mutex);
1194
1195 return buffer;
1196
1197 fail_free_buffers:
1198 for_each_buffer_cpu(buffer, cpu) {
1199 if (buffer->buffers[cpu])
1200 rb_free_cpu_buffer(buffer->buffers[cpu]);
1201 }
1202 kfree(buffer->buffers);
1203
9e01c1b7
RR
1204 fail_free_cpumask:
1205 free_cpumask_var(buffer->cpumask);
554f786e 1206 put_online_cpus();
9e01c1b7 1207
7a8e76a3
SR
1208 fail_free_buffer:
1209 kfree(buffer);
1210 return NULL;
1211}
1f8a6a10 1212EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
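/*
 * Typical use is through the ring_buffer_alloc() wrapper declared in
 * linux/ring_buffer.h, which supplies the lock class key.  A sketch:
 *
 *	struct ring_buffer *buffer;
 *
 *	buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);	// ~1MB per cpu
 *	if (!buffer)
 *		return -ENOMEM;
 *
 * The size is rounded up to whole BUF_PAGE_SIZE pages, with a minimum of
 * two pages per cpu.
 */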
7a8e76a3
SR
1213
1214/**
1215 * ring_buffer_free - free a ring buffer.
1216 * @buffer: the buffer to free.
1217 */
1218void
1219ring_buffer_free(struct ring_buffer *buffer)
1220{
1221 int cpu;
1222
554f786e
SR
1223 get_online_cpus();
1224
59222efe 1225#ifdef CONFIG_HOTPLUG_CPU
554f786e
SR
1226 unregister_cpu_notifier(&buffer->cpu_notify);
1227#endif
1228
7a8e76a3
SR
1229 for_each_buffer_cpu(buffer, cpu)
1230 rb_free_cpu_buffer(buffer->buffers[cpu]);
1231
554f786e
SR
1232 put_online_cpus();
1233
bd3f0221 1234 kfree(buffer->buffers);
9e01c1b7
RR
1235 free_cpumask_var(buffer->cpumask);
1236
7a8e76a3
SR
1237 kfree(buffer);
1238}
c4f50183 1239EXPORT_SYMBOL_GPL(ring_buffer_free);
7a8e76a3 1240
37886f6a
SR
1241void ring_buffer_set_clock(struct ring_buffer *buffer,
1242 u64 (*clock)(void))
1243{
1244 buffer->clock = clock;
1245}
1246
7a8e76a3
SR
1247static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1248
1249static void
1250rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
1251{
044fa782 1252 struct buffer_page *bpage;
7a8e76a3
SR
1253 struct list_head *p;
1254 unsigned i;
1255
f7112949 1256 spin_lock_irq(&cpu_buffer->reader_lock);
77ae365e
SR
1257 rb_head_page_deactivate(cpu_buffer);
1258
7a8e76a3 1259 for (i = 0; i < nr_pages; i++) {
3adc54fa 1260 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
292f60c0 1261 goto out;
3adc54fa 1262 p = cpu_buffer->pages->next;
044fa782
SR
1263 bpage = list_entry(p, struct buffer_page, list);
1264 list_del_init(&bpage->list);
1265 free_buffer_page(bpage);
7a8e76a3 1266 }
3adc54fa 1267 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
292f60c0 1268 goto out;
7a8e76a3
SR
1269
1270 rb_reset_cpu(cpu_buffer);
7a8e76a3
SR
1271 rb_check_pages(cpu_buffer);
1272
292f60c0 1273out:
dd7f5943 1274 spin_unlock_irq(&cpu_buffer->reader_lock);
7a8e76a3
SR
1275}
1276
1277static void
1278rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
1279 struct list_head *pages, unsigned nr_pages)
1280{
044fa782 1281 struct buffer_page *bpage;
7a8e76a3
SR
1282 struct list_head *p;
1283 unsigned i;
1284
77ae365e
SR
1285 spin_lock_irq(&cpu_buffer->reader_lock);
1286 rb_head_page_deactivate(cpu_buffer);
1287
7a8e76a3 1288 for (i = 0; i < nr_pages; i++) {
3e89c7bb 1289 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
292f60c0 1290 goto out;
7a8e76a3 1291 p = pages->next;
044fa782
SR
1292 bpage = list_entry(p, struct buffer_page, list);
1293 list_del_init(&bpage->list);
3adc54fa 1294 list_add_tail(&bpage->list, cpu_buffer->pages);
7a8e76a3
SR
1295 }
1296 rb_reset_cpu(cpu_buffer);
7a8e76a3
SR
1297 rb_check_pages(cpu_buffer);
1298
292f60c0 1299out:
dd7f5943 1300 spin_unlock_irq(&cpu_buffer->reader_lock);
7a8e76a3
SR
1301}
1302
1303/**
1304 * ring_buffer_resize - resize the ring buffer
1305 * @buffer: the buffer to resize.
1306 * @size: the new size.
1307 *
7a8e76a3
SR
1308 * Minimum size is 2 * BUF_PAGE_SIZE.
1309 *
1310 * Returns -1 on failure.
1311 */
1312int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
1313{
1314 struct ring_buffer_per_cpu *cpu_buffer;
1315 unsigned nr_pages, rm_pages, new_pages;
044fa782 1316 struct buffer_page *bpage, *tmp;
7a8e76a3
SR
1317 unsigned long buffer_size;
1318 unsigned long addr;
1319 LIST_HEAD(pages);
1320 int i, cpu;
1321
ee51a1de
IM
1322 /*
1323 * Always succeed at resizing a non-existent buffer:
1324 */
1325 if (!buffer)
1326 return size;
1327
7a8e76a3
SR
1328 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1329 size *= BUF_PAGE_SIZE;
1330 buffer_size = buffer->pages * BUF_PAGE_SIZE;
1331
1332 /* we need a minimum of two pages */
1333 if (size < BUF_PAGE_SIZE * 2)
1334 size = BUF_PAGE_SIZE * 2;
1335
1336 if (size == buffer_size)
1337 return size;
1338
18421015
SR
1339 atomic_inc(&buffer->record_disabled);
1340
1341 /* Make sure all writers are done with this buffer. */
1342 synchronize_sched();
1343
7a8e76a3 1344 mutex_lock(&buffer->mutex);
554f786e 1345 get_online_cpus();
7a8e76a3
SR
1346
1347 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1348
1349 if (size < buffer_size) {
1350
1351 /* easy case, just free pages */
554f786e
SR
1352 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
1353 goto out_fail;
7a8e76a3
SR
1354
1355 rm_pages = buffer->pages - nr_pages;
1356
1357 for_each_buffer_cpu(buffer, cpu) {
1358 cpu_buffer = buffer->buffers[cpu];
1359 rb_remove_pages(cpu_buffer, rm_pages);
1360 }
1361 goto out;
1362 }
1363
1364 /*
1365 * This is a bit more difficult. We only want to add pages
1366 * when we can allocate enough for all CPUs. We do this
1367 * by allocating all the pages and storing them on a local
 1368 * linked list. If we succeed in our allocation, then we
 1369 * add these pages to the cpu_buffers. Otherwise we just free
 1370 * them all and return -ENOMEM.
1371 */
554f786e
SR
1372 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
1373 goto out_fail;
f536aafc 1374
7a8e76a3
SR
1375 new_pages = nr_pages - buffer->pages;
1376
1377 for_each_buffer_cpu(buffer, cpu) {
1378 for (i = 0; i < new_pages; i++) {
044fa782 1379 bpage = kzalloc_node(ALIGN(sizeof(*bpage),
e4c2ce82
SR
1380 cache_line_size()),
1381 GFP_KERNEL, cpu_to_node(cpu));
044fa782 1382 if (!bpage)
e4c2ce82 1383 goto free_pages;
044fa782 1384 list_add(&bpage->list, &pages);
7a8e76a3
SR
1385 addr = __get_free_page(GFP_KERNEL);
1386 if (!addr)
1387 goto free_pages;
044fa782
SR
1388 bpage->page = (void *)addr;
1389 rb_init_page(bpage->page);
7a8e76a3
SR
1390 }
1391 }
1392
1393 for_each_buffer_cpu(buffer, cpu) {
1394 cpu_buffer = buffer->buffers[cpu];
1395 rb_insert_pages(cpu_buffer, &pages, new_pages);
1396 }
1397
554f786e
SR
1398 if (RB_WARN_ON(buffer, !list_empty(&pages)))
1399 goto out_fail;
7a8e76a3
SR
1400
1401 out:
1402 buffer->pages = nr_pages;
554f786e 1403 put_online_cpus();
7a8e76a3
SR
1404 mutex_unlock(&buffer->mutex);
1405
18421015
SR
1406 atomic_dec(&buffer->record_disabled);
1407
7a8e76a3
SR
1408 return size;
1409
1410 free_pages:
044fa782
SR
1411 list_for_each_entry_safe(bpage, tmp, &pages, list) {
1412 list_del_init(&bpage->list);
1413 free_buffer_page(bpage);
7a8e76a3 1414 }
554f786e 1415 put_online_cpus();
641d2f63 1416 mutex_unlock(&buffer->mutex);
18421015 1417 atomic_dec(&buffer->record_disabled);
7a8e76a3 1418 return -ENOMEM;
554f786e
SR
1419
1420 /*
1421 * Something went totally wrong, and we are too paranoid
1422 * to even clean up the mess.
1423 */
1424 out_fail:
1425 put_online_cpus();
1426 mutex_unlock(&buffer->mutex);
18421015 1427 atomic_dec(&buffer->record_disabled);
554f786e 1428 return -1;
7a8e76a3 1429}
c4f50183 1430EXPORT_SYMBOL_GPL(ring_buffer_resize);
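/*
 * For example, assuming 4K pages (BUF_PAGE_SIZE of roughly 4080 bytes),
 * a request that is not a page multiple is rounded up:
 *
 *	ring_buffer_resize(buffer, 10000);
 *
 * allocates DIV_ROUND_UP(10000, BUF_PAGE_SIZE) == 3 pages per cpu and
 * returns the rounded-up size in bytes.
 */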
7a8e76a3 1431
8789a9e7 1432static inline void *
044fa782 1433__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
8789a9e7 1434{
044fa782 1435 return bpage->data + index;
8789a9e7
SR
1436}
1437
044fa782 1438static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
7a8e76a3 1439{
044fa782 1440 return bpage->page->data + index;
7a8e76a3
SR
1441}
1442
1443static inline struct ring_buffer_event *
d769041f 1444rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
7a8e76a3 1445{
6f807acd
SR
1446 return __rb_page_index(cpu_buffer->reader_page,
1447 cpu_buffer->reader_page->read);
1448}
1449
7a8e76a3
SR
1450static inline struct ring_buffer_event *
1451rb_iter_head_event(struct ring_buffer_iter *iter)
1452{
6f807acd 1453 return __rb_page_index(iter->head_page, iter->head);
7a8e76a3
SR
1454}
1455
77ae365e 1456static inline unsigned long rb_page_write(struct buffer_page *bpage)
bf41a158 1457{
77ae365e 1458 return local_read(&bpage->write) & RB_WRITE_MASK;
bf41a158
SR
1459}
1460
1461static inline unsigned rb_page_commit(struct buffer_page *bpage)
1462{
abc9b56d 1463 return local_read(&bpage->page->commit);
bf41a158
SR
1464}
1465
77ae365e
SR
1466static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1467{
1468 return local_read(&bpage->entries) & RB_WRITE_MASK;
1469}
1470
bf41a158
SR
 1471/* Size is determined by what has been committed */
1472static inline unsigned rb_page_size(struct buffer_page *bpage)
1473{
1474 return rb_page_commit(bpage);
1475}
1476
1477static inline unsigned
1478rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1479{
1480 return rb_page_commit(cpu_buffer->commit_page);
1481}
1482
bf41a158
SR
1483static inline unsigned
1484rb_event_index(struct ring_buffer_event *event)
1485{
1486 unsigned long addr = (unsigned long)event;
1487
22f470f8 1488 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
bf41a158
SR
1489}
1490
0f0c85fc 1491static inline int
fa743953
SR
1492rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1493 struct ring_buffer_event *event)
bf41a158
SR
1494{
1495 unsigned long addr = (unsigned long)event;
1496 unsigned long index;
1497
1498 index = rb_event_index(event);
1499 addr &= PAGE_MASK;
1500
1501 return cpu_buffer->commit_page->page == (void *)addr &&
1502 rb_commit_index(cpu_buffer) == index;
1503}
1504
34a148bf 1505static void
bf41a158 1506rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
7a8e76a3 1507{
77ae365e
SR
1508 unsigned long max_count;
1509
bf41a158
SR
1510 /*
1511 * We only race with interrupts and NMIs on this CPU.
1512 * If we own the commit event, then we can commit
1513 * all others that interrupted us, since the interruptions
1514 * are in stack format (they finish before they come
1515 * back to us). This allows us to do a simple loop to
1516 * assign the commit to the tail.
1517 */
a8ccf1d6 1518 again:
77ae365e
SR
1519 max_count = cpu_buffer->buffer->pages * 100;
1520
bf41a158 1521 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
77ae365e
SR
1522 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
1523 return;
1524 if (RB_WARN_ON(cpu_buffer,
1525 rb_is_reader_page(cpu_buffer->tail_page)))
1526 return;
1527 local_set(&cpu_buffer->commit_page->page->commit,
1528 rb_page_write(cpu_buffer->commit_page));
bf41a158 1529 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
abc9b56d
SR
1530 cpu_buffer->write_stamp =
1531 cpu_buffer->commit_page->page->time_stamp;
bf41a158
SR
1532 /* add barrier to keep gcc from optimizing too much */
1533 barrier();
1534 }
1535 while (rb_commit_index(cpu_buffer) !=
1536 rb_page_write(cpu_buffer->commit_page)) {
77ae365e
SR
1537
1538 local_set(&cpu_buffer->commit_page->page->commit,
1539 rb_page_write(cpu_buffer->commit_page));
1540 RB_WARN_ON(cpu_buffer,
1541 local_read(&cpu_buffer->commit_page->page->commit) &
1542 ~RB_WRITE_MASK);
bf41a158
SR
1543 barrier();
1544 }
a8ccf1d6
SR
1545
1546 /* again, keep gcc from optimizing */
1547 barrier();
1548
1549 /*
1550 * If an interrupt came in just after the first while loop
1551 * and pushed the tail page forward, we will be left with
1552 * a dangling commit that will never go forward.
1553 */
1554 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1555 goto again;
7a8e76a3
SR
1556}
1557
d769041f 1558static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
7a8e76a3 1559{
abc9b56d 1560 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
6f807acd 1561 cpu_buffer->reader_page->read = 0;
d769041f
SR
1562}
1563
34a148bf 1564static void rb_inc_iter(struct ring_buffer_iter *iter)
d769041f
SR
1565{
1566 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1567
1568 /*
1569 * The iterator could be on the reader page (it starts there).
1570 * But the head could have moved, since the reader was
1571 * found. Check for this case and assign the iterator
1572 * to the head page instead of next.
1573 */
1574 if (iter->head_page == cpu_buffer->reader_page)
77ae365e 1575 iter->head_page = rb_set_head_page(cpu_buffer);
d769041f
SR
1576 else
1577 rb_inc_page(cpu_buffer, &iter->head_page);
1578
abc9b56d 1579 iter->read_stamp = iter->head_page->page->time_stamp;
7a8e76a3
SR
1580 iter->head = 0;
1581}
1582
69d1b839
SR
1583/* Slow path, do not inline */
1584static noinline struct ring_buffer_event *
1585rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
1586{
1587 event->type_len = RINGBUF_TYPE_TIME_EXTEND;
1588
1589 /* Not the first event on the page? */
1590 if (rb_event_index(event)) {
1591 event->time_delta = delta & TS_MASK;
1592 event->array[0] = delta >> TS_SHIFT;
1593 } else {
1594 /* nope, just zero it */
1595 event->time_delta = 0;
1596 event->array[0] = 0;
1597 }
1598
1599 return skip_time_extend(event);
1600}
1601
7a8e76a3
SR
1602/**
1603 * ring_buffer_update_event - update event type and data
 1604 * @event: the event to update
1605 * @type: the type of event
1606 * @length: the size of the event field in the ring buffer
1607 *
1608 * Update the type and data fields of the event. The length
1609 * is the actual size that is written to the ring buffer,
1610 * and with this, we can determine what to place into the
1611 * data field.
1612 */
34a148bf 1613static void
69d1b839
SR
1614rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
1615 struct ring_buffer_event *event, unsigned length,
1616 int add_timestamp, u64 delta)
7a8e76a3 1617{
69d1b839
SR
1618 /* Only a commit updates the timestamp */
1619 if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
1620 delta = 0;
7a8e76a3 1621
69d1b839
SR
1622 /*
1623 * If we need to add a timestamp, then we
 1624 * add it to the start of the reserved space.
1625 */
1626 if (unlikely(add_timestamp)) {
1627 event = rb_add_time_stamp(event, delta);
1628 length -= RB_LEN_TIME_EXTEND;
1629 delta = 0;
7a8e76a3 1630 }
69d1b839
SR
1631
1632 event->time_delta = delta;
1633 length -= RB_EVNT_HDR_SIZE;
1634 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
1635 event->type_len = 0;
1636 event->array[0] = length;
1637 } else
1638 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
7a8e76a3
SR
1639}
1640
77ae365e
SR
1641/*
1642 * rb_handle_head_page - writer hit the head page
1643 *
1644 * Returns: +1 to retry page
1645 * 0 to continue
1646 * -1 on error
1647 */
1648static int
1649rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1650 struct buffer_page *tail_page,
1651 struct buffer_page *next_page)
1652{
1653 struct buffer_page *new_head;
1654 int entries;
1655 int type;
1656 int ret;
1657
1658 entries = rb_page_entries(next_page);
1659
1660 /*
1661 * The hard part is here. We need to move the head
1662 * forward, and protect against both readers on
1663 * other CPUs and writers coming in via interrupts.
1664 */
1665 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1666 RB_PAGE_HEAD);
1667
1668 /*
1669 * type can be one of four:
1670 * NORMAL - an interrupt already moved it for us
1671 * HEAD - we are the first to get here.
1672 * UPDATE - we are the interrupt interrupting
1673 * a current move.
1674 * MOVED - a reader on another CPU moved the next
1675 * pointer to its reader page. Give up
1676 * and try again.
1677 */
1678
1679 switch (type) {
1680 case RB_PAGE_HEAD:
1681 /*
1682 * We changed the head to UPDATE, thus
1683 * it is our responsibility to update
1684 * the counters.
1685 */
1686 local_add(entries, &cpu_buffer->overrun);
1687
1688 /*
1689 * The entries will be zeroed out when we move the
1690 * tail page.
1691 */
1692
1693 /* still more to do */
1694 break;
1695
1696 case RB_PAGE_UPDATE:
1697 /*
 1698 * This is an interrupt that interrupted the
1699 * previous update. Still more to do.
1700 */
1701 break;
1702 case RB_PAGE_NORMAL:
1703 /*
1704 * An interrupt came in before the update
1705 * and processed this for us.
1706 * Nothing left to do.
1707 */
1708 return 1;
1709 case RB_PAGE_MOVED:
1710 /*
1711 * The reader is on another CPU and just did
1712 * a swap with our next_page.
1713 * Try again.
1714 */
1715 return 1;
1716 default:
1717 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
1718 return -1;
1719 }
1720
1721 /*
1722 * Now that we are here, the old head pointer is
1723 * set to UPDATE. This will keep the reader from
1724 * swapping the head page with the reader page.
1725 * The reader (on another CPU) will spin till
1726 * we are finished.
1727 *
1728 * We just need to protect against interrupts
1729 * doing the job. We will set the next pointer
1730 * to HEAD. After that, we set the old pointer
 1731 * to NORMAL, but only if it was HEAD before;
 1732 * otherwise we are an interrupt, and only
 1733 * want the outermost commit to reset it.
1734 */
1735 new_head = next_page;
1736 rb_inc_page(cpu_buffer, &new_head);
1737
1738 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
1739 RB_PAGE_NORMAL);
1740
1741 /*
1742 * Valid returns are:
1743 * HEAD - an interrupt came in and already set it.
1744 * NORMAL - One of two things:
1745 * 1) We really set it.
1746 * 2) A bunch of interrupts came in and moved
1747 * the page forward again.
1748 */
1749 switch (ret) {
1750 case RB_PAGE_HEAD:
1751 case RB_PAGE_NORMAL:
1752 /* OK */
1753 break;
1754 default:
1755 RB_WARN_ON(cpu_buffer, 1);
1756 return -1;
1757 }
1758
1759 /*
1760 * It is possible that an interrupt came in,
1761 * set the head up, then more interrupts came in
1762 * and moved it again. When we get back here,
1763 * the page would have been set to NORMAL but we
1764 * just set it back to HEAD.
1765 *
1766 * How do you detect this? Well, if that happened
1767 * the tail page would have moved.
1768 */
1769 if (ret == RB_PAGE_NORMAL) {
1770 /*
 1771 * If the tail had moved past next, then we need
1772 * to reset the pointer.
1773 */
1774 if (cpu_buffer->tail_page != tail_page &&
1775 cpu_buffer->tail_page != next_page)
1776 rb_head_page_set_normal(cpu_buffer, new_head,
1777 next_page,
1778 RB_PAGE_HEAD);
1779 }
1780
1781 /*
1782 * If this was the outer most commit (the one that
1783 * changed the original pointer from HEAD to UPDATE),
1784 * then it is up to us to reset it to NORMAL.
1785 */
1786 if (type == RB_PAGE_HEAD) {
1787 ret = rb_head_page_set_normal(cpu_buffer, next_page,
1788 tail_page,
1789 RB_PAGE_UPDATE);
1790 if (RB_WARN_ON(cpu_buffer,
1791 ret != RB_PAGE_UPDATE))
1792 return -1;
1793 }
1794
1795 return 0;
1796}
1797
34a148bf 1798static unsigned rb_calculate_event_length(unsigned length)
7a8e76a3
SR
1799{
1800 struct ring_buffer_event event; /* Used only for sizeof array */
1801
 1802 /* zero length can cause confusion */
1803 if (!length)
1804 length = 1;
1805
2271048d 1806 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
7a8e76a3
SR
1807 length += sizeof(event.array[0]);
1808
1809 length += RB_EVNT_HDR_SIZE;
2271048d 1810 length = ALIGN(length, RB_ARCH_ALIGNMENT);
7a8e76a3
SR
1811
1812 return length;
1813}
1814
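/*
 * Worked example, assuming RB_FORCE_8BYTE_ALIGNMENT is 0: reserving a
 * 10 byte payload gives
 *
 *	length = 10 + RB_EVNT_HDR_SIZE;			// 10 + 4 = 14
 *	length = ALIGN(length, RB_ARCH_ALIGNMENT);	// rounds to 16 bytes
 *
 * and rb_update_event() later encodes it as
 * type_len = DIV_ROUND_UP(16 - RB_EVNT_HDR_SIZE, RB_ALIGNMENT) == 3.
 */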
c7b09308
SR
1815static inline void
1816rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
1817 struct buffer_page *tail_page,
1818 unsigned long tail, unsigned long length)
1819{
1820 struct ring_buffer_event *event;
1821
1822 /*
1823 * Only the event that crossed the page boundary
1824 * must fill the old tail_page with padding.
1825 */
1826 if (tail >= BUF_PAGE_SIZE) {
b3230c8b
SR
1827 /*
1828 * If the page was filled, then we still need
1829 * to update the real_end. Reset it to zero
1830 * and the reader will ignore it.
1831 */
1832 if (tail == BUF_PAGE_SIZE)
1833 tail_page->real_end = 0;
1834
c7b09308
SR
1835 local_sub(length, &tail_page->write);
1836 return;
1837 }
1838
1839 event = __rb_page_index(tail_page, tail);
b0b7065b 1840 kmemcheck_annotate_bitfield(event, bitfield);
c7b09308 1841
ff0ff84a
SR
1842 /*
1843 * Save the original length to the meta data.
1844 * This will be used by the reader to update the
1845 * lost event counter.
1846 */
1847 tail_page->real_end = tail;
1848
c7b09308
SR
1849 /*
1850 * If this event is bigger than the minimum size, then
1851 * we need to be careful that we don't subtract the
1852 * write counter enough to allow another writer to slip
1853 * in on this page.
1854 * We put in a discarded commit instead, to make sure
1855 * that this space is not used again.
1856 *
1857 * If we are less than the minimum size, we don't need to
1858 * worry about it.
1859 */
1860 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
1861 /* No room for any events */
1862
1863 /* Mark the rest of the page with padding */
1864 rb_event_set_padding(event);
1865
1866 /* Set the write back to the previous setting */
1867 local_sub(length, &tail_page->write);
1868 return;
1869 }
1870
1871 /* Put in a discarded event */
1872 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
1873 event->type_len = RINGBUF_TYPE_PADDING;
1874 /* time delta must be non zero */
1875 event->time_delta = 1;
c7b09308
SR
1876
1877 /* Set write to end of buffer */
1878 length = (tail + length) - BUF_PAGE_SIZE;
1879 local_sub(length, &tail_page->write);
1880}
6634ff26 1881
747e94ae
SR
1882/*
1883 * This is the slow path, force gcc not to inline it.
1884 */
1885static noinline struct ring_buffer_event *
6634ff26
SR
1886rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1887 unsigned long length, unsigned long tail,
e8bc43e8 1888 struct buffer_page *tail_page, u64 ts)
7a8e76a3 1889{
5a50e33c 1890 struct buffer_page *commit_page = cpu_buffer->commit_page;
7a8e76a3 1891 struct ring_buffer *buffer = cpu_buffer->buffer;
77ae365e
SR
1892 struct buffer_page *next_page;
1893 int ret;
aa20ae84
SR
1894
1895 next_page = tail_page;
1896
aa20ae84
SR
1897 rb_inc_page(cpu_buffer, &next_page);
1898
aa20ae84
SR
1899 /*
1900 * If for some reason, we had an interrupt storm that made
1901 * it all the way around the buffer, bail, and warn
1902 * about it.
1903 */
1904 if (unlikely(next_page == commit_page)) {
77ae365e 1905 local_inc(&cpu_buffer->commit_overrun);
aa20ae84
SR
1906 goto out_reset;
1907 }
1908
77ae365e
SR
1909 /*
1910 * This is where the fun begins!
1911 *
1912 * We are fighting against races between a reader that
1913 * could be on another CPU trying to swap its reader
1914 * page with the buffer head.
1915 *
1916 * We are also fighting against interrupts coming in and
1917 * moving the head or tail on us as well.
1918 *
1919 * If the next page is the head page then we have filled
1920 * the buffer, unless the commit page is still on the
1921 * reader page.
1922 */
1923 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
aa20ae84 1924
77ae365e
SR
1925 /*
1926 * If the commit is not on the reader page, then
1927 * move the header page.
1928 */
1929 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
1930 /*
1931 * If we are not in overwrite mode,
1932 * this is easy, just stop here.
1933 */
1934 if (!(buffer->flags & RB_FL_OVERWRITE))
1935 goto out_reset;
1936
1937 ret = rb_handle_head_page(cpu_buffer,
1938 tail_page,
1939 next_page);
1940 if (ret < 0)
1941 goto out_reset;
1942 if (ret)
1943 goto out_again;
1944 } else {
1945 /*
1946 * We need to be careful here too. The
1947 * commit page could still be on the reader
1948 * page. We could have a small buffer, and
1949 * have filled up the buffer with events
1950 * from interrupts and such, and wrapped.
1951 *
1952 * Note, if the tail page is also on the
1953 * reader_page, we let it move out.
1954 */
1955 if (unlikely((cpu_buffer->commit_page !=
1956 cpu_buffer->tail_page) &&
1957 (cpu_buffer->commit_page ==
1958 cpu_buffer->reader_page))) {
1959 local_inc(&cpu_buffer->commit_overrun);
1960 goto out_reset;
1961 }
aa20ae84
SR
1962 }
1963 }
1964
77ae365e
SR
1965 ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
1966 if (ret) {
1967 /*
1968 * Nested commits always have zero deltas, so
1969 * just reread the time stamp
1970 */
e8bc43e8
SR
1971 ts = rb_time_stamp(buffer);
1972 next_page->page->time_stamp = ts;
aa20ae84
SR
1973 }
1974
77ae365e 1975 out_again:
aa20ae84 1976
77ae365e 1977 rb_reset_tail(cpu_buffer, tail_page, tail, length);
aa20ae84
SR
1978
1979 /* fail and let the caller try again */
1980 return ERR_PTR(-EAGAIN);
1981
45141d46 1982 out_reset:
6f3b3440 1983 /* reset write */
c7b09308 1984 rb_reset_tail(cpu_buffer, tail_page, tail, length);
6f3b3440 1985
bf41a158 1986 return NULL;
7a8e76a3
SR
1987}
1988
6634ff26
SR
1989static struct ring_buffer_event *
1990__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
69d1b839
SR
1991 unsigned long length, u64 ts,
1992 u64 delta, int add_timestamp)
6634ff26 1993{
5a50e33c 1994 struct buffer_page *tail_page;
6634ff26
SR
1995 struct ring_buffer_event *event;
1996 unsigned long tail, write;
1997
69d1b839
SR
1998 /*
1999 * If the time delta since the last event is too big to
2000 * hold in the time field of the event, then we append a
2001 * TIME EXTEND event ahead of the data event.
2002 */
2003 if (unlikely(add_timestamp))
2004 length += RB_LEN_TIME_EXTEND;
2005
6634ff26
SR
2006 tail_page = cpu_buffer->tail_page;
2007 write = local_add_return(length, &tail_page->write);
77ae365e
SR
2008
2009 /* set write to only the index of the write */
2010 write &= RB_WRITE_MASK;
6634ff26
SR
2011 tail = write - length;
2012
2013 /* See if we shot past the end of this buffer page */
747e94ae 2014 if (unlikely(write > BUF_PAGE_SIZE))
6634ff26 2015 return rb_move_tail(cpu_buffer, length, tail,
5a50e33c 2016 tail_page, ts);
6634ff26
SR
2017
2018 /* We reserved something on the buffer */
2019
6634ff26 2020 event = __rb_page_index(tail_page, tail);
1744a21d 2021 kmemcheck_annotate_bitfield(event, bitfield);
69d1b839 2022 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
6634ff26 2023
69d1b839 2024 local_inc(&tail_page->entries);
6634ff26
SR
2025
2026 /*
fa743953
SR
2027 * If this is the first commit on the page, then update
2028 * its timestamp.
6634ff26 2029 */
fa743953 2030 if (!tail)
e8bc43e8 2031 tail_page->page->time_stamp = ts;
6634ff26
SR
2032
2033 return event;
2034}
2035
edd813bf
SR
2036static inline int
2037rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2038 struct ring_buffer_event *event)
2039{
2040 unsigned long new_index, old_index;
2041 struct buffer_page *bpage;
2042 unsigned long index;
2043 unsigned long addr;
2044
2045 new_index = rb_event_index(event);
69d1b839 2046 old_index = new_index + rb_event_ts_length(event);
edd813bf
SR
2047 addr = (unsigned long)event;
2048 addr &= PAGE_MASK;
2049
2050 bpage = cpu_buffer->tail_page;
2051
2052 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
77ae365e
SR
2053 unsigned long write_mask =
2054 local_read(&bpage->write) & ~RB_WRITE_MASK;
edd813bf
SR
2055 /*
2056 * This is on the tail page. It is possible that
2057 * a write could come in and move the tail page
2058 * and write to the next page. That is fine
2059 * because we just shorten what is on this page.
2060 */
77ae365e
SR
2061 old_index += write_mask;
2062 new_index += write_mask;
edd813bf
SR
2063 index = local_cmpxchg(&bpage->write, old_index, new_index);
2064 if (index == old_index)
2065 return 1;
2066 }
2067
2068 /* could not discard */
2069 return 0;
2070}
2071
fa743953
SR
2072static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2073{
2074 local_inc(&cpu_buffer->committing);
2075 local_inc(&cpu_buffer->commits);
2076}
2077
d9abde21 2078static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
fa743953
SR
2079{
2080 unsigned long commits;
2081
2082 if (RB_WARN_ON(cpu_buffer,
2083 !local_read(&cpu_buffer->committing)))
2084 return;
2085
2086 again:
2087 commits = local_read(&cpu_buffer->commits);
2088 /* synchronize with interrupts */
2089 barrier();
2090 if (local_read(&cpu_buffer->committing) == 1)
2091 rb_set_commit_to_write(cpu_buffer);
2092
2093 local_dec(&cpu_buffer->committing);
2094
2095 /* synchronize with interrupts */
2096 barrier();
2097
2098 /*
2099 * Need to account for interrupts coming in between the
2100 * updating of the commit page and the clearing of the
2101 * committing counter.
2102 */
2103 if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2104 !local_read(&cpu_buffer->committing)) {
2105 local_inc(&cpu_buffer->committing);
2106 goto again;
2107 }
2108}
2109
7a8e76a3 2110static struct ring_buffer_event *
62f0b3eb
SR
2111rb_reserve_next_event(struct ring_buffer *buffer,
2112 struct ring_buffer_per_cpu *cpu_buffer,
1cd8d735 2113 unsigned long length)
7a8e76a3
SR
2114{
2115 struct ring_buffer_event *event;
69d1b839 2116 u64 ts, delta;
818e3dd3 2117 int nr_loops = 0;
69d1b839 2118 int add_timestamp;
140ff891 2119 u64 diff;
7a8e76a3 2120
fa743953
SR
2121 rb_start_commit(cpu_buffer);
2122
85bac32c 2123#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
62f0b3eb
SR
2124 /*
2125 * Due to the ability to swap a cpu buffer between buffers,
2126 * it is possible it was swapped before we committed.
2127 * (committing stops a swap). We check for it here and
2128 * if it happened, we have to fail the write.
2129 */
2130 barrier();
2131 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2132 local_dec(&cpu_buffer->committing);
2133 local_dec(&cpu_buffer->commits);
2134 return NULL;
2135 }
85bac32c 2136#endif
62f0b3eb 2137
be957c44 2138 length = rb_calculate_event_length(length);
bf41a158 2139 again:
69d1b839
SR
2140 add_timestamp = 0;
2141 delta = 0;
2142
818e3dd3
SR
2143 /*
2144 * We allow for interrupts to reenter here and do a trace.
2145 * If one does, it will cause this original code to loop
2146 * back here. Even with heavy interrupts happening, this
2147 * should only happen a few times in a row. If this happens
2148 * 1000 times in a row, there must be either an interrupt
2149 * storm or we have something buggy.
2150 * Bail!
2151 */
3e89c7bb 2152 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
fa743953 2153 goto out_fail;
818e3dd3 2154
6d3f1e12 2155 ts = rb_time_stamp(cpu_buffer->buffer);
140ff891 2156 diff = ts - cpu_buffer->write_stamp;
7a8e76a3 2157
140ff891
SR
2158 /* make sure this diff is calculated here */
2159 barrier();
bf41a158 2160
140ff891
SR
2161 /* Did the write stamp get updated already? */
2162 if (likely(ts >= cpu_buffer->write_stamp)) {
168b6b1d
SR
2163 delta = diff;
2164 if (unlikely(test_time_stamp(delta))) {
69d1b839
SR
2165 WARN_ONCE(delta > (1ULL << 59),
2166 KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n",
2167 (unsigned long long)delta,
2168 (unsigned long long)ts,
2169 (unsigned long long)cpu_buffer->write_stamp);
2170 add_timestamp = 1;
7a8e76a3 2171 }
168b6b1d 2172 }
7a8e76a3 2173
69d1b839
SR
2174 event = __rb_reserve_next(cpu_buffer, length, ts,
2175 delta, add_timestamp);
168b6b1d 2176 if (unlikely(PTR_ERR(event) == -EAGAIN))
bf41a158
SR
2177 goto again;
2178
fa743953
SR
2179 if (!event)
2180 goto out_fail;
7a8e76a3 2181
7a8e76a3 2182 return event;
fa743953
SR
2183
2184 out_fail:
2185 rb_end_commit(cpu_buffer);
2186 return NULL;
7a8e76a3
SR
2187}
2188
1155de47
PM
2189#ifdef CONFIG_TRACING
2190
aa18efb2 2191#define TRACE_RECURSIVE_DEPTH 16
261842b7 2192
d9abde21
SR
2193/* Keep this code out of the fast path cache */
2194static noinline void trace_recursive_fail(void)
261842b7 2195{
aa18efb2
SR
2196 /* Disable all tracing before we do anything else */
2197 tracing_off_permanent();
261842b7 2198
7d7d2b80 2199 printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
aa18efb2
SR
2200 "HC[%lu]:SC[%lu]:NMI[%lu]\n",
2201 current->trace_recursion,
2202 hardirq_count() >> HARDIRQ_SHIFT,
2203 softirq_count() >> SOFTIRQ_SHIFT,
2204 in_nmi());
261842b7 2205
aa18efb2 2206 WARN_ON_ONCE(1);
d9abde21
SR
2207}
2208
2209static inline int trace_recursive_lock(void)
2210{
2211 current->trace_recursion++;
2212
2213 if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
2214 return 0;
2215
2216 trace_recursive_fail();
2217
aa18efb2 2218 return -1;
261842b7
SR
2219}
2220
d9abde21 2221static inline void trace_recursive_unlock(void)
261842b7 2222{
aa18efb2 2223 WARN_ON_ONCE(!current->trace_recursion);
261842b7 2224
aa18efb2 2225 current->trace_recursion--;
261842b7
SR
2226}
2227
1155de47
PM
2228#else
2229
2230#define trace_recursive_lock() (0)
2231#define trace_recursive_unlock() do { } while (0)
2232
2233#endif
2234
7a8e76a3
SR
2235/**
2236 * ring_buffer_lock_reserve - reserve a part of the buffer
2237 * @buffer: the ring buffer to reserve from
2238 * @length: the length of the data to reserve (excluding event header)
7a8e76a3
SR
2239 *
2240 * Returns a reserved event on the ring buffer to copy directly to.
2241 * The user of this interface will need to get the body to write into
2242 * and can use the ring_buffer_event_data() interface.
2243 *
2244 * The length is the length of the data needed, not the event length
2245 * which also includes the event header.
2246 *
2247 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2248 * If NULL is returned, then nothing has been allocated or locked.
2249 */
2250struct ring_buffer_event *
0a987751 2251ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
7a8e76a3
SR
2252{
2253 struct ring_buffer_per_cpu *cpu_buffer;
2254 struct ring_buffer_event *event;
5168ae50 2255 int cpu;
7a8e76a3 2256
033601a3 2257 if (ring_buffer_flags != RB_BUFFERS_ON)
a3583244
SR
2258 return NULL;
2259
bf41a158 2260 /* If we are tracing schedule, we don't want to recurse */
5168ae50 2261 preempt_disable_notrace();
bf41a158 2262
52fbe9cd
LJ
2263 if (atomic_read(&buffer->record_disabled))
2264 goto out_nocheck;
2265
261842b7
SR
2266 if (trace_recursive_lock())
2267 goto out_nocheck;
2268
7a8e76a3
SR
2269 cpu = raw_smp_processor_id();
2270
9e01c1b7 2271 if (!cpumask_test_cpu(cpu, buffer->cpumask))
d769041f 2272 goto out;
7a8e76a3
SR
2273
2274 cpu_buffer = buffer->buffers[cpu];
7a8e76a3
SR
2275
2276 if (atomic_read(&cpu_buffer->record_disabled))
d769041f 2277 goto out;
7a8e76a3 2278
be957c44 2279 if (length > BUF_MAX_DATA_SIZE)
bf41a158 2280 goto out;
7a8e76a3 2281
62f0b3eb 2282 event = rb_reserve_next_event(buffer, cpu_buffer, length);
7a8e76a3 2283 if (!event)
d769041f 2284 goto out;
7a8e76a3
SR
2285
2286 return event;
2287
d769041f 2288 out:
261842b7
SR
2289 trace_recursive_unlock();
2290
2291 out_nocheck:
5168ae50 2292 preempt_enable_notrace();
7a8e76a3
SR
2293 return NULL;
2294}
c4f50183 2295EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
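/*
 * Illustrative usage sketch (not part of the original file): the
 * reserve/commit pairing described above. "struct my_payload" and
 * "example_reserve_commit" are hypothetical names used only here.
 */
#if 0	/* example only */
struct my_payload {
	int	cpu;
	u64	value;
};

static int example_reserve_commit(struct ring_buffer *buffer, u64 value)
{
	struct ring_buffer_event *event;
	struct my_payload *body;

	/* length is the payload size only; the event header is added inside */
	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return -EBUSY;

	body = ring_buffer_event_data(event);
	body->cpu = raw_smp_processor_id();
	body->value = value;

	/* pairs with the reserve above; re-enables preemption */
	return ring_buffer_unlock_commit(buffer, event);
}
#endif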
7a8e76a3 2296
a1863c21
SR
2297static void
2298rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
7a8e76a3
SR
2299 struct ring_buffer_event *event)
2300{
69d1b839
SR
2301 u64 delta;
2302
fa743953
SR
2303 /*
2304 * The event first in the commit queue updates the
2305 * time stamp.
2306 */
69d1b839
SR
2307 if (rb_event_is_commit(cpu_buffer, event)) {
2308 /*
2309 * A commit event that is first on a page
2310 * updates the write timestamp with the page stamp
2311 */
2312 if (!rb_event_index(event))
2313 cpu_buffer->write_stamp =
2314 cpu_buffer->commit_page->page->time_stamp;
2315 else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2316 delta = event->array[0];
2317 delta <<= TS_SHIFT;
2318 delta += event->time_delta;
2319 cpu_buffer->write_stamp += delta;
2320 } else
2321 cpu_buffer->write_stamp += event->time_delta;
2322 }
a1863c21 2323}
bf41a158 2324
a1863c21
SR
2325static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2326 struct ring_buffer_event *event)
2327{
2328 local_inc(&cpu_buffer->entries);
2329 rb_update_write_stamp(cpu_buffer, event);
fa743953 2330 rb_end_commit(cpu_buffer);
7a8e76a3
SR
2331}
2332
2333/**
2334 * ring_buffer_unlock_commit - commit a reserved event
2335 * @buffer: The buffer to commit to
2336 * @event: The event pointer to commit.
7a8e76a3
SR
2337 *
2338 * This commits the data to the ring buffer, and releases any locks held.
2339 *
2340 * Must be paired with ring_buffer_lock_reserve.
2341 */
2342int ring_buffer_unlock_commit(struct ring_buffer *buffer,
0a987751 2343 struct ring_buffer_event *event)
7a8e76a3
SR
2344{
2345 struct ring_buffer_per_cpu *cpu_buffer;
2346 int cpu = raw_smp_processor_id();
2347
2348 cpu_buffer = buffer->buffers[cpu];
2349
7a8e76a3
SR
2350 rb_commit(cpu_buffer, event);
2351
261842b7
SR
2352 trace_recursive_unlock();
2353
5168ae50 2354 preempt_enable_notrace();
7a8e76a3
SR
2355
2356 return 0;
2357}
c4f50183 2358EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
7a8e76a3 2359
f3b9aae1
FW
2360static inline void rb_event_discard(struct ring_buffer_event *event)
2361{
69d1b839
SR
2362 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
2363 event = skip_time_extend(event);
2364
334d4169
LJ
2365 /* array[0] holds the actual length for the discarded event */
2366 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2367 event->type_len = RINGBUF_TYPE_PADDING;
f3b9aae1
FW
2368 /* time delta must be non zero */
2369 if (!event->time_delta)
2370 event->time_delta = 1;
2371}
2372
a1863c21
SR
2373/*
2374 * Decrement the entries to the page that an event is on.
2375 * The event does not even need to exist, only the pointer
2376 * to the page it is on. This may only be called before the commit
2377 * takes place.
2378 */
2379static inline void
2380rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2381 struct ring_buffer_event *event)
2382{
2383 unsigned long addr = (unsigned long)event;
2384 struct buffer_page *bpage = cpu_buffer->commit_page;
2385 struct buffer_page *start;
2386
2387 addr &= PAGE_MASK;
2388
2389 /* Do the likely case first */
2390 if (likely(bpage->page == (void *)addr)) {
2391 local_dec(&bpage->entries);
2392 return;
2393 }
2394
2395 /*
2396 * Because the commit page may be on the reader page we
2397 * start with the next page and check the end loop there.
2398 */
2399 rb_inc_page(cpu_buffer, &bpage);
2400 start = bpage;
2401 do {
2402 if (bpage->page == (void *)addr) {
2403 local_dec(&bpage->entries);
2404 return;
2405 }
2406 rb_inc_page(cpu_buffer, &bpage);
2407 } while (bpage != start);
2408
2409 /* commit not part of this buffer?? */
2410 RB_WARN_ON(cpu_buffer, 1);
2411}
2412
fa1b47dd
SR
2413/**
2414 * ring_buffer_commit_discard - discard an event that has not been committed
2415 * @buffer: the ring buffer
2416 * @event: non committed event to discard
2417 *
dc892f73
SR
2418 * Sometimes an event that is in the ring buffer needs to be ignored.
2419 * This function lets the user discard an event in the ring buffer
2420 * and then that event will not be read later.
2421 *
2422 * This function only works if it is called before the item has been
2423 * committed. It will try to free the event from the ring buffer
fa1b47dd
SR
2424 * if another event has not been added behind it.
2425 *
2426 * If another event has been added behind it, it will set the event
2427 * up as discarded, and perform the commit.
2428 *
2429 * If this function is called, do not call ring_buffer_unlock_commit on
2430 * the event.
2431 */
2432void ring_buffer_discard_commit(struct ring_buffer *buffer,
2433 struct ring_buffer_event *event)
2434{
2435 struct ring_buffer_per_cpu *cpu_buffer;
fa1b47dd
SR
2436 int cpu;
2437
2438 /* The event is discarded regardless */
f3b9aae1 2439 rb_event_discard(event);
fa1b47dd 2440
fa743953
SR
2441 cpu = smp_processor_id();
2442 cpu_buffer = buffer->buffers[cpu];
2443
fa1b47dd
SR
2444 /*
2445 * This must only be called if the event has not been
2446 * committed yet. Thus we can assume that preemption
2447 * is still disabled.
2448 */
fa743953 2449 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
fa1b47dd 2450
a1863c21 2451 rb_decrement_entry(cpu_buffer, event);
0f2541d2 2452 if (rb_try_to_discard(cpu_buffer, event))
edd813bf 2453 goto out;
fa1b47dd
SR
2454
2455 /*
2456 * The commit is still visible by the reader, so we
a1863c21 2457 * must still update the timestamp.
fa1b47dd 2458 */
a1863c21 2459 rb_update_write_stamp(cpu_buffer, event);
fa1b47dd 2460 out:
fa743953 2461 rb_end_commit(cpu_buffer);
fa1b47dd 2462
f3b9aae1
FW
2463 trace_recursive_unlock();
2464
5168ae50 2465 preempt_enable_notrace();
fa1b47dd
SR
2466
2467}
2468EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
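/*
 * Illustrative sketch (not part of the original file): reserving an
 * event and discarding it instead of committing, as described above.
 * "event_wanted()" is a hypothetical predicate used only here.
 */
#if 0	/* example only */
static void example_maybe_discard(struct ring_buffer *buffer, int value)
{
	struct ring_buffer_event *event;
	int *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return;

	body = ring_buffer_event_data(event);
	*body = value;

	if (event_wanted(value))
		ring_buffer_unlock_commit(buffer, event);
	else
		/* do not also call ring_buffer_unlock_commit() */
		ring_buffer_discard_commit(buffer, event);
}
#endif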
2469
7a8e76a3
SR
2470/**
2471 * ring_buffer_write - write data to the buffer without reserving
2472 * @buffer: The ring buffer to write to.
2473 * @length: The length of the data being written (excluding the event header)
2474 * @data: The data to write to the buffer.
2475 *
2476 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2477 * one function. If you already have the data to write to the buffer, it
2478 * may be easier to simply call this function.
2479 *
2480 * Note, like ring_buffer_lock_reserve, the length is the length of the data
2481 * and not the length of the event which would hold the header.
2482 */
2483int ring_buffer_write(struct ring_buffer *buffer,
2484 unsigned long length,
2485 void *data)
2486{
2487 struct ring_buffer_per_cpu *cpu_buffer;
2488 struct ring_buffer_event *event;
7a8e76a3
SR
2489 void *body;
2490 int ret = -EBUSY;
5168ae50 2491 int cpu;
7a8e76a3 2492
033601a3 2493 if (ring_buffer_flags != RB_BUFFERS_ON)
a3583244
SR
2494 return -EBUSY;
2495
5168ae50 2496 preempt_disable_notrace();
bf41a158 2497
52fbe9cd
LJ
2498 if (atomic_read(&buffer->record_disabled))
2499 goto out;
2500
7a8e76a3
SR
2501 cpu = raw_smp_processor_id();
2502
9e01c1b7 2503 if (!cpumask_test_cpu(cpu, buffer->cpumask))
d769041f 2504 goto out;
7a8e76a3
SR
2505
2506 cpu_buffer = buffer->buffers[cpu];
7a8e76a3
SR
2507
2508 if (atomic_read(&cpu_buffer->record_disabled))
2509 goto out;
2510
be957c44
SR
2511 if (length > BUF_MAX_DATA_SIZE)
2512 goto out;
2513
62f0b3eb 2514 event = rb_reserve_next_event(buffer, cpu_buffer, length);
7a8e76a3
SR
2515 if (!event)
2516 goto out;
2517
2518 body = rb_event_data(event);
2519
2520 memcpy(body, data, length);
2521
2522 rb_commit(cpu_buffer, event);
2523
2524 ret = 0;
2525 out:
5168ae50 2526 preempt_enable_notrace();
7a8e76a3
SR
2527
2528 return ret;
2529}
c4f50183 2530EXPORT_SYMBOL_GPL(ring_buffer_write);
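/*
 * Illustrative sketch (not part of the original file): the one-shot
 * write path for data that already sits in a local buffer, as the
 * comment above suggests.
 */
#if 0	/* example only */
static int example_one_shot_write(struct ring_buffer *buffer)
{
	char msg[] = "hello ring buffer";

	/* length excludes the event header, just like the reserve path */
	return ring_buffer_write(buffer, sizeof(msg), msg);
}
#endif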
7a8e76a3 2531
34a148bf 2532static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
bf41a158
SR
2533{
2534 struct buffer_page *reader = cpu_buffer->reader_page;
77ae365e 2535 struct buffer_page *head = rb_set_head_page(cpu_buffer);
bf41a158
SR
2536 struct buffer_page *commit = cpu_buffer->commit_page;
2537
77ae365e
SR
2538 /* In case of error, head will be NULL */
2539 if (unlikely(!head))
2540 return 1;
2541
bf41a158
SR
2542 return reader->read == rb_page_commit(reader) &&
2543 (commit == reader ||
2544 (commit == head &&
2545 head->read == rb_page_commit(commit)));
2546}
2547
7a8e76a3
SR
2548/**
2549 * ring_buffer_record_disable - stop all writes into the buffer
2550 * @buffer: The ring buffer to stop writes to.
2551 *
2552 * This prevents all writes to the buffer. Any attempt to write
2553 * to the buffer after this will fail and return NULL.
2554 *
2555 * The caller should call synchronize_sched() after this.
2556 */
2557void ring_buffer_record_disable(struct ring_buffer *buffer)
2558{
2559 atomic_inc(&buffer->record_disabled);
2560}
c4f50183 2561EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
7a8e76a3
SR
2562
2563/**
2564 * ring_buffer_record_enable - enable writes to the buffer
2565 * @buffer: The ring buffer to enable writes
2566 *
2567 * Note, multiple disables will need the same number of enables
c41b20e7 2568 * to truly enable the writing (much like preempt_disable).
7a8e76a3
SR
2569 */
2570void ring_buffer_record_enable(struct ring_buffer *buffer)
2571{
2572 atomic_dec(&buffer->record_disabled);
2573}
c4f50183 2574EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
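/*
 * Illustrative sketch (not part of the original file): quiescing all
 * writers before a bulk read, using the synchronize_sched() step the
 * comments above call for. "read_everything()" is hypothetical.
 */
#if 0	/* example only */
static void example_quiesce_and_read(struct ring_buffer *buffer)
{
	ring_buffer_record_disable(buffer);
	/* wait for writers already inside the reserve/commit path */
	synchronize_sched();

	read_everything(buffer);

	ring_buffer_record_enable(buffer);
}
#endif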
7a8e76a3
SR
2575
2576/**
2577 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
2578 * @buffer: The ring buffer to stop writes to.
2579 * @cpu: The CPU buffer to stop
2580 *
2581 * This prevents all writes to the buffer. Any attempt to write
2582 * to the buffer after this will fail and return NULL.
2583 *
2584 * The caller should call synchronize_sched() after this.
2585 */
2586void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
2587{
2588 struct ring_buffer_per_cpu *cpu_buffer;
2589
9e01c1b7 2590 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 2591 return;
7a8e76a3
SR
2592
2593 cpu_buffer = buffer->buffers[cpu];
2594 atomic_inc(&cpu_buffer->record_disabled);
2595}
c4f50183 2596EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
7a8e76a3
SR
2597
2598/**
2599 * ring_buffer_record_enable_cpu - enable writes to the buffer
2600 * @buffer: The ring buffer to enable writes
2601 * @cpu: The CPU to enable.
2602 *
2603 * Note, multiple disables will need the same number of enables
c41b20e7 2604 * to truly enable the writing (much like preempt_disable).
7a8e76a3
SR
2605 */
2606void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
2607{
2608 struct ring_buffer_per_cpu *cpu_buffer;
2609
9e01c1b7 2610 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 2611 return;
7a8e76a3
SR
2612
2613 cpu_buffer = buffer->buffers[cpu];
2614 atomic_dec(&cpu_buffer->record_disabled);
2615}
c4f50183 2616EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
7a8e76a3 2617
f6195aa0
SR
2618/*
2619 * The total entries in the ring buffer is the running counter
2620 * of entries entered into the ring buffer, minus the sum of
2621 * the entries read from the ring buffer and the number of
2622 * entries that were overwritten.
2623 */
2624static inline unsigned long
2625rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
2626{
2627 return local_read(&cpu_buffer->entries) -
2628 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
2629}
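/*
 * Worked example of the accounting above (illustrative numbers only):
 * if 1000 events were entered, 200 were overwritten and 300 have been
 * read, rb_num_of_entries() reports 1000 - (200 + 300) = 500 events
 * still sitting in the buffer.
 */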
2630
7a8e76a3
SR
2631/**
2632 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
2633 * @buffer: The ring buffer
2634 * @cpu: The per CPU buffer to get the entries from.
2635 */
2636unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
2637{
2638 struct ring_buffer_per_cpu *cpu_buffer;
2639
9e01c1b7 2640 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 2641 return 0;
7a8e76a3
SR
2642
2643 cpu_buffer = buffer->buffers[cpu];
554f786e 2644
f6195aa0 2645 return rb_num_of_entries(cpu_buffer);
7a8e76a3 2646}
c4f50183 2647EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
7a8e76a3
SR
2648
2649/**
2650 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
2651 * @buffer: The ring buffer
2652 * @cpu: The per CPU buffer to get the number of overruns from
2653 */
2654unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
2655{
2656 struct ring_buffer_per_cpu *cpu_buffer;
8aabee57 2657 unsigned long ret;
7a8e76a3 2658
9e01c1b7 2659 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 2660 return 0;
7a8e76a3
SR
2661
2662 cpu_buffer = buffer->buffers[cpu];
77ae365e 2663 ret = local_read(&cpu_buffer->overrun);
554f786e
SR
2664
2665 return ret;
7a8e76a3 2666}
c4f50183 2667EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
7a8e76a3 2668
f0d2c681
SR
2669/**
2670 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
2671 * @buffer: The ring buffer
2672 * @cpu: The per CPU buffer to get the number of overruns from
2673 */
2674unsigned long
2675ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
2676{
2677 struct ring_buffer_per_cpu *cpu_buffer;
2678 unsigned long ret;
2679
2680 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2681 return 0;
2682
2683 cpu_buffer = buffer->buffers[cpu];
77ae365e 2684 ret = local_read(&cpu_buffer->commit_overrun);
f0d2c681
SR
2685
2686 return ret;
2687}
2688EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
2689
7a8e76a3
SR
2690/**
2691 * ring_buffer_entries - get the number of entries in a buffer
2692 * @buffer: The ring buffer
2693 *
2694 * Returns the total number of entries in the ring buffer
2695 * (all CPU entries)
2696 */
2697unsigned long ring_buffer_entries(struct ring_buffer *buffer)
2698{
2699 struct ring_buffer_per_cpu *cpu_buffer;
2700 unsigned long entries = 0;
2701 int cpu;
2702
2703 /* if you care about this being correct, lock the buffer */
2704 for_each_buffer_cpu(buffer, cpu) {
2705 cpu_buffer = buffer->buffers[cpu];
f6195aa0 2706 entries += rb_num_of_entries(cpu_buffer);
7a8e76a3
SR
2707 }
2708
2709 return entries;
2710}
c4f50183 2711EXPORT_SYMBOL_GPL(ring_buffer_entries);
7a8e76a3
SR
2712
2713/**
67b394f7 2714 * ring_buffer_overruns - get the number of overruns in the buffer
7a8e76a3
SR
2715 * @buffer: The ring buffer
2716 *
2717 * Returns the total number of overruns in the ring buffer
2718 * (all CPU entries)
2719 */
2720unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
2721{
2722 struct ring_buffer_per_cpu *cpu_buffer;
2723 unsigned long overruns = 0;
2724 int cpu;
2725
2726 /* if you care about this being correct, lock the buffer */
2727 for_each_buffer_cpu(buffer, cpu) {
2728 cpu_buffer = buffer->buffers[cpu];
77ae365e 2729 overruns += local_read(&cpu_buffer->overrun);
7a8e76a3
SR
2730 }
2731
2732 return overruns;
2733}
c4f50183 2734EXPORT_SYMBOL_GPL(ring_buffer_overruns);
7a8e76a3 2735
642edba5 2736static void rb_iter_reset(struct ring_buffer_iter *iter)
7a8e76a3
SR
2737{
2738 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2739
d769041f
SR
2740 /* Iterator usage is expected to have record disabled */
2741 if (list_empty(&cpu_buffer->reader_page->list)) {
77ae365e
SR
2742 iter->head_page = rb_set_head_page(cpu_buffer);
2743 if (unlikely(!iter->head_page))
2744 return;
2745 iter->head = iter->head_page->read;
d769041f
SR
2746 } else {
2747 iter->head_page = cpu_buffer->reader_page;
6f807acd 2748 iter->head = cpu_buffer->reader_page->read;
d769041f
SR
2749 }
2750 if (iter->head)
2751 iter->read_stamp = cpu_buffer->read_stamp;
2752 else
abc9b56d 2753 iter->read_stamp = iter->head_page->page->time_stamp;
492a74f4
SR
2754 iter->cache_reader_page = cpu_buffer->reader_page;
2755 iter->cache_read = cpu_buffer->read;
642edba5 2756}
f83c9d0f 2757
642edba5
SR
2758/**
2759 * ring_buffer_iter_reset - reset an iterator
2760 * @iter: The iterator to reset
2761 *
2762 * Resets the iterator, so that it will start from the beginning
2763 * again.
2764 */
2765void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
2766{
554f786e 2767 struct ring_buffer_per_cpu *cpu_buffer;
642edba5
SR
2768 unsigned long flags;
2769
554f786e
SR
2770 if (!iter)
2771 return;
2772
2773 cpu_buffer = iter->cpu_buffer;
2774
642edba5
SR
2775 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2776 rb_iter_reset(iter);
f83c9d0f 2777 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7a8e76a3 2778}
c4f50183 2779EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
7a8e76a3
SR
2780
2781/**
2782 * ring_buffer_iter_empty - check if an iterator has no more to read
2783 * @iter: The iterator to check
2784 */
2785int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
2786{
2787 struct ring_buffer_per_cpu *cpu_buffer;
2788
2789 cpu_buffer = iter->cpu_buffer;
2790
bf41a158
SR
2791 return iter->head_page == cpu_buffer->commit_page &&
2792 iter->head == rb_commit_index(cpu_buffer);
7a8e76a3 2793}
c4f50183 2794EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
7a8e76a3
SR
2795
2796static void
2797rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2798 struct ring_buffer_event *event)
2799{
2800 u64 delta;
2801
334d4169 2802 switch (event->type_len) {
7a8e76a3
SR
2803 case RINGBUF_TYPE_PADDING:
2804 return;
2805
2806 case RINGBUF_TYPE_TIME_EXTEND:
2807 delta = event->array[0];
2808 delta <<= TS_SHIFT;
2809 delta += event->time_delta;
2810 cpu_buffer->read_stamp += delta;
2811 return;
2812
2813 case RINGBUF_TYPE_TIME_STAMP:
2814 /* FIXME: not implemented */
2815 return;
2816
2817 case RINGBUF_TYPE_DATA:
2818 cpu_buffer->read_stamp += event->time_delta;
2819 return;
2820
2821 default:
2822 BUG();
2823 }
2824 return;
2825}
2826
2827static void
2828rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
2829 struct ring_buffer_event *event)
2830{
2831 u64 delta;
2832
334d4169 2833 switch (event->type_len) {
7a8e76a3
SR
2834 case RINGBUF_TYPE_PADDING:
2835 return;
2836
2837 case RINGBUF_TYPE_TIME_EXTEND:
2838 delta = event->array[0];
2839 delta <<= TS_SHIFT;
2840 delta += event->time_delta;
2841 iter->read_stamp += delta;
2842 return;
2843
2844 case RINGBUF_TYPE_TIME_STAMP:
2845 /* FIXME: not implemented */
2846 return;
2847
2848 case RINGBUF_TYPE_DATA:
2849 iter->read_stamp += event->time_delta;
2850 return;
2851
2852 default:
2853 BUG();
2854 }
2855 return;
2856}
2857
d769041f
SR
2858static struct buffer_page *
2859rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
7a8e76a3 2860{
d769041f 2861 struct buffer_page *reader = NULL;
66a8cb95 2862 unsigned long overwrite;
d769041f 2863 unsigned long flags;
818e3dd3 2864 int nr_loops = 0;
77ae365e 2865 int ret;
d769041f 2866
3e03fb7f 2867 local_irq_save(flags);
0199c4e6 2868 arch_spin_lock(&cpu_buffer->lock);
d769041f
SR
2869
2870 again:
818e3dd3
SR
2871 /*
2872 * This should normally only loop twice. But because the
2873 * start of the reader inserts an empty page, it causes
2874 * a case where we will loop three times. There should be no
2875 * reason to loop four times (that I know of).
2876 */
3e89c7bb 2877 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
818e3dd3
SR
2878 reader = NULL;
2879 goto out;
2880 }
2881
d769041f
SR
2882 reader = cpu_buffer->reader_page;
2883
2884 /* If there's more to read, return this page */
bf41a158 2885 if (cpu_buffer->reader_page->read < rb_page_size(reader))
d769041f
SR
2886 goto out;
2887
2888 /* Never should we have an index greater than the size */
3e89c7bb
SR
2889 if (RB_WARN_ON(cpu_buffer,
2890 cpu_buffer->reader_page->read > rb_page_size(reader)))
2891 goto out;
d769041f
SR
2892
2893 /* check if we caught up to the tail */
2894 reader = NULL;
bf41a158 2895 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
d769041f 2896 goto out;
7a8e76a3
SR
2897
2898 /*
d769041f 2899 * Reset the reader page to size zero.
7a8e76a3 2900 */
77ae365e
SR
2901 local_set(&cpu_buffer->reader_page->write, 0);
2902 local_set(&cpu_buffer->reader_page->entries, 0);
2903 local_set(&cpu_buffer->reader_page->page->commit, 0);
ff0ff84a 2904 cpu_buffer->reader_page->real_end = 0;
7a8e76a3 2905
77ae365e
SR
2906 spin:
2907 /*
2908 * Splice the empty reader page into the list around the head.
2909 */
2910 reader = rb_set_head_page(cpu_buffer);
0e1ff5d7 2911 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
d769041f 2912 cpu_buffer->reader_page->list.prev = reader->list.prev;
bf41a158 2913
3adc54fa
SR
2914 /*
2915 * cpu_buffer->pages just needs to point to the buffer, it
2916 * has no specific buffer page to point to. Let's move it out
2917 * of our way so we don't accidentally swap it.
2918 */
2919 cpu_buffer->pages = reader->list.prev;
2920
77ae365e
SR
2921 /* The reader page will be pointing to the new head */
2922 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
7a8e76a3 2923
66a8cb95
SR
2924 /*
2925 * We want to make sure we read the overruns after we set up our
2926 * pointers to the next object. The writer side does a
2927 * cmpxchg to cross pages which acts as the mb on the writer
2928 * side. Note, the reader will constantly fail the swap
2929 * while the writer is updating the pointers, so this
2930 * guarantees that the overwrite recorded here is the one we
2931 * want to compare with the last_overrun.
2932 */
2933 smp_mb();
2934 overwrite = local_read(&(cpu_buffer->overrun));
2935
77ae365e
SR
2936 /*
2937 * Here's the tricky part.
2938 *
2939 * We need to move the pointer past the header page.
2940 * But we can only do that if a writer is not currently
2941 * moving it. The page before the header page has the
2942 * flag bit '1' set if it is pointing to the page we want.
2943 * but if the writer is in the process of moving it
2944 * than it will be '2' or already moved '0'.
2945 */
2946
2947 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
7a8e76a3
SR
2948
2949 /*
77ae365e 2950 * If we did not convert it, then we must try again.
7a8e76a3 2951 */
77ae365e
SR
2952 if (!ret)
2953 goto spin;
7a8e76a3 2954
77ae365e
SR
2955 /*
2956 * Yeah! We succeeded in replacing the page.
2957 *
2958 * Now make the new head point back to the reader page.
2959 */
5ded3dc6 2960 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
77ae365e 2961 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
d769041f
SR
2962
2963 /* Finally update the reader page to the new head */
2964 cpu_buffer->reader_page = reader;
2965 rb_reset_reader_page(cpu_buffer);
2966
66a8cb95
SR
2967 if (overwrite != cpu_buffer->last_overrun) {
2968 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
2969 cpu_buffer->last_overrun = overwrite;
2970 }
2971
d769041f
SR
2972 goto again;
2973
2974 out:
0199c4e6 2975 arch_spin_unlock(&cpu_buffer->lock);
3e03fb7f 2976 local_irq_restore(flags);
d769041f
SR
2977
2978 return reader;
2979}
2980
2981static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2982{
2983 struct ring_buffer_event *event;
2984 struct buffer_page *reader;
2985 unsigned length;
2986
2987 reader = rb_get_reader_page(cpu_buffer);
7a8e76a3 2988
d769041f 2989 /* This function should not be called when buffer is empty */
3e89c7bb
SR
2990 if (RB_WARN_ON(cpu_buffer, !reader))
2991 return;
7a8e76a3 2992
d769041f
SR
2993 event = rb_reader_event(cpu_buffer);
2994
a1863c21 2995 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
e4906eff 2996 cpu_buffer->read++;
d769041f
SR
2997
2998 rb_update_read_stamp(cpu_buffer, event);
2999
3000 length = rb_event_length(event);
6f807acd 3001 cpu_buffer->reader_page->read += length;
7a8e76a3
SR
3002}
3003
3004static void rb_advance_iter(struct ring_buffer_iter *iter)
3005{
7a8e76a3
SR
3006 struct ring_buffer_per_cpu *cpu_buffer;
3007 struct ring_buffer_event *event;
3008 unsigned length;
3009
3010 cpu_buffer = iter->cpu_buffer;
7a8e76a3
SR
3011
3012 /*
3013 * Check if we are at the end of the buffer.
3014 */
bf41a158 3015 if (iter->head >= rb_page_size(iter->head_page)) {
ea05b57c
SR
3016 /* discarded commits can make the page empty */
3017 if (iter->head_page == cpu_buffer->commit_page)
3e89c7bb 3018 return;
d769041f 3019 rb_inc_iter(iter);
7a8e76a3
SR
3020 return;
3021 }
3022
3023 event = rb_iter_head_event(iter);
3024
3025 length = rb_event_length(event);
3026
3027 /*
3028 * This should not be called to advance the header if we are
3029 * at the tail of the buffer.
3030 */
3e89c7bb 3031 if (RB_WARN_ON(cpu_buffer,
f536aafc 3032 (iter->head_page == cpu_buffer->commit_page) &&
3e89c7bb
SR
3033 (iter->head + length > rb_commit_index(cpu_buffer))))
3034 return;
7a8e76a3
SR
3035
3036 rb_update_iter_read_stamp(iter, event);
3037
3038 iter->head += length;
3039
3040 /* check for end of page padding */
bf41a158
SR
3041 if ((iter->head >= rb_page_size(iter->head_page)) &&
3042 (iter->head_page != cpu_buffer->commit_page))
7a8e76a3
SR
3043 rb_advance_iter(iter);
3044}
3045
66a8cb95
SR
3046static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3047{
3048 return cpu_buffer->lost_events;
3049}
3050
f83c9d0f 3051static struct ring_buffer_event *
66a8cb95
SR
3052rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3053 unsigned long *lost_events)
7a8e76a3 3054{
7a8e76a3 3055 struct ring_buffer_event *event;
d769041f 3056 struct buffer_page *reader;
818e3dd3 3057 int nr_loops = 0;
7a8e76a3 3058
7a8e76a3 3059 again:
818e3dd3 3060 /*
69d1b839
SR
3061 * We repeat when a time extend is encountered.
3062 * Since the time extend is always attached to a data event,
3063 * we should never loop more than once.
3064 * (We never hit the following condition more than twice).
818e3dd3 3065 */
69d1b839 3066 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
818e3dd3 3067 return NULL;
818e3dd3 3068
d769041f
SR
3069 reader = rb_get_reader_page(cpu_buffer);
3070 if (!reader)
7a8e76a3
SR
3071 return NULL;
3072
d769041f 3073 event = rb_reader_event(cpu_buffer);
7a8e76a3 3074
334d4169 3075 switch (event->type_len) {
7a8e76a3 3076 case RINGBUF_TYPE_PADDING:
2d622719
TZ
3077 if (rb_null_event(event))
3078 RB_WARN_ON(cpu_buffer, 1);
3079 /*
3080 * Because the writer could be discarding every
3081 * event it creates (which would probably be bad)
3082 * if we were to go back to "again" then we may never
3083 * catch up, and will trigger the warn on, or lock
3084 * the box. Return the padding, and we will release
3085 * the current locks, and try again.
3086 */
2d622719 3087 return event;
7a8e76a3
SR
3088
3089 case RINGBUF_TYPE_TIME_EXTEND:
3090 /* Internal data, OK to advance */
d769041f 3091 rb_advance_reader(cpu_buffer);
7a8e76a3
SR
3092 goto again;
3093
3094 case RINGBUF_TYPE_TIME_STAMP:
3095 /* FIXME: not implemented */
d769041f 3096 rb_advance_reader(cpu_buffer);
7a8e76a3
SR
3097 goto again;
3098
3099 case RINGBUF_TYPE_DATA:
3100 if (ts) {
3101 *ts = cpu_buffer->read_stamp + event->time_delta;
d8eeb2d3 3102 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
37886f6a 3103 cpu_buffer->cpu, ts);
7a8e76a3 3104 }
66a8cb95
SR
3105 if (lost_events)
3106 *lost_events = rb_lost_events(cpu_buffer);
7a8e76a3
SR
3107 return event;
3108
3109 default:
3110 BUG();
3111 }
3112
3113 return NULL;
3114}
c4f50183 3115EXPORT_SYMBOL_GPL(ring_buffer_peek);
7a8e76a3 3116
f83c9d0f
SR
3117static struct ring_buffer_event *
3118rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
7a8e76a3
SR
3119{
3120 struct ring_buffer *buffer;
3121 struct ring_buffer_per_cpu *cpu_buffer;
3122 struct ring_buffer_event *event;
818e3dd3 3123 int nr_loops = 0;
7a8e76a3 3124
7a8e76a3
SR
3125 cpu_buffer = iter->cpu_buffer;
3126 buffer = cpu_buffer->buffer;
3127
492a74f4
SR
3128 /*
3129 * Check if someone performed a consuming read to
3130 * the buffer. A consuming read invalidates the iterator
3131 * and we need to reset the iterator in this case.
3132 */
3133 if (unlikely(iter->cache_read != cpu_buffer->read ||
3134 iter->cache_reader_page != cpu_buffer->reader_page))
3135 rb_iter_reset(iter);
3136
7a8e76a3 3137 again:
3c05d748
SR
3138 if (ring_buffer_iter_empty(iter))
3139 return NULL;
3140
818e3dd3 3141 /*
69d1b839
SR
3142 * We repeat when a time extend is encountered.
3143 * Since the time extend is always attached to a data event,
3144 * we should never loop more than once.
3145 * (We never hit the following condition more than twice).
818e3dd3 3146 */
69d1b839 3147 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
818e3dd3 3148 return NULL;
818e3dd3 3149
7a8e76a3
SR
3150 if (rb_per_cpu_empty(cpu_buffer))
3151 return NULL;
3152
3c05d748
SR
3153 if (iter->head >= local_read(&iter->head_page->page->commit)) {
3154 rb_inc_iter(iter);
3155 goto again;
3156 }
3157
7a8e76a3
SR
3158 event = rb_iter_head_event(iter);
3159
334d4169 3160 switch (event->type_len) {
7a8e76a3 3161 case RINGBUF_TYPE_PADDING:
2d622719
TZ
3162 if (rb_null_event(event)) {
3163 rb_inc_iter(iter);
3164 goto again;
3165 }
3166 rb_advance_iter(iter);
3167 return event;
7a8e76a3
SR
3168
3169 case RINGBUF_TYPE_TIME_EXTEND:
3170 /* Internal data, OK to advance */
3171 rb_advance_iter(iter);
3172 goto again;
3173
3174 case RINGBUF_TYPE_TIME_STAMP:
3175 /* FIXME: not implemented */
3176 rb_advance_iter(iter);
3177 goto again;
3178
3179 case RINGBUF_TYPE_DATA:
3180 if (ts) {
3181 *ts = iter->read_stamp + event->time_delta;
37886f6a
SR
3182 ring_buffer_normalize_time_stamp(buffer,
3183 cpu_buffer->cpu, ts);
7a8e76a3
SR
3184 }
3185 return event;
3186
3187 default:
3188 BUG();
3189 }
3190
3191 return NULL;
3192}
c4f50183 3193EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
7a8e76a3 3194
8d707e8e
SR
3195static inline int rb_ok_to_lock(void)
3196{
3197 /*
3198 * If an NMI die dump is writing out the content of the ring buffer,
3199 * do not grab locks. We also permanently disable the ring
3200 * buffer. A one time deal is all you get from reading
3201 * the ring buffer from an NMI.
3202 */
464e85eb 3203 if (likely(!in_nmi()))
8d707e8e
SR
3204 return 1;
3205
3206 tracing_off_permanent();
3207 return 0;
3208}
3209
f83c9d0f
SR
3210/**
3211 * ring_buffer_peek - peek at the next event to be read
3212 * @buffer: The ring buffer to read
3213 * @cpu: The cpu to peek at
3214 * @ts: The timestamp counter of this event.
66a8cb95 3215 * @lost_events: a variable to store if events were lost (may be NULL)
f83c9d0f
SR
3216 *
3217 * This will return the event that will be read next, but does
3218 * not consume the data.
3219 */
3220struct ring_buffer_event *
66a8cb95
SR
3221ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3222 unsigned long *lost_events)
f83c9d0f
SR
3223{
3224 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
8aabee57 3225 struct ring_buffer_event *event;
f83c9d0f 3226 unsigned long flags;
8d707e8e 3227 int dolock;
f83c9d0f 3228
554f786e 3229 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 3230 return NULL;
554f786e 3231
8d707e8e 3232 dolock = rb_ok_to_lock();
2d622719 3233 again:
8d707e8e
SR
3234 local_irq_save(flags);
3235 if (dolock)
3236 spin_lock(&cpu_buffer->reader_lock);
66a8cb95 3237 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
469535a5
RR
3238 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3239 rb_advance_reader(cpu_buffer);
8d707e8e
SR
3240 if (dolock)
3241 spin_unlock(&cpu_buffer->reader_lock);
3242 local_irq_restore(flags);
f83c9d0f 3243
1b959e18 3244 if (event && event->type_len == RINGBUF_TYPE_PADDING)
2d622719 3245 goto again;
2d622719 3246
f83c9d0f
SR
3247 return event;
3248}
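/*
 * Illustrative sketch (not part of the original file): peeking at the
 * next event on one CPU without consuming it, e.g. to compare time
 * stamps across CPUs before deciding which buffer to drain.
 */
#if 0	/* example only */
static u64 example_next_ts(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts = 0;

	event = ring_buffer_peek(buffer, cpu, &ts, NULL);

	return event ? ts : 0;
}
#endif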
3249
3250/**
3251 * ring_buffer_iter_peek - peek at the next event to be read
3252 * @iter: The ring buffer iterator
3253 * @ts: The timestamp counter of this event.
3254 *
3255 * This will return the event that will be read next, but does
3256 * not increment the iterator.
3257 */
3258struct ring_buffer_event *
3259ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3260{
3261 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3262 struct ring_buffer_event *event;
3263 unsigned long flags;
3264
2d622719 3265 again:
f83c9d0f
SR
3266 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3267 event = rb_iter_peek(iter, ts);
3268 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3269
1b959e18 3270 if (event && event->type_len == RINGBUF_TYPE_PADDING)
2d622719 3271 goto again;
2d622719 3272
f83c9d0f
SR
3273 return event;
3274}
3275
7a8e76a3
SR
3276/**
3277 * ring_buffer_consume - return an event and consume it
3278 * @buffer: The ring buffer to get the next event from
66a8cb95
SR
3279 * @cpu: the cpu to read the buffer from
3280 * @ts: a variable to store the timestamp (may be NULL)
3281 * @lost_events: a variable to store if events were lost (may be NULL)
7a8e76a3
SR
3282 *
3283 * Returns the next event in the ring buffer, and that event is consumed.
3284 * Meaning that sequential reads will keep returning a different event,
3285 * and eventually empty the ring buffer if the producer is slower.
3286 */
3287struct ring_buffer_event *
66a8cb95
SR
3288ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3289 unsigned long *lost_events)
7a8e76a3 3290{
554f786e
SR
3291 struct ring_buffer_per_cpu *cpu_buffer;
3292 struct ring_buffer_event *event = NULL;
f83c9d0f 3293 unsigned long flags;
8d707e8e
SR
3294 int dolock;
3295
3296 dolock = rb_ok_to_lock();
7a8e76a3 3297
2d622719 3298 again:
554f786e
SR
3299 /* might be called in atomic */
3300 preempt_disable();
3301
9e01c1b7 3302 if (!cpumask_test_cpu(cpu, buffer->cpumask))
554f786e 3303 goto out;
7a8e76a3 3304
554f786e 3305 cpu_buffer = buffer->buffers[cpu];
8d707e8e
SR
3306 local_irq_save(flags);
3307 if (dolock)
3308 spin_lock(&cpu_buffer->reader_lock);
f83c9d0f 3309
66a8cb95
SR
3310 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3311 if (event) {
3312 cpu_buffer->lost_events = 0;
469535a5 3313 rb_advance_reader(cpu_buffer);
66a8cb95 3314 }
7a8e76a3 3315
8d707e8e
SR
3316 if (dolock)
3317 spin_unlock(&cpu_buffer->reader_lock);
3318 local_irq_restore(flags);
f83c9d0f 3319
554f786e
SR
3320 out:
3321 preempt_enable();
3322
1b959e18 3323 if (event && event->type_len == RINGBUF_TYPE_PADDING)
2d622719 3324 goto again;
2d622719 3325
7a8e76a3
SR
3326 return event;
3327}
c4f50183 3328EXPORT_SYMBOL_GPL(ring_buffer_consume);
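/*
 * Illustrative sketch (not part of the original file): draining one
 * CPU's buffer with the consuming read described above.
 * "handle_event()" is a hypothetical callback used only here.
 */
#if 0	/* example only */
static void example_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
		if (lost)
			printk(KERN_WARNING "lost %lu events\n", lost);
		handle_event(ring_buffer_event_data(event), ts);
	}
}
#endif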
7a8e76a3
SR
3329
3330/**
72c9ddfd 3331 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
7a8e76a3
SR
3332 * @buffer: The ring buffer to read from
3333 * @cpu: The cpu buffer to iterate over
3334 *
72c9ddfd
DM
3335 * This performs the initial preparations necessary to iterate
3336 * through the buffer. Memory is allocated, buffer recording
3337 * is disabled, and the iterator pointer is returned to the caller.
7a8e76a3 3338 *
72c9ddfd
DM
3339 * Disabling buffer recording prevents the reading from being
3340 * corrupted. This is not a consuming read, so a producer is not
3341 * expected.
3342 *
3343 * After a sequence of ring_buffer_read_prepare calls, the user is
3344 * expected to make at least one call to ring_buffer_prepare_sync.
3345 * Afterwards, ring_buffer_read_start is invoked to get things going
3346 * for real.
3347 *
3348 * This overall must be paired with ring_buffer_read_finish.
7a8e76a3
SR
3349 */
3350struct ring_buffer_iter *
72c9ddfd 3351ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
7a8e76a3
SR
3352{
3353 struct ring_buffer_per_cpu *cpu_buffer;
8aabee57 3354 struct ring_buffer_iter *iter;
7a8e76a3 3355
9e01c1b7 3356 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 3357 return NULL;
7a8e76a3
SR
3358
3359 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3360 if (!iter)
8aabee57 3361 return NULL;
7a8e76a3
SR
3362
3363 cpu_buffer = buffer->buffers[cpu];
3364
3365 iter->cpu_buffer = cpu_buffer;
3366
3367 atomic_inc(&cpu_buffer->record_disabled);
72c9ddfd
DM
3368
3369 return iter;
3370}
3371EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
3372
3373/**
3374 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
3375 *
3376 * All previously invoked ring_buffer_read_prepare calls to prepare
3377 * iterators will be synchronized. Afterwards, ring_buffer_read_start
3378 * calls on those iterators are allowed.
3379 */
3380void
3381ring_buffer_read_prepare_sync(void)
3382{
7a8e76a3 3383 synchronize_sched();
72c9ddfd
DM
3384}
3385EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
3386
3387/**
3388 * ring_buffer_read_start - start a non consuming read of the buffer
3389 * @iter: The iterator returned by ring_buffer_read_prepare
3390 *
3391 * This finalizes the startup of an iteration through the buffer.
3392 * The iterator comes from a call to ring_buffer_read_prepare and
3393 * an intervening ring_buffer_read_prepare_sync must have been
3394 * performed.
3395 *
3396 * Must be paired with ring_buffer_read_finish.
3397 */
3398void
3399ring_buffer_read_start(struct ring_buffer_iter *iter)
3400{
3401 struct ring_buffer_per_cpu *cpu_buffer;
3402 unsigned long flags;
3403
3404 if (!iter)
3405 return;
3406
3407 cpu_buffer = iter->cpu_buffer;
7a8e76a3 3408
f83c9d0f 3409 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
0199c4e6 3410 arch_spin_lock(&cpu_buffer->lock);
642edba5 3411 rb_iter_reset(iter);
0199c4e6 3412 arch_spin_unlock(&cpu_buffer->lock);
f83c9d0f 3413 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7a8e76a3 3414}
c4f50183 3415EXPORT_SYMBOL_GPL(ring_buffer_read_start);
7a8e76a3
SR
3416
3417/**
3418 * ring_buffer_read_finish - finish reading the iterator of the buffer
3419 * @iter: The iterator retrieved by ring_buffer_read_prepare
3420 *
3421 * This re-enables the recording to the buffer, and frees the
3422 * iterator.
3423 */
3424void
3425ring_buffer_read_finish(struct ring_buffer_iter *iter)
3426{
3427 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3428
3429 atomic_dec(&cpu_buffer->record_disabled);
3430 kfree(iter);
3431}
c4f50183 3432EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
7a8e76a3
SR
3433
3434/**
3435 * ring_buffer_read - read the next item in the ring buffer by the iterator
3436 * @iter: The ring buffer iterator
3437 * @ts: The time stamp of the event read.
3438 *
3439 * This reads the next event in the ring buffer and increments the iterator.
3440 */
3441struct ring_buffer_event *
3442ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
3443{
3444 struct ring_buffer_event *event;
f83c9d0f
SR
3445 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3446 unsigned long flags;
7a8e76a3 3447
f83c9d0f 3448 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7e9391cf 3449 again:
f83c9d0f 3450 event = rb_iter_peek(iter, ts);
7a8e76a3 3451 if (!event)
f83c9d0f 3452 goto out;
7a8e76a3 3453
7e9391cf
SR
3454 if (event->type_len == RINGBUF_TYPE_PADDING)
3455 goto again;
3456
7a8e76a3 3457 rb_advance_iter(iter);
f83c9d0f
SR
3458 out:
3459 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7a8e76a3
SR
3460
3461 return event;
3462}
c4f50183 3463EXPORT_SYMBOL_GPL(ring_buffer_read);
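/*
 * Illustrative sketch (not part of the original file): a non consuming
 * read of one CPU buffer using the prepare/sync/start/read/finish
 * sequence documented above. "handle_event()" is hypothetical.
 */
#if 0	/* example only */
static void example_iterate_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(buffer, cpu);
	if (!iter)
		return;
	ring_buffer_read_prepare_sync();
	ring_buffer_read_start(iter);

	while ((event = ring_buffer_read(iter, &ts)))
		handle_event(ring_buffer_event_data(event), ts);

	ring_buffer_read_finish(iter);
}
#endif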
7a8e76a3
SR
3464
3465/**
3466 * ring_buffer_size - return the size of the ring buffer (in bytes)
3467 * @buffer: The ring buffer.
3468 */
3469unsigned long ring_buffer_size(struct ring_buffer *buffer)
3470{
3471 return BUF_PAGE_SIZE * buffer->pages;
3472}
c4f50183 3473EXPORT_SYMBOL_GPL(ring_buffer_size);
7a8e76a3
SR
3474
3475static void
3476rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
3477{
77ae365e
SR
3478 rb_head_page_deactivate(cpu_buffer);
3479
7a8e76a3 3480 cpu_buffer->head_page
3adc54fa 3481 = list_entry(cpu_buffer->pages, struct buffer_page, list);
bf41a158 3482 local_set(&cpu_buffer->head_page->write, 0);
778c55d4 3483 local_set(&cpu_buffer->head_page->entries, 0);
abc9b56d 3484 local_set(&cpu_buffer->head_page->page->commit, 0);
d769041f 3485
6f807acd 3486 cpu_buffer->head_page->read = 0;
bf41a158
SR
3487
3488 cpu_buffer->tail_page = cpu_buffer->head_page;
3489 cpu_buffer->commit_page = cpu_buffer->head_page;
3490
3491 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
3492 local_set(&cpu_buffer->reader_page->write, 0);
778c55d4 3493 local_set(&cpu_buffer->reader_page->entries, 0);
abc9b56d 3494 local_set(&cpu_buffer->reader_page->page->commit, 0);
6f807acd 3495 cpu_buffer->reader_page->read = 0;
7a8e76a3 3496
77ae365e
SR
3497 local_set(&cpu_buffer->commit_overrun, 0);
3498 local_set(&cpu_buffer->overrun, 0);
e4906eff 3499 local_set(&cpu_buffer->entries, 0);
fa743953
SR
3500 local_set(&cpu_buffer->committing, 0);
3501 local_set(&cpu_buffer->commits, 0);
77ae365e 3502 cpu_buffer->read = 0;
69507c06
SR
3503
3504 cpu_buffer->write_stamp = 0;
3505 cpu_buffer->read_stamp = 0;
77ae365e 3506
66a8cb95
SR
3507 cpu_buffer->lost_events = 0;
3508 cpu_buffer->last_overrun = 0;
3509
77ae365e 3510 rb_head_page_activate(cpu_buffer);
7a8e76a3
SR
3511}
3512
3513/**
3514 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
3515 * @buffer: The ring buffer to reset a per cpu buffer of
3516 * @cpu: The CPU buffer to be reset
3517 */
3518void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
3519{
3520 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3521 unsigned long flags;
3522
9e01c1b7 3523 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 3524 return;
7a8e76a3 3525
41ede23e
SR
3526 atomic_inc(&cpu_buffer->record_disabled);
3527
f83c9d0f
SR
3528 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3529
41b6a95d
SR
3530 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
3531 goto out;
3532
0199c4e6 3533 arch_spin_lock(&cpu_buffer->lock);
7a8e76a3
SR
3534
3535 rb_reset_cpu(cpu_buffer);
3536
0199c4e6 3537 arch_spin_unlock(&cpu_buffer->lock);
f83c9d0f 3538
41b6a95d 3539 out:
f83c9d0f 3540 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
41ede23e
SR
3541
3542 atomic_dec(&cpu_buffer->record_disabled);
7a8e76a3 3543}
c4f50183 3544EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
7a8e76a3
SR
3545
3546/**
3547 * ring_buffer_reset - reset a ring buffer
3548 * @buffer: The ring buffer to reset all cpu buffers
3549 */
3550void ring_buffer_reset(struct ring_buffer *buffer)
3551{
7a8e76a3
SR
3552 int cpu;
3553
7a8e76a3 3554 for_each_buffer_cpu(buffer, cpu)
d769041f 3555 ring_buffer_reset_cpu(buffer, cpu);
7a8e76a3 3556}
c4f50183 3557EXPORT_SYMBOL_GPL(ring_buffer_reset);
7a8e76a3
SR
3558
3559/**
3560 * ring_buffer_empty - is the ring buffer empty?
3561 * @buffer: The ring buffer to test
3562 */
3563int ring_buffer_empty(struct ring_buffer *buffer)
3564{
3565 struct ring_buffer_per_cpu *cpu_buffer;
d4788207 3566 unsigned long flags;
8d707e8e 3567 int dolock;
7a8e76a3 3568 int cpu;
d4788207 3569 int ret;
7a8e76a3 3570
8d707e8e 3571 dolock = rb_ok_to_lock();
7a8e76a3
SR
3572
3573 /* yes this is racy, but if you don't like the race, lock the buffer */
3574 for_each_buffer_cpu(buffer, cpu) {
3575 cpu_buffer = buffer->buffers[cpu];
8d707e8e
SR
3576 local_irq_save(flags);
3577 if (dolock)
3578 spin_lock(&cpu_buffer->reader_lock);
d4788207 3579 ret = rb_per_cpu_empty(cpu_buffer);
8d707e8e
SR
3580 if (dolock)
3581 spin_unlock(&cpu_buffer->reader_lock);
3582 local_irq_restore(flags);
3583
d4788207 3584 if (!ret)
7a8e76a3
SR
3585 return 0;
3586 }
554f786e 3587
7a8e76a3
SR
3588 return 1;
3589}
c4f50183 3590EXPORT_SYMBOL_GPL(ring_buffer_empty);
7a8e76a3
SR
3591
3592/**
3593 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
3594 * @buffer: The ring buffer
3595 * @cpu: The CPU buffer to test
3596 */
3597int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
3598{
3599 struct ring_buffer_per_cpu *cpu_buffer;
d4788207 3600 unsigned long flags;
8d707e8e 3601 int dolock;
8aabee57 3602 int ret;
7a8e76a3 3603
9e01c1b7 3604 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 3605 return 1;
7a8e76a3 3606
8d707e8e
SR
3607 dolock = rb_ok_to_lock();
3608
7a8e76a3 3609 cpu_buffer = buffer->buffers[cpu];
8d707e8e
SR
3610 local_irq_save(flags);
3611 if (dolock)
3612 spin_lock(&cpu_buffer->reader_lock);
554f786e 3613 ret = rb_per_cpu_empty(cpu_buffer);
8d707e8e
SR
3614 if (dolock)
3615 spin_unlock(&cpu_buffer->reader_lock);
3616 local_irq_restore(flags);
554f786e
SR
3617
3618 return ret;
7a8e76a3 3619}
c4f50183 3620EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
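/*
 * Illustrative sketch, not part of the original file: a reader that skips
 * idle cpu buffers of a hypothetical "my_buffer".  As the comment above
 * notes, the check is inherently racy unless the caller serializes against
 * the writers itself; drain_cpu() is a hypothetical helper.
 *
 *	int cpu;
 *
 *	if (ring_buffer_empty(my_buffer))
 *		return;				(nothing recorded anywhere)
 *
 *	for_each_online_cpu(cpu) {
 *		if (ring_buffer_empty_cpu(my_buffer, cpu))
 *			continue;
 *		drain_cpu(my_buffer, cpu);
 *	}
 */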
7a8e76a3 3621
85bac32c 3622#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
7a8e76a3
SR
3623/**
3624 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
3625 * @buffer_a: One buffer to swap with
 3626 * @buffer_b: The other buffer to swap with
 * @cpu: The CPU buffer to swap
 3627 *
 3628 * This function is useful for tracers that want to take a "snapshot"
 3629 * of a CPU buffer and have another backup buffer lying around.
 3630 * It is expected that the tracer handles the cpu buffer not being
 3631 * used at the moment.
3632 */
3633int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
3634 struct ring_buffer *buffer_b, int cpu)
3635{
3636 struct ring_buffer_per_cpu *cpu_buffer_a;
3637 struct ring_buffer_per_cpu *cpu_buffer_b;
554f786e
SR
3638 int ret = -EINVAL;
3639
9e01c1b7
RR
3640 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
3641 !cpumask_test_cpu(cpu, buffer_b->cpumask))
554f786e 3642 goto out;
7a8e76a3
SR
3643
3644 /* At least make sure the two buffers are somewhat the same */
6d102bc6 3645 if (buffer_a->pages != buffer_b->pages)
554f786e
SR
3646 goto out;
3647
3648 ret = -EAGAIN;
7a8e76a3 3649
97b17efe 3650 if (ring_buffer_flags != RB_BUFFERS_ON)
554f786e 3651 goto out;
97b17efe
SR
3652
3653 if (atomic_read(&buffer_a->record_disabled))
554f786e 3654 goto out;
97b17efe
SR
3655
3656 if (atomic_read(&buffer_b->record_disabled))
554f786e 3657 goto out;
97b17efe 3658
7a8e76a3
SR
3659 cpu_buffer_a = buffer_a->buffers[cpu];
3660 cpu_buffer_b = buffer_b->buffers[cpu];
3661
97b17efe 3662 if (atomic_read(&cpu_buffer_a->record_disabled))
554f786e 3663 goto out;
97b17efe
SR
3664
3665 if (atomic_read(&cpu_buffer_b->record_disabled))
554f786e 3666 goto out;
97b17efe 3667
7a8e76a3
SR
3668 /*
3669 * We can't do a synchronize_sched here because this
3670 * function can be called in atomic context.
3671 * Normally this will be called from the same CPU as cpu.
3672 * If not it's up to the caller to protect this.
3673 */
3674 atomic_inc(&cpu_buffer_a->record_disabled);
3675 atomic_inc(&cpu_buffer_b->record_disabled);
3676
98277991
SR
3677 ret = -EBUSY;
3678 if (local_read(&cpu_buffer_a->committing))
3679 goto out_dec;
3680 if (local_read(&cpu_buffer_b->committing))
3681 goto out_dec;
3682
7a8e76a3
SR
3683 buffer_a->buffers[cpu] = cpu_buffer_b;
3684 buffer_b->buffers[cpu] = cpu_buffer_a;
3685
3686 cpu_buffer_b->buffer = buffer_a;
3687 cpu_buffer_a->buffer = buffer_b;
3688
98277991
SR
3689 ret = 0;
3690
3691out_dec:
7a8e76a3
SR
3692 atomic_dec(&cpu_buffer_a->record_disabled);
3693 atomic_dec(&cpu_buffer_b->record_disabled);
554f786e 3694out:
554f786e 3695 return ret;
7a8e76a3 3696}
c4f50183 3697EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
85bac32c 3698#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
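/*
 * Illustrative sketch, not part of the original file: a "snapshot" in the
 * style the comment above describes, using two hypothetical buffers of the
 * same size.  ring_buffer_swap_cpu() can fail with -EINVAL, -EAGAIN or
 * -EBUSY, so the return value must be checked; read_events_from() is a
 * hypothetical helper.
 *
 *	int err;
 *
 *	err = ring_buffer_swap_cpu(snapshot_buffer, live_buffer, cpu);
 *	if (!err)
 *		read_events_from(snapshot_buffer, cpu);
 */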
7a8e76a3 3699
8789a9e7
SR
3700/**
3701 * ring_buffer_alloc_read_page - allocate a page to read from buffer
3702 * @buffer: the buffer to allocate for.
3703 *
3704 * This function is used in conjunction with ring_buffer_read_page.
3705 * When reading a full page from the ring buffer, these functions
3706 * can be used to speed up the process. The calling function should
3707 * allocate a few pages first with this function. Then when it
3708 * needs to get pages from the ring buffer, it passes the result
3709 * of this function into ring_buffer_read_page, which will swap
 3710 * the page that was allocated with the read page of the buffer.
3711 *
3712 * Returns:
3713 * The page allocated, or NULL on error.
3714 */
3715void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
3716{
044fa782 3717 struct buffer_data_page *bpage;
ef7a4a16 3718 unsigned long addr;
8789a9e7
SR
3719
3720 addr = __get_free_page(GFP_KERNEL);
3721 if (!addr)
3722 return NULL;
3723
044fa782 3724 bpage = (void *)addr;
8789a9e7 3725
ef7a4a16
SR
3726 rb_init_page(bpage);
3727
044fa782 3728 return bpage;
8789a9e7 3729}
d6ce96da 3730EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
8789a9e7
SR
3731
3732/**
3733 * ring_buffer_free_read_page - free an allocated read page
 3734 * @buffer: the buffer the page was allocated for
3735 * @data: the page to free
3736 *
3737 * Free a page allocated from ring_buffer_alloc_read_page.
3738 */
3739void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
3740{
3741 free_page((unsigned long)data);
3742}
d6ce96da 3743EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
8789a9e7
SR
3744
3745/**
3746 * ring_buffer_read_page - extract a page from the ring buffer
3747 * @buffer: buffer to extract from
3748 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
ef7a4a16 3749 * @len: amount to extract
8789a9e7
SR
3750 * @cpu: the cpu of the buffer to extract
3751 * @full: should the extraction only happen when the page is full.
3752 *
3753 * This function will pull out a page from the ring buffer and consume it.
3754 * @data_page must be the address of the variable that was returned
3755 * from ring_buffer_alloc_read_page. This is because the page might be used
3756 * to swap with a page in the ring buffer.
3757 *
3758 * for example:
b85fa01e 3759 * rpage = ring_buffer_alloc_read_page(buffer);
8789a9e7
SR
3760 * if (!rpage)
3761 * return error;
ef7a4a16 3762 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
667d2412
LJ
3763 * if (ret >= 0)
3764 * process_page(rpage, ret);
8789a9e7
SR
3765 *
 3766 * When @full is set, the function will not return the page's data
 3767 * unless the writer is completely off the reader page.
3768 *
3769 * Note: it is up to the calling functions to handle sleeps and wakeups.
3770 * The ring buffer can be used anywhere in the kernel and can not
3771 * blindly call wake_up. The layer that uses the ring buffer must be
3772 * responsible for that.
3773 *
3774 * Returns:
667d2412
LJ
3775 * >=0 if data has been transferred, returns the offset of consumed data.
3776 * <0 if no data has been transferred.
8789a9e7
SR
3777 */
3778int ring_buffer_read_page(struct ring_buffer *buffer,
ef7a4a16 3779 void **data_page, size_t len, int cpu, int full)
8789a9e7
SR
3780{
3781 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3782 struct ring_buffer_event *event;
044fa782 3783 struct buffer_data_page *bpage;
ef7a4a16 3784 struct buffer_page *reader;
ff0ff84a 3785 unsigned long missed_events;
8789a9e7 3786 unsigned long flags;
ef7a4a16 3787 unsigned int commit;
667d2412 3788 unsigned int read;
4f3640f8 3789 u64 save_timestamp;
667d2412 3790 int ret = -1;
8789a9e7 3791
554f786e
SR
3792 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3793 goto out;
3794
474d32b6
SR
3795 /*
3796 * If len is not big enough to hold the page header, then
3797 * we can not copy anything.
3798 */
3799 if (len <= BUF_PAGE_HDR_SIZE)
554f786e 3800 goto out;
474d32b6
SR
3801
3802 len -= BUF_PAGE_HDR_SIZE;
3803
8789a9e7 3804 if (!data_page)
554f786e 3805 goto out;
8789a9e7 3806
044fa782
SR
3807 bpage = *data_page;
3808 if (!bpage)
554f786e 3809 goto out;
8789a9e7
SR
3810
3811 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3812
ef7a4a16
SR
3813 reader = rb_get_reader_page(cpu_buffer);
3814 if (!reader)
554f786e 3815 goto out_unlock;
8789a9e7 3816
ef7a4a16
SR
3817 event = rb_reader_event(cpu_buffer);
3818
3819 read = reader->read;
3820 commit = rb_page_commit(reader);
667d2412 3821
66a8cb95 3822 /* Check if any events were dropped */
ff0ff84a 3823 missed_events = cpu_buffer->lost_events;
66a8cb95 3824
8789a9e7 3825 /*
474d32b6
SR
3826 * If this page has been partially read or
3827 * if len is not big enough to read the rest of the page or
3828 * a writer is still on the page, then
3829 * we must copy the data from the page to the buffer.
3830 * Otherwise, we can simply swap the page with the one passed in.
8789a9e7 3831 */
474d32b6 3832 if (read || (len < (commit - read)) ||
ef7a4a16 3833 cpu_buffer->reader_page == cpu_buffer->commit_page) {
667d2412 3834 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
474d32b6
SR
3835 unsigned int rpos = read;
3836 unsigned int pos = 0;
ef7a4a16 3837 unsigned int size;
8789a9e7
SR
3838
3839 if (full)
554f786e 3840 goto out_unlock;
8789a9e7 3841
ef7a4a16
SR
3842 if (len > (commit - read))
3843 len = (commit - read);
3844
69d1b839
SR
3845 /* Always keep the time extend and data together */
3846 size = rb_event_ts_length(event);
ef7a4a16
SR
3847
3848 if (len < size)
554f786e 3849 goto out_unlock;
ef7a4a16 3850
4f3640f8
SR
3851 /* save the current timestamp, since the user will need it */
3852 save_timestamp = cpu_buffer->read_stamp;
3853
ef7a4a16
SR
3854 /* Need to copy one event at a time */
3855 do {
474d32b6 3856 memcpy(bpage->data + pos, rpage->data + rpos, size);
ef7a4a16
SR
3857
3858 len -= size;
3859
3860 rb_advance_reader(cpu_buffer);
474d32b6
SR
3861 rpos = reader->read;
3862 pos += size;
ef7a4a16 3863
18fab912
HY
3864 if (rpos >= commit)
3865 break;
3866
ef7a4a16 3867 event = rb_reader_event(cpu_buffer);
69d1b839
SR
3868 /* Always keep the time extend and data together */
3869 size = rb_event_ts_length(event);
ef7a4a16 3870 } while (len > size);
667d2412
LJ
3871
3872 /* update bpage */
ef7a4a16 3873 local_set(&bpage->commit, pos);
4f3640f8 3874 bpage->time_stamp = save_timestamp;
ef7a4a16 3875
474d32b6
SR
3876 /* we copied everything to the beginning */
3877 read = 0;
8789a9e7 3878 } else {
afbab76a 3879 /* update the entry counter */
77ae365e 3880 cpu_buffer->read += rb_page_entries(reader);
afbab76a 3881
8789a9e7 3882 /* swap the pages */
044fa782 3883 rb_init_page(bpage);
ef7a4a16
SR
3884 bpage = reader->page;
3885 reader->page = *data_page;
3886 local_set(&reader->write, 0);
778c55d4 3887 local_set(&reader->entries, 0);
ef7a4a16 3888 reader->read = 0;
044fa782 3889 *data_page = bpage;
ff0ff84a
SR
3890
3891 /*
3892 * Use the real_end for the data size,
3893 * This gives us a chance to store the lost events
3894 * on the page.
3895 */
3896 if (reader->real_end)
3897 local_set(&bpage->commit, reader->real_end);
8789a9e7 3898 }
667d2412 3899 ret = read;
8789a9e7 3900
66a8cb95 3901 cpu_buffer->lost_events = 0;
2711ca23
SR
3902
3903 commit = local_read(&bpage->commit);
66a8cb95
SR
3904 /*
3905 * Set a flag in the commit field if we lost events
3906 */
ff0ff84a 3907 if (missed_events) {
ff0ff84a
SR
3908 /* If there is room at the end of the page to save the
3909 * missed events, then record it there.
3910 */
3911 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
3912 memcpy(&bpage->data[commit], &missed_events,
3913 sizeof(missed_events));
3914 local_add(RB_MISSED_STORED, &bpage->commit);
2711ca23 3915 commit += sizeof(missed_events);
ff0ff84a 3916 }
66a8cb95 3917 local_add(RB_MISSED_EVENTS, &bpage->commit);
ff0ff84a 3918 }
66a8cb95 3919
2711ca23
SR
3920 /*
3921 * This page may be off to user land. Zero it out here.
3922 */
3923 if (commit < BUF_PAGE_SIZE)
3924 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
3925
554f786e 3926 out_unlock:
8789a9e7
SR
3927 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3928
554f786e 3929 out:
8789a9e7
SR
3930 return ret;
3931}
d6ce96da 3932EXPORT_SYMBOL_GPL(ring_buffer_read_page);
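/*
 * Illustrative sketch, not part of the original file: draining one cpu of a
 * hypothetical "my_buffer" a page at a time and handing each filled page to
 * a hypothetical consume() helper (the return value is the read offset
 * within the page).  PAGE_SIZE is passed for @len on the assumption that the
 * caller wants as much of the page as fits; the spare page is released once
 * reading stops.
 *
 *	void *spare = ring_buffer_alloc_read_page(my_buffer);
 *	int ret;
 *
 *	if (!spare)
 *		return -ENOMEM;
 *	while ((ret = ring_buffer_read_page(my_buffer, &spare,
 *					    PAGE_SIZE, cpu, 0)) >= 0)
 *		consume(spare, ret);
 *	ring_buffer_free_read_page(my_buffer, spare);
 */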
8789a9e7 3933
1155de47 3934#ifdef CONFIG_TRACING
a3583244
SR
3935static ssize_t
3936rb_simple_read(struct file *filp, char __user *ubuf,
3937 size_t cnt, loff_t *ppos)
3938{
5e39841c 3939 unsigned long *p = filp->private_data;
a3583244
SR
3940 char buf[64];
3941 int r;
3942
033601a3
SR
3943 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
3944 r = sprintf(buf, "permanently disabled\n");
3945 else
3946 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
a3583244
SR
3947
3948 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3949}
3950
3951static ssize_t
3952rb_simple_write(struct file *filp, const char __user *ubuf,
3953 size_t cnt, loff_t *ppos)
3954{
5e39841c 3955 unsigned long *p = filp->private_data;
a3583244 3956 char buf[64];
5e39841c 3957 unsigned long val;
a3583244
SR
3958 int ret;
3959
3960 if (cnt >= sizeof(buf))
3961 return -EINVAL;
3962
3963 if (copy_from_user(&buf, ubuf, cnt))
3964 return -EFAULT;
3965
3966 buf[cnt] = 0;
3967
3968 ret = strict_strtoul(buf, 10, &val);
3969 if (ret < 0)
3970 return ret;
3971
033601a3
SR
3972 if (val)
3973 set_bit(RB_BUFFERS_ON_BIT, p);
3974 else
3975 clear_bit(RB_BUFFERS_ON_BIT, p);
a3583244
SR
3976
3977 (*ppos)++;
3978
3979 return cnt;
3980}
3981
5e2336a0 3982static const struct file_operations rb_simple_fops = {
a3583244
SR
3983 .open = tracing_open_generic,
3984 .read = rb_simple_read,
3985 .write = rb_simple_write,
6038f373 3986 .llseek = default_llseek,
a3583244
SR
3987};
3988
3989
3990static __init int rb_init_debugfs(void)
3991{
3992 struct dentry *d_tracer;
a3583244
SR
3993
3994 d_tracer = tracing_init_dentry();
3995
5452af66
FW
3996 trace_create_file("tracing_on", 0644, d_tracer,
3997 &ring_buffer_flags, &rb_simple_fops);
a3583244
SR
3998
3999 return 0;
4000}
4001
4002fs_initcall(rb_init_debugfs);
1155de47 4003#endif
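/*
 * Illustrative usage, not part of the original file: the "tracing_on" file
 * created above lives in the tracing debugfs directory (commonly mounted
 * under /sys/kernel/debug).  Writing zero clears RB_BUFFERS_ON_BIT and stops
 * all ring buffer recording, writing a non-zero value turns it back on, and
 * reading reports the current state.
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on
 *	cat /sys/kernel/debug/tracing/tracing_on
 */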
554f786e 4004
59222efe 4005#ifdef CONFIG_HOTPLUG_CPU
09c9e84d
FW
4006static int rb_cpu_notify(struct notifier_block *self,
4007 unsigned long action, void *hcpu)
554f786e
SR
4008{
4009 struct ring_buffer *buffer =
4010 container_of(self, struct ring_buffer, cpu_notify);
4011 long cpu = (long)hcpu;
4012
4013 switch (action) {
4014 case CPU_UP_PREPARE:
4015 case CPU_UP_PREPARE_FROZEN:
3f237a79 4016 if (cpumask_test_cpu(cpu, buffer->cpumask))
554f786e
SR
4017 return NOTIFY_OK;
4018
4019 buffer->buffers[cpu] =
4020 rb_allocate_cpu_buffer(buffer, cpu);
4021 if (!buffer->buffers[cpu]) {
4022 WARN(1, "failed to allocate ring buffer on CPU %ld\n",
4023 cpu);
4024 return NOTIFY_OK;
4025 }
4026 smp_wmb();
3f237a79 4027 cpumask_set_cpu(cpu, buffer->cpumask);
554f786e
SR
4028 break;
4029 case CPU_DOWN_PREPARE:
4030 case CPU_DOWN_PREPARE_FROZEN:
4031 /*
4032 * Do nothing.
4033 * If we were to free the buffer, then the user would
4034 * lose any trace that was in the buffer.
4035 */
4036 break;
4037 default:
4038 break;
4039 }
4040 return NOTIFY_OK;
4041}
4042#endif