/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a priority search tree used to look up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from
 *   the kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed
 * - kmemleak_mutex (mutex): prevents multiple users of the "kmemleak" debugfs
 *   file together with modifications to the memory scanning parameters
 *   including the scan_thread pointer
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
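
/*
 * A minimal sketch (illustrative only, mirroring the helpers defined below)
 * of how the metadata is typically accessed under the rules above:
 *
 *	object = find_and_get_object(ptr, 0);	returns with use_count
 *	if (object) {				incremented, or NULL
 *		spin_lock_irqsave(&object->lock, flags);
 *		... read or modify the metadata ...
 *		spin_unlock_irqrestore(&object->lock, flags);
 *		put_object(object);
 *	}
 */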

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/prio_tree.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/atomic.h>

#include <linux/kmemleak.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define REPORTS_NR		50	/* maximum number of reported leaks */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define MSECS_SCAN_YIELD	10	/* CPU yielding period */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define GFP_KMEMLEAK_MASK	(GFP_KERNEL | GFP_ATOMIC)

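/*
 * Note on the mask above: internal metadata allocations keep only the
 * caller's GFP_KERNEL/GFP_ATOMIC bits, so they inherit the caller's
 * allocation context. For example (illustrative), a create_object() call
 * reached from an atomic context allocates with gfp & GFP_KMEMLEAK_MASK
 * and therefore does not sleep on kmemleak's behalf.
 */
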
/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long offset;
	size_t length;
};

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * tree_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct prio_tree_node tree_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* prio search tree for object boundaries */
static struct prio_tree_root object_tree_root;
/* rw_lock protecting the access to object_list and prio_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
/* set in the late_initcall if there were no errors */
static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
/* enables or disables early logging of the memory operations */
static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
/* set if a fatal kmemleak error has occurred */
static atomic_t kmemleak_error = ATOMIC_INIT(0);

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

/* used for yielding the CPU to other tasks during scanning */
static unsigned long next_scan_yield;
static struct task_struct *scan_thread;
static unsigned long jiffies_scan_yield;
static unsigned long jiffies_min_age;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan;
/* mutex protecting the memory scanning */
static DEFINE_MUTEX(scan_mutex);
/* mutex protecting the access to the /sys/kernel/debug/kmemleak file */
static DEFINE_MUTEX(kmemleak_mutex);

/* number of leaks reported (for limitation purposes) */
static int reported_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_FREE,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long offset;		/* scan area offset */
	size_t length;			/* scan area length */
};

/* early logging buffer and current position */
static struct early_log early_log[200];
static int crt_early_log;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {	\
	pr_warning(x);			\
	dump_stack();			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *           sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *           (min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
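/*
 * Worked example of the encoding above (illustrative values): a block
 * allocated with min_count == 1 starts with count == -1 (no color); after
 * a scan that finds no pointers to it, count == 0 < min_count and the
 * object is white, i.e. a leak candidate. kmemleak_not_leak() sets
 * min_count to 0 (permanently gray) and kmemleak_ignore() sets min_count
 * to -1 (black).
 */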
static int color_white(const struct kmemleak_object *object)
{
	return object->count != -1 && object->count < object->min_count;
}

static int color_gray(const struct kmemleak_object *object)
{
	return object->min_count != -1 && object->count >= object->min_count;
}

/*
 * Objects are considered referenced if their color is gray and they have not
 * been deleted.
 */
static int referenced_object(struct kmemleak_object *object)
{
	return (object->flags & OBJECT_ALLOCATED) && color_gray(object);
}

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static int unreferenced_object(struct kmemleak_object *object)
{
	return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
		time_is_before_eq_jiffies(object->jiffies + jiffies_min_age);
}

/*
 * Printing of the (un)referenced objects information, either to the seq file
 * or to the kernel log. The print_referenced/print_unreferenced functions
 * must be called with the object->lock held.
 */
#define print_helper(seq, x...)	do {	\
	struct seq_file *s = (seq);	\
	if (s)				\
		seq_printf(s, x);	\
	else				\
		pr_info(x);		\
} while (0)

static void print_referenced(struct kmemleak_object *object)
{
	pr_info("referenced object 0x%08lx (size %zu)\n",
		object->pointer, object->size);
}

static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;

	print_helper(seq, "unreferenced object 0x%08lx (size %zu):\n",
		     object->pointer, object->size);
	print_helper(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
		     object->comm, object->pid, object->jiffies);
	print_helper(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		print_helper(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->tree_node.start, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the priority search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct prio_tree_node *node;
	struct prio_tree_iter iter;
	struct kmemleak_object *object;

	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
	node = prio_tree_next(&iter);
	if (node) {
		object = prio_tree_entry(node, struct kmemleak_object,
					 tree_node);
		if (!alias && object->pointer != ptr) {
			kmemleak_warn("Found object by alias");
			object = NULL;
		}
	} else
		object = NULL;

	return object;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *elem, *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
		hlist_del(elem);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the prio search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object = NULL;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	if (ptr >= min_addr && ptr < max_addr)
		object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static void create_object(unsigned long ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct prio_tree_node *node;
	struct stack_trace trace;

	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!object) {
		kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
		return;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = -1;			/* no color initially */
	object->jiffies = jiffies;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	trace.max_entries = MAX_TRACE;
	trace.nr_entries = 0;
	trace.entries = object->trace;
	trace.skip = 1;
	save_stack_trace(&trace);
	object->trace_len = trace.nr_entries;

	INIT_PRIO_TREE_NODE(&object->tree_node);
	object->tree_node.start = ptr;
	object->tree_node.last = ptr + size - 1;

	write_lock_irqsave(&kmemleak_lock, flags);
	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	node = prio_tree_insert(&object_tree_root, &object->tree_node);
	/*
	 * The code calling the kernel does not yet have the pointer to the
	 * memory block to be able to free it. However, we still hold the
	 * kmemleak_lock here in case parts of the kernel started freeing
	 * random memory blocks.
	 */
	if (node != &object->tree_node) {
		unsigned long flags;

		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
			      "(already existing)\n", ptr);
		object = lookup_object(ptr, 1);
		spin_lock_irqsave(&object->lock, flags);
		dump_object_info(object);
		spin_unlock_irqrestore(&object->lock, flags);

		goto out;
	}
	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Remove the metadata (struct kmemleak_object) for a memory block from the
 * object_list and object_tree_root and decrement its use_count.
 */
static void delete_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Freeing unknown object at 0x%08lx\n", ptr);
		write_unlock_irqrestore(&kmemleak_lock, flags);
		return;
	}
	prio_tree_remove(&object_tree_root, &object->tree_node);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_REPORTED)
		print_referenced(object);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Graying unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->min_count = 0;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Mark the object as black-colored so that it is ignored during memory
 * scans and reporting.
 */
static void make_black_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Blacking unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->min_count = -1;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, unsigned long offset,
			  size_t length, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!area) {
		kmemleak_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (offset + length > object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->offset = offset;
	area->length = length;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references
 * to it are still searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void log_early(int op_type, const void *ptr, size_t size,
		      int min_count, unsigned long offset, size_t length)
{
	unsigned long flags;
	struct early_log *log;

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		kmemleak_stop("Early log buffer exceeded\n");
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->offset = offset;
	log->length = length;
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Memory allocation function callback. This function is called from the
 * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
 * vmalloc etc.).
 */
void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);

/*
 * Memory freeing function callback. This function is called from the kernel
 * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
 */
void kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/*
 * Mark an already allocated memory block as a false positive. This will cause
 * the block to no longer be reported as a leak and always be scanned.
 */
void kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/*
 * Ignore a memory block. This is usually done when it is known that the
 * corresponding block is not a leak and does not contain any references to
 * other allocated memory blocks.
 */
void kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/*
 * Limit the range to be scanned in an allocated memory block.
 */
void kmemleak_scan_area(const void *ptr, unsigned long offset, size_t length,
			gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, offset, length, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
}
EXPORT_SYMBOL(kmemleak_scan_area);

/*
 * Inform kmemleak not to scan the given memory block.
 */
void kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);
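
/*
 * Hedged usage sketch for the callbacks above; the structure and variable
 * names here are hypothetical and for illustration only:
 *
 *	priv = kzalloc(sizeof(*priv), GFP_KERNEL);	tracked automatically
 *	kmemleak_not_leak(priv);	known false positive, make it gray
 *	kmemleak_ignore(dma_buf);	holds no kernel pointers, make it black
 *	kmemleak_no_scan(io_area);	keep tracking it, but never scan it
 */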

/*
 * Yield the CPU so that other tasks get a chance to run. The yielding is
 * rate-limited to avoid an excessive number of calls to the schedule()
 * function during memory scanning.
 */
static void scan_yield(void)
{
	might_sleep();

	if (time_is_before_eq_jiffies(next_scan_yield)) {
		schedule();
		next_scan_yield = jiffies + jiffies_scan_yield;
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!atomic_read(&kmemleak_enabled))
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
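/*
 * For example (illustrative arithmetic): with BYTES_PER_POINTER == 4, a
 * scan of the range [0x1000, 0x1064) reads the 25 aligned words at 0x1000,
 * 0x1004, ..., 0x1060 and treats each value as a candidate pointer into
 * another tracked object.
 */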
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);

	for (ptr = start; ptr < end; ptr++) {
		unsigned long flags;
		unsigned long pointer = *ptr;
		struct kmemleak_object *object;

		if (scan_should_stop())
			break;

		/*
		 * When scanning a memory block with a corresponding
		 * kmemleak_object, the CPU yielding is handled in the calling
		 * code since it holds the object->lock to avoid the block
		 * freeing.
		 */
		if (!scanned)
			scan_yield();

		object = find_and_get_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned) {
			/* self referenced, ignore */
			put_object(object);
			continue;
		}

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_irqsave_nested(&object->lock, flags,
					 SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock_irqrestore(&object->lock, flags);
			put_object(object);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object))
			list_add_tail(&object->gray_list, &gray_list);
		else
			put_object(object);
		spin_unlock_irqrestore(&object->lock, flags);
	}
}

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	struct hlist_node *elem;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list))
		scan_block((void *)object->pointer,
			   (void *)(object->pointer + object->size), object);
	else
		hlist_for_each_entry(area, elem, &object->area_list, node)
			scan_block((void *)(object->pointer + area->offset),
				   (void *)(object->pointer + area->offset
					    + area->length), object);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object, *tmp;
	struct task_struct *task;
	int i;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_block(_sdata, _edata, NULL);
	scan_block(__bss_start, __bss_stop, NULL);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
			   __per_cpu_end + per_cpu_offset(i), NULL);
#endif

	/*
	 * Struct page scanning for each node. The code below is not yet safe
	 * with MEMORY_HOTPLUG.
	 */
	for_each_online_node(i) {
		pg_data_t *pgdat = NODE_DATA(i);
		unsigned long start_pfn = pgdat->node_start_pfn;
		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
		}
	}

	/*
	 * Scanning the task stacks may introduce false negatives and it is
	 * not enabled by default.
	 */
	if (kmemleak_stack_scan) {
		read_lock(&tasklist_lock);
		for_each_process(task)
			scan_block(task_stack_page(task),
				   task_stack_page(task) + THREAD_SIZE, NULL);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above. More objects will be referenced and, if there are no memory
	 * leaks, all the objects will be scanned. The list traversal is safe
	 * for both tail additions and removals from inside the loop. The
	 * kmemleak objects cannot be freed from outside the loop because their
	 * use_count was increased.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		scan_yield();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		struct kmemleak_object *object;
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);

		kmemleak_scan();
		reported_leaks = 0;

		rcu_read_lock();
		list_for_each_entry_rcu(object, &object_list, object_list) {
			unsigned long flags;

			if (reported_leaks >= REPORTS_NR)
				break;
			spin_lock_irqsave(&object->lock, flags);
			if (!(object->flags & OBJECT_REPORTED) &&
			    unreferenced_object(object)) {
				print_unreferenced(NULL, object);
				object->flags |= OBJECT_REPORTED;
				reported_leaks++;
			} else if ((object->flags & OBJECT_REPORTED) &&
				   referenced_object(object)) {
				print_referenced(object);
				object->flags &= ~OBJECT_REPORTED;
			}
			spin_unlock_irqrestore(&object->lock, flags);
		}
		rcu_read_unlock();

		mutex_unlock(&scan_mutex);
		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the kmemleak_mutex held.
 */
void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the kmemleak_mutex held.
 */
void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. The function triggers
 * a memory scanning when the pos argument points to the first position.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;

	if (!n) {
		kmemleak_scan();
		reported_leaks = 0;
	}
	if (reported_leaks >= REPORTS_NR)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	rcu_read_unlock();
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct list_head *n = &prev_obj->object_list;

	++(*pos);
	if (reported_leaks >= REPORTS_NR)
		goto out;

	rcu_read_lock();
	list_for_each_continue_rcu(n, &object_list) {
		next_obj = list_entry(n, struct kmemleak_object, object_list);
		if (get_object(next_obj))
			break;
	}
	rcu_read_unlock();
out:
	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (v)
		put_object(v);
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if (!unreferenced_object(object))
		goto out;
	print_unreferenced(seq, object);
	reported_leaks++;
out:
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	ret = mutex_lock_interruptible(&kmemleak_mutex);
	if (ret < 0)
		goto out;
	if (file->f_mode & FMODE_READ) {
		ret = mutex_lock_interruptible(&scan_mutex);
		if (ret < 0)
			goto kmemleak_unlock;
		ret = seq_open(file, &kmemleak_seq_ops);
		if (ret < 0)
			goto scan_unlock;
	}
	return ret;

scan_unlock:
	mutex_unlock(&scan_mutex);
kmemleak_unlock:
	mutex_unlock(&kmemleak_mutex);
out:
	return ret;
}

static int kmemleak_release(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (file->f_mode & FMODE_READ) {
		seq_release(inode, file);
		mutex_unlock(&scan_mutex);
	}
	mutex_unlock(&kmemleak_mutex);

	return ret;
}

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 */
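/*
 * Example interaction (illustrative, assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	echo scan=on > /sys/kernel/debug/kmemleak	start the scan thread
 *	echo scan=300 > /sys/kernel/debug/kmemleak	scan every 300 seconds
 *	echo stack=on > /sys/kernel/debug/kmemleak	also scan task stacks
 */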
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;

	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;
		int err;

		err = strict_strtoul(buf + 5, 0, &secs);
		if (err < 0)
			return err;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else
		return -EINVAL;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= kmemleak_release,
};

/*
 * Perform the freeing of the kmemleak internal objects after waiting for any
 * current memory scan to complete.
 */
static int kmemleak_cleanup_thread(void *arg)
{
	struct kmemleak_object *object;

	mutex_lock(&kmemleak_mutex);
	stop_scan_thread();
	mutex_unlock(&kmemleak_mutex);

	mutex_lock(&scan_mutex);
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object(object->pointer);
	rcu_read_unlock();
	mutex_unlock(&scan_mutex);

	return 0;
}

/*
 * Start the clean-up thread.
 */
static void kmemleak_cleanup(void)
{
	struct task_struct *cleanup_thread;

	cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL,
				     "kmemleak-clean");
	if (IS_ERR(cleanup_thread))
		pr_warning("Failed to create the clean-up thread\n");
}

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	atomic_set(&kmemleak_early_log, 0);
	atomic_set(&kmemleak_enabled, 0);

	/* check whether it is too early for a kernel thread */
	if (atomic_read(&kmemleak_initialized))
		kmemleak_cleanup();

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") != 0)
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
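
/*
 * For example, booting with "kmemleak=off" on the kernel command line
 * disables the leak detector early during boot.
 */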

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

	jiffies_scan_yield = msecs_to_jiffies(MSECS_SCAN_YIELD);
	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
	INIT_PRIO_TREE_ROOT(&object_tree_root);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	if (!atomic_read(&kmemleak_error)) {
		atomic_set(&kmemleak_enabled, 1);
		atomic_set(&kmemleak_early_log, 0);
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			kmemleak_alloc(log->ptr, log->size, log->min_count,
				       GFP_KERNEL);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->offset, log->length,
					   GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			WARN_ON(1);
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	atomic_set(&kmemleak_initialized, 1);

	if (atomic_read(&kmemleak_error)) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		kmemleak_cleanup();
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&kmemleak_mutex);
	start_scan_thread();
	mutex_unlock(&kmemleak_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);