The locking code in the address dumper needs to grab the mm's mmap_sem
so that the walker does not get an inconsistent view of the VMA list
while another CPU is modifying it. On UP systems this was not really a
problem, but on SMP systems the race is easy to trigger when another
CPU removes a mapping mid-walk.

Signed-off-by: Graf Yang <graf.yang@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
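
As an illustration of the pattern (not part of the patch): a minimal
userspace analogue using POSIX rwlocks. The fake_mm struct, walk_maps(),
and the address values are invented for this sketch; it only demonstrates
the trylock-and-skip behavior the hunks below introduce.

	/* build: gcc -pthread trylock_skip.c -o trylock_skip */
	#include <pthread.h>
	#include <stdio.h>

	#define NR_MAPS 4

	/* Stand-in for an mm_struct: a rwlock guarding one address range. */
	struct fake_mm {
		pthread_rwlock_t lock;
		unsigned long start, end;
	};

	static struct fake_mm maps[NR_MAPS];

	/*
	 * Analogue of the dumper's walk: take each lock for reading if we
	 * can, and simply skip any map a writer currently holds instead of
	 * sleeping on it.
	 */
	static void walk_maps(void)
	{
		for (int i = 0; i < NR_MAPS; i++) {
			if (pthread_rwlock_tryrdlock(&maps[i].lock)) {
				printf("map %d busy, skipped\n", i);
				continue;
			}
			printf("map %d: 0x%lx-0x%lx\n",
			       i, maps[i].start, maps[i].end);
			pthread_rwlock_unlock(&maps[i].lock);
		}
	}

	int main(void)
	{
		for (int i = 0; i < NR_MAPS; i++) {
			pthread_rwlock_init(&maps[i].lock, NULL);
			maps[i].start = 0x1000UL * (i + 1);
			maps[i].end = maps[i].start + 0x1000;
		}

		/* Simulate another CPU holding one map for writing. */
		pthread_rwlock_wrlock(&maps[2].lock);
		walk_maps();
		pthread_rwlock_unlock(&maps[2].lock);
		return 0;
	}
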
+		if (!down_read_trylock(&mm->mmap_sem)) {
+			if (!in_atomic)
+				mmput(mm);
+			continue;
+		}
+
 		for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
 			struct vm_area_struct *vma;
sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
name, vma->vm_start, vma->vm_end);
sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
name, vma->vm_start, vma->vm_end);
+ up_read(&mm->mmap_sem);
if (!in_atomic)
mmput(mm);
+
+		up_read(&mm->mmap_sem);
 		if (!in_atomic)
 			mmput(mm);
 	}
-	/* we were unable to find this address anywhere */
+	/*
+	 * we were unable to find this address anywhere,
+	 * or some MMs were skipped because they were in use.
+	 */
sprintf(buf, "/* kernel dynamic memory */");
done:
sprintf(buf, "/* kernel dynamic memory */");
done:
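
Note the ordering in both new hunks: up_read() is issued before mmput().
mmap_sem is embedded in the mm_struct, and mmput() may drop the last
reference and free the mm, so the semaphore has to be released while the
structure is still guaranteed to be alive. A condensed sketch of the
resulting loop body, names taken from the patch and unrelated context
elided:

	if (!down_read_trylock(&mm->mmap_sem)) {
		/* contended: skip this mm rather than sleep on it */
		if (!in_atomic)
			mmput(mm);
		continue;
	}

	/* ... walk mm->mm_rb under the read lock ... */

	/* drop the lock first: mmput() below may free the mm_struct */
	up_read(&mm->mmap_sem);
	if (!in_atomic)
		mmput(mm);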