mm: strictly nested kmap_atomic()

Ensure kmap_atomic() usage is strictly nested
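
kunmap_atomic() must release mappings in the reverse of the order in
which kmap_atomic() established them, i.e. strictly LIFO, as a
stack-based kmap_atomic() implementation requires.  Reorder the unmap
calls at the call sites that released in FIFO order; no functional
change.

A minimal sketch of the rule (p1 and p2 are hypothetical pages, not
taken from any hunk below):

	void *src = kmap_atomic(p1, KM_USER0);	/* mapped first */
	void *dst = kmap_atomic(p2, KM_USER1);	/* mapped second */

	memcpy(dst, src, PAGE_SIZE);

	/* release in reverse order: last mapped, first unmapped */
	kunmap_atomic(dst, KM_USER1);
	kunmap_atomic(src, KM_USER0);

The same rule holds when the mappings are hidden behind helpers: in
crypto/blkcipher.c the source is mapped before the destination, so
blkcipher_unmap_src() now runs after blkcipher_unmap_dst().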
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
 		memcpy(dest_buf, src_buf, len);
 
-		kunmap_atomic(dest_buf, KM_USER0);
 		kunmap_atomic(src_buf, KM_USER1);
+		kunmap_atomic(dest_buf, KM_USER0);
 
 		async_tx_sync_epilog(submit);
 	}

--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
 		memcpy(walk->dst.virt.addr, walk->page, n);
 		blkcipher_unmap_dst(walk);
 	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
-		blkcipher_unmap_src(walk);
 		if (walk->flags & BLKCIPHER_WALK_DIFF)
 			blkcipher_unmap_dst(walk);
+		blkcipher_unmap_src(walk);
 	}
 
 	scatterwalk_advance(&walk->in, n);

--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
 	else
 		memcpy(raw_buf, loop_buf, size);
 
-	kunmap_atomic(raw_buf, KM_USER0);
 	kunmap_atomic(loop_buf, KM_USER1);
+	kunmap_atomic(raw_buf, KM_USER0);
 	cond_resched();
 	return 0;
 }

 	for (i = 0; i < size; i++)
 		*out++ = *in++ ^ key[(i & 511) % keysize];
 
-	kunmap_atomic(raw_buf, KM_USER0);
 	kunmap_atomic(loop_buf, KM_USER1);
+	kunmap_atomic(raw_buf, KM_USER0);
 	cond_resched();
 	return 0;
 }

--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
 	vfrom = kmap_atomic(from, KM_USER0);
 	vto = kmap_atomic(to, KM_USER1);
 	copy_user_page(vto, vfrom, vaddr, to);
-	kunmap_atomic(vfrom, KM_USER0);
 	kunmap_atomic(vto, KM_USER1);
+	kunmap_atomic(vfrom, KM_USER0);

 	vfrom = kmap_atomic(from, KM_USER0);
 	vto = kmap_atomic(to, KM_USER1);
 	copy_page(vto, vfrom);
-	kunmap_atomic(vfrom, KM_USER0);
 	kunmap_atomic(vto, KM_USER1);
+	kunmap_atomic(vfrom, KM_USER0);
 }
 
 #endif /* _LINUX_HIGHMEM_H */

--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
 		src = kmap_atomic(s_page, KM_USER0);
 		dst = kmap_atomic(d_page, KM_USER1);
 		do_copy_page(dst, src);
-		kunmap_atomic(src, KM_USER0);
 		kunmap_atomic(dst, KM_USER1);
+		kunmap_atomic(src, KM_USER0);
 	} else {
 		if (PageHighMem(d_page)) {
 			/* Page pointed to by src may contain some kernel

 	memcpy(buf, kaddr1, PAGE_SIZE);
 	memcpy(kaddr1, kaddr2, PAGE_SIZE);
 	memcpy(kaddr2, buf, PAGE_SIZE);
-	kunmap_atomic(kaddr1, KM_USER0);
 	kunmap_atomic(kaddr2, KM_USER1);
+	kunmap_atomic(kaddr1, KM_USER0);