]> bbs.cooldavid.org Git - net-next-2.6.git/blame - mm/filemap.h
[SCTP]: Include sk_buff overhead while updating the peer's receive window.
[net-next-2.6.git] / mm / filemap.h
CommitLineData
ceffc078
CO
1/*
2 * linux/mm/filemap.h
3 *
4 * Copyright (C) 1994-1999 Linus Torvalds
5 */
6
7#ifndef __FILEMAP_H
8#define __FILEMAP_H
9
10#include <linux/types.h>
11#include <linux/fs.h>
12#include <linux/mm.h>
13#include <linux/highmem.h>
14#include <linux/uio.h>
15#include <linux/config.h>
c22ce143 16#include <linux/uaccess.h>
ceffc078 17
/* Copy up to @bytes from an iovec (starting @base bytes into @iov) into
 * @vaddr without sleeping; returns the number of bytes copied. */
size_t
__filemap_copy_from_user_iovec_inatomic(char *vaddr,
					const struct iovec *iov,
					size_t base,
					size_t bytes);
ceffc078
CO
23
24/*
25 * Copy as much as we can into the page and return the number of bytes which
26 * were sucessfully copied. If a fault is encountered then clear the page
27 * out to (offset+bytes) and return the number of bytes which were copied.
01408c49
N
28 *
29 * NOTE: For this to work reliably we really want copy_from_user_inatomic_nocache
30 * to *NOT* zero any tail of the buffer that it failed to copy. If it does,
31 * and if the following non-atomic copy succeeds, then there is a small window
32 * where the target page contains neither the data before the write, nor the
33 * data after the write (it contains zero). A read at this time will see
34 * data that is inconsistent with any ordering of the read and the write.
35 * (This has been detected in practice).
ceffc078
CO
36 */
37static inline size_t
38filemap_copy_from_user(struct page *page, unsigned long offset,
39 const char __user *buf, unsigned bytes)
40{
41 char *kaddr;
42 int left;
43
44 kaddr = kmap_atomic(page, KM_USER0);
c22ce143 45 left = __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
ceffc078
CO
46 kunmap_atomic(kaddr, KM_USER0);
47
48 if (left != 0) {
49 /* Do it the slow way */
50 kaddr = kmap(page);
c22ce143 51 left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
ceffc078
CO
52 kunmap(page);
53 }
54 return bytes - left;
55}
56
57/*
58 * This has the same sideeffects and return value as filemap_copy_from_user().
59 * The difference is that on a fault we need to memset the remainder of the
60 * page (out to offset+bytes), to emulate filemap_copy_from_user()'s
61 * single-segment behaviour.
62 */
63static inline size_t
64filemap_copy_from_user_iovec(struct page *page, unsigned long offset,
65 const struct iovec *iov, size_t base, size_t bytes)
66{
67 char *kaddr;
68 size_t copied;
69
70 kaddr = kmap_atomic(page, KM_USER0);
01408c49
N
71 copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
72 base, bytes);
ceffc078
CO
73 kunmap_atomic(kaddr, KM_USER0);
74 if (copied != bytes) {
75 kaddr = kmap(page);
01408c49
N
76 copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
77 base, bytes);
78 if (bytes - copied)
79 memset(kaddr + offset + copied, 0, bytes - copied);
ceffc078
CO
80 kunmap(page);
81 }
82 return copied;
83}
84
/*
 * Advance the (iovec, offset) cursor pair by @bytes.  On return, *iovp
 * points at the segment containing the new position and *basep is the
 * byte offset within that segment (0 when the previous segment was
 * consumed exactly).
 */
static inline void
filemap_set_next_iovec(const struct iovec **iovp, size_t *basep, size_t bytes)
{
	const struct iovec *cur = *iovp;
	size_t pos = *basep;

	do {
		size_t space = cur->iov_len - pos;
		int step = bytes < space ? (int)bytes : (int)space;

		bytes -= step;
		pos += step;
		if (pos == cur->iov_len) {
			/* Segment fully consumed: move to the next one. */
			cur++;
			pos = 0;
		}
	} while (bytes);
	*iovp = cur;
	*basep = pos;
}
104#endif