/* fs/notify/fanotify/fanotify.c - fanotify: permissions and blocking */
#include <linux/fanotify.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h> /* UINT_MAX */
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wait.h>

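/*
 * Two queued events can be merged when they refer to the same inode, carry
 * the same type of data and come from the same thread group; path events
 * must additionally point at the same mount and dentry.
 */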
static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new)
{
	pr_debug("%s: old=%p new=%p\n", __func__, old, new);

	if (old->to_tell == new->to_tell &&
	    old->data_type == new->data_type &&
	    old->tgid == new->tgid) {
		switch (old->data_type) {
		case (FSNOTIFY_EVENT_PATH):
			if ((old->path.mnt == new->path.mnt) &&
			    (old->path.dentry == new->path.dentry))
				return true;
			break;
		case (FSNOTIFY_EVENT_NONE):
			return true;
		default:
			BUG();
		};
	}
	return false;
}

/* Note, if we return an event in *arg that a reference is being held... */
static int fanotify_merge(struct list_head *list,
			  struct fsnotify_event *event,
			  void **arg)
{
	struct fsnotify_event_holder *test_holder;
	struct fsnotify_event *test_event;
	struct fsnotify_event *new_event;
	struct fsnotify_event **return_event = (struct fsnotify_event **)arg;
	int ret = 0;

	pr_debug("%s: list=%p event=%p\n", __func__, list, event);

	*return_event = NULL;

	/* and the list better be locked by something too! */

	list_for_each_entry_reverse(test_holder, list, event_list) {
		test_event = test_holder->event;
		if (should_merge(test_event, event)) {
			fsnotify_get_event(test_event);
			*return_event = test_event;

			ret = -EEXIST;
			/* if they are exactly the same we are done */
			if (test_event->mask == event->mask)
				goto out;

			/*
			 * if the refcnt == 1 this is the only queue
			 * for this event and so we can update the mask
			 * in place.
			 */
			if (atomic_read(&test_event->refcnt) == 1) {
				test_event->mask |= event->mask;
				goto out;
			}

			new_event = fsnotify_clone_event(test_event);
			if (unlikely(!new_event)) {
				/* can't allocate memory, merge was not possible */
				ret = 0;
				goto out;
			}

			/* we didn't return the test_event, so drop that ref */
			fsnotify_put_event(test_event);
			/* the reference we return on new_event is from clone */
			*return_event = new_event;

			/* build new event and replace it on the list */
			new_event->mask = (test_event->mask | event->mask);
			fsnotify_replace_event(test_holder, new_event);

			break;
		}
	}
out:
	return ret;
}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
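/*
 * Block (uninterruptibly) until userspace has answered this permission
 * event, then translate FAN_ALLOW into 0 and anything else into -EPERM.
 */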
static int fanotify_get_response_from_access(struct fsnotify_group *group,
					     struct fsnotify_event *event)
{
	int ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	wait_event(group->fanotify_data.access_waitq, event->response);

	/* userspace responded, convert to something usable */
	spin_lock(&event->lock);
	switch (event->response) {
	case FAN_ALLOW:
		ret = 0;
		break;
	case FAN_DENY:
	default:
		ret = -EPERM;
	}
	event->response = 0;
	spin_unlock(&event->lock);

	return ret;
}
#endif

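/*
 * Add the event to the group's notification queue, merging it with an
 * already queued event when possible.  For permission events, wait for
 * userspace's verdict before returning it to the caller.
 */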
static int fanotify_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
	int ret;
	struct fsnotify_event *notify_event = NULL;

	BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
	BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
	BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
	BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = fsnotify_add_notify_event(group, event, NULL, fanotify_merge,
					(void **)&notify_event);
	/* -EEXIST means this event was merged with another, not that it was an error */
	if (ret == -EEXIST)
		ret = 0;
	if (ret)
		goto out;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS) {
		/* if we merged we need to wait on the new event */
		if (notify_event)
			event = notify_event;
		ret = fanotify_get_response_from_access(group, event);
	}
#endif

out:
	if (notify_event)
		fsnotify_put_event(notify_event);
	return ret;
}

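/*
 * Filter against the group's vfsmount mark: the event is sent only if the
 * mask survives both the vfsmount mark's interest and ignored masks and
 * the ignored mask of any inode mark the group has on the same inode.
 */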
static bool should_send_vfsmount_event(struct fsnotify_group *group, struct vfsmount *mnt,
				       struct inode *inode, __u32 mask)
{
	struct fsnotify_mark *mnt_mark;
	struct fsnotify_mark *inode_mark;

	pr_debug("%s: group=%p vfsmount=%p mask=%x\n",
		 __func__, group, mnt, mask);

	mnt_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!mnt_mark)
		return false;

	mask &= mnt_mark->mask;
	mask &= ~mnt_mark->ignored_mask;

	if (mask) {
		inode_mark = fsnotify_find_inode_mark(group, inode);
		if (inode_mark) {
			mask &= ~inode_mark->ignored_mask;
			fsnotify_put_mark(inode_mark);
		}
	}

	/* find took a reference */
	fsnotify_put_mark(mnt_mark);

	return mask;
}

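/*
 * Filter against the group's inode mark: events on children are dropped
 * unless the mark asked for them, then the remaining bits are checked
 * against the mark's interest and ignored masks.
 */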
static bool should_send_inode_event(struct fsnotify_group *group, struct inode *inode,
				    __u32 mask)
{
	struct fsnotify_mark *fsn_mark;

	pr_debug("%s: group=%p inode=%p mask=%x\n",
		 __func__, group, inode, mask);

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark)
		return false;

	/* if the event is for a child and this inode doesn't care about
	 * events on the child, don't send it! */
	if ((mask & FS_EVENT_ON_CHILD) &&
	    !(fsn_mark->mask & FS_EVENT_ON_CHILD)) {
		mask = 0;
	} else {
		/*
		 * We care about children, but do we care about this particular
		 * type of event?
		 */
		mask &= ~FS_EVENT_ON_CHILD;
		mask &= fsn_mark->mask;
		mask &= ~fsn_mark->ignored_mask;
	}

	/* find took a reference */
	fsnotify_put_mark(fsn_mark);

	return mask;
}

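/*
 * Top-level filter: fanotify only reports events carrying path data on
 * regular files and directories; the rest of the decision is delegated to
 * the vfsmount or inode mark helpers above.
 */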
static bool fanotify_should_send_event(struct fsnotify_group *group, struct inode *to_tell,
				       struct vfsmount *mnt, __u32 mask, void *data,
				       int data_type)
{
	pr_debug("%s: group=%p to_tell=%p mnt=%p mask=%x data=%p data_type=%d\n",
		 __func__, group, to_tell, mnt, mask, data, data_type);

	/* sorry, fanotify only gives a damn about files and dirs */
	if (!S_ISREG(to_tell->i_mode) &&
	    !S_ISDIR(to_tell->i_mode))
		return false;

	/* if we don't have enough info to send an event to userspace say no */
	if (data_type != FSNOTIFY_EVENT_PATH)
		return false;

	if (mnt)
		return should_send_vfsmount_event(group, mnt, to_tell, mask);
	else
		return should_send_inode_event(group, to_tell, mask);
}

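/* fanotify's callbacks into the fsnotify backend */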
const struct fsnotify_ops fanotify_fsnotify_ops = {
	.handle_event = fanotify_handle_event,
	.should_send_event = fanotify_should_send_event,
	.free_group_priv = NULL,
	.free_event_priv = NULL,
	.freeing_mark = NULL,
};