]>
Commit | Line | Data |
---|---|---|
33d3dfff | 1 | #include <linux/fanotify.h> |
11637e4b | 2 | #include <linux/fcntl.h> |
2a3edf86 | 3 | #include <linux/file.h> |
11637e4b | 4 | #include <linux/fs.h> |
52c923dd | 5 | #include <linux/anon_inodes.h> |
11637e4b | 6 | #include <linux/fsnotify_backend.h> |
2a3edf86 | 7 | #include <linux/init.h> |
a1014f10 | 8 | #include <linux/mount.h> |
2a3edf86 | 9 | #include <linux/namei.h> |
a1014f10 | 10 | #include <linux/poll.h> |
11637e4b EP |
11 | #include <linux/security.h> |
12 | #include <linux/syscalls.h> | |
e4e047a2 | 13 | #include <linux/slab.h> |
2a3edf86 | 14 | #include <linux/types.h> |
a1014f10 EP |
15 | #include <linux/uaccess.h> |
16 | ||
17 | #include <asm/ioctls.h> | |
11637e4b | 18 | |
2529a0df | 19 | #define FANOTIFY_DEFAULT_MAX_EVENTS 16384 |
e7099d8a | 20 | #define FANOTIFY_DEFAULT_MAX_MARKS 8192 |
2529a0df | 21 | |
33d3dfff | 22 | extern const struct fsnotify_ops fanotify_fsnotify_ops; |
11637e4b | 23 | |
2a3edf86 | 24 | static struct kmem_cache *fanotify_mark_cache __read_mostly; |
b2d87909 EP |
25 | static struct kmem_cache *fanotify_response_event_cache __read_mostly; |
26 | ||
27 | struct fanotify_response_event { | |
28 | struct list_head list; | |
29 | __s32 fd; | |
30 | struct fsnotify_event *event; | |
31 | }; | |
2a3edf86 | 32 | |
a1014f10 EP |
/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	BUG_ON(!mutex_is_locked(&group->notification_mutex));

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	/* userspace buffer too small for even one fixed-size record */
	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	return fsnotify_remove_notify_event(group);
}
57 | ||
/*
 * Open a new file descriptor in the current task pointing at the object
 * the event fired on, so userspace can inspect it.  Returns the installed
 * fd on success or a negative errno (which is still reported to userspace
 * in the event's fd field).
 */
static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
{
	int client_fd;
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd();
	if (client_fd < 0)
		return client_fd;

	/* fanotify events always carry a path; anything else is a bug */
	if (event->data_type != FSNOTIFY_EVENT_PATH) {
		WARN_ON(1);
		put_unused_fd(client_fd);
		return -EINVAL;
	}

	/*
	 * we need a new file handle for the userspace program so it can read even if it was
	 * originally opened O_WRONLY.
	 */
	dentry = dget(event->path.dentry);
	mnt = mntget(event->path.mnt);
	/* it's possible this event was an overflow event.  in that case dentry and mnt
	 * are NULL;  That's fine, just don't call dentry open */
	if (dentry && mnt)
		new_file = dentry_open(dentry, mnt,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * we still send an event even if we can't open the file.  this
		 * can happen when say tasks are gone and we try to open their
		 * /proc files or we try to open a WRONLY file like in sysfs
		 * we just send the errno to userspace since there isn't much
		 * else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		fd_install(client_fd, new_file);
	}

	return client_fd;
}
107 | ||
/*
 * Fill in the fixed-size metadata record sent to userspace for one event.
 * On failure to open an fd for the event, metadata->fd holds the negative
 * errno and that value is returned (and still copied to userspace).
 */
static ssize_t fill_event_metadata(struct fsnotify_group *group,
				   struct fanotify_event_metadata *metadata,
				   struct fsnotify_event *event)
{
	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, event);

	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	/* only expose event bits userspace is allowed to see */
	metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	metadata->fd = create_fd(group, event);

	return metadata->fd;
}
123 | ||
b2d87909 EP |
124 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS |
/*
 * Find and unlink the pending access-response entry matching @fd from the
 * group's access_list.  Returns the entry (caller owns/frees it) or NULL
 * if no entry with that fd is queued.
 */
static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
						  __s32 fd)
{
	struct fanotify_response_event *re, *return_re = NULL;

	mutex_lock(&group->fanotify_data.access_mutex);
	list_for_each_entry(re, &group->fanotify_data.access_list, list) {
		if (re->fd != fd)
			continue;

		list_del_init(&re->list);
		return_re = re;
		break;
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	pr_debug("%s: found return_re=%p\n", __func__, return_re);

	return return_re;
}
145 | ||
/*
 * Handle a FAN_ALLOW/FAN_DENY verdict written by userspace: validate it,
 * find the matching queued permission event, record the response and wake
 * the task blocked waiting for it.
 */
static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_response_event *re;
	__s32 fd = response_struct->fd;
	__u32 response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * make sure the response is valid, if invalid we do nothing and either
	 * userspace can send a valid response or we will clean it up after the
	 * timeout
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	re = dequeue_re(group, fd);
	if (!re)
		return -ENOENT;

	/* publish the verdict before waking the waiter */
	re->event->response = response;

	wake_up(&group->fanotify_data.access_waitq);

	kmem_cache_free(fanotify_response_event_cache, re);

	return 0;
}
183 | ||
/*
 * For permission events, queue a response tracking entry (keyed by the fd
 * we handed to userspace) so a later write of FAN_ALLOW/FAN_DENY can be
 * matched back to this event.  Non-permission events need nothing.
 */
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	struct fanotify_response_event *re;

	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return 0;

	re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
	if (!re)
		return -ENOMEM;

	re->event = event;
	re->fd = fd;

	mutex_lock(&group->fanotify_data.access_mutex);

	/*
	 * Group is being torn down (fanotify_release set bypass_perm);
	 * nobody will ever answer, so auto-allow instead of queueing.
	 */
	if (group->fanotify_data.bypass_perm) {
		mutex_unlock(&group->fanotify_data.access_mutex);
		kmem_cache_free(fanotify_response_event_cache, re);
		event->response = FAN_ALLOW;
		return 0;
	}

	list_add_tail(&re->list, &group->fanotify_data.access_list);
	mutex_unlock(&group->fanotify_data.access_mutex);

	return 0;
}
214 | ||
/*
 * Undo prepare_for_access_response() when copying the event to userspace
 * failed: drop the queued response entry for @fd, if any.
 */
static void remove_access_response(struct fsnotify_group *group,
				   struct fsnotify_event *event,
				   __s32 fd)
{
	struct fanotify_response_event *re;

	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return;

	re = dequeue_re(group, fd);
	if (!re)
		return;

	/* the entry keyed by fd must belong to this event */
	BUG_ON(re->event != event);

	kmem_cache_free(fanotify_response_event_cache, re);

	return;
}
#else
/* No-op stubs when permission events are compiled out. */
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	return 0;
}

static void remove_access_response(struct fsnotify_group *group,
				   struct fsnotify_event *event,
				   __s32 fd)
{
	return;
}
#endif
249 | ||
a1014f10 EP |
/*
 * Convert one fsnotify event into a fanotify_event_metadata record and copy
 * it to the userspace buffer.  Returns the number of bytes copied
 * (FAN_EVENT_METADATA_LEN) or a negative errno; on failure the fd opened
 * for the event is closed and any queued access response is removed.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	fd = fill_event_metadata(group, &fanotify_event_metadata, event);
	if (fd < 0)
		return fd;

	/* must be queued before userspace can see the fd in the record */
	ret = prepare_for_access_response(group, event, fd);
	if (ret)
		goto out_close_fd;

	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata, FAN_EVENT_METADATA_LEN))
		goto out_kill_access_response;

	return FAN_EVENT_METADATA_LEN;

out_kill_access_response:
	remove_access_response(group, event, fd);
out_close_fd:
	sys_close(fd);
	return ret;
}
279 | ||
/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	/* readable iff at least one event is queued */
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}
294 | ||
/*
 * Read as many queued events as fit in the userspace buffer, blocking
 * (unless O_NONBLOCK) until at least one event is available.  Returns the
 * number of bytes written, or a negative errno if nothing was copied.
 */
static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			/* get_one_event() may return ERR_PTR(-EINVAL) */
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -EINTR;
		if (signal_pending(current))
			break;

		/* already copied something; return a short read now */
		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	/* partial success wins over a late error, except EFAULT */
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
347 | ||
b2d87909 EP |
/*
 * Userspace writes a struct fanotify_response (fd + FAN_ALLOW/FAN_DENY)
 * to answer a pending permission event.  Returns the number of bytes
 * consumed (capped at sizeof(response)) or a negative errno.
 */
static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}
374 | ||
52c923dd EP |
375 | static int fanotify_release(struct inode *ignored, struct file *file) |
376 | { | |
377 | struct fsnotify_group *group = file->private_data; | |
2eebf582 | 378 | struct fanotify_response_event *re, *lre; |
52c923dd EP |
379 | |
380 | pr_debug("%s: file=%p group=%p\n", __func__, file, group); | |
381 | ||
2eebf582 EP |
382 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS |
383 | mutex_lock(&group->fanotify_data.access_mutex); | |
384 | ||
385 | group->fanotify_data.bypass_perm = true; | |
386 | ||
387 | list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) { | |
388 | pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group, | |
389 | re, re->event); | |
390 | ||
391 | list_del_init(&re->list); | |
392 | re->event->response = FAN_ALLOW; | |
393 | ||
394 | kmem_cache_free(fanotify_response_event_cache, re); | |
395 | } | |
396 | mutex_unlock(&group->fanotify_data.access_mutex); | |
397 | ||
398 | wake_up(&group->fanotify_data.access_waitq); | |
399 | #endif | |
52c923dd EP |
400 | /* matches the fanotify_init->fsnotify_alloc_group */ |
401 | fsnotify_put_group(group); | |
402 | ||
403 | return 0; | |
404 | } | |
405 | ||
a1014f10 EP |
/*
 * ioctl handler; only FIONREAD is supported, reporting the number of bytes
 * a read() would currently return (queued events * record size).
 */
static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list)
			send_len += FAN_EVENT_METADATA_LEN;
		mutex_unlock(&group->notification_mutex);
		/* NOTE(review): send_len is size_t but stored through an
		 * int __user * — truncates on huge queues; confirm intended */
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}
430 | ||
/* file_operations for the anon fd handed out by fanotify_init() */
static const struct file_operations fanotify_fops = {
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,
};
441 | ||
2a3edf86 EP |
/* free-callback passed to fsnotify_init_mark(); returns mark to the cache */
static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}
446 | ||
/*
 * Resolve the (dfd, filename) pair of fanotify_mark() into a struct path,
 * honouring FAN_MARK_DONT_FOLLOW and FAN_MARK_ONLYDIR.  On success the
 * caller owns a path reference; also verifies MAY_READ on the inode.
 */
static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		/* NULL pathname: mark the object dfd itself refers to */
		struct file *file;
		int fput_needed;

		ret = -EBADF;
		file = fget_light(dfd, &fput_needed);
		if (!file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) {
			fput_light(file, fput_needed);
			goto out;
		}

		*path = file->f_path;
		path_get(path);
		fput_light(file, fput_needed);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}
494 | ||
b9e4e3bd EP |
/*
 * Clear @mask bits from the mark's event mask (or ignored mask when
 * FAN_MARK_IGNORED_MASK is set).  Destroys the mark entirely if no bits
 * remain.  Returns the bits that were actually cleared.
 */
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
	}
	spin_unlock(&fsn_mark->lock);

	/* nothing left to watch for: tear the mark down */
	if (!(oldmask & ~mask))
		fsnotify_destroy_mark(fsn_mark);

	return mask & oldmask;
}
516 | ||
/*
 * Remove @mask bits from this group's mark on @mnt, recalculating the
 * mount's aggregate mask if any watched bits went away.
 */
static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;

	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark)
		return -ENOENT;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	/* matches the reference from fsnotify_find_vfsmount_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & mnt->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}
2a3edf86 | 535 | |
/*
 * Remove @mask bits from this group's mark on @inode, recalculating the
 * inode's aggregate mask if any watched bits went away.
 */
static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark)
		return -ENOENT;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	/* matches the fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	return 0;
}
555 | ||
b9e4e3bd EP |
/*
 * OR @mask bits into the mark's event mask (or ignored mask when
 * FAN_MARK_IGNORED_MASK is set, optionally surviving modify).
 * Returns the bits that were newly added.
 */
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask | mask));
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}
	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}
576 | ||
/*
 * Add @mask bits to this group's mark on @mnt, creating the mark first
 * if the group doesn't have one yet (subject to the per-group mark limit).
 */
static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		int ret;

		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
			return -ENOSPC;

		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
		if (ret) {
			fanotify_free_mark(fsn_mark);
			return ret;
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	fsnotify_put_mark(fsn_mark);
	/* new bits on this mark may widen the mount's aggregate mask */
	if (added & ~mnt->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}
609 | ||
/*
 * Add @mask bits to this group's mark on @inode, creating the mark first
 * if the group doesn't have one yet (subject to the per-group mark limit).
 */
static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification changes anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    (atomic_read(&inode->i_writecount) > 0))
		return 0;

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		int ret;

		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
			return -ENOSPC;

		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
		if (ret) {
			fanotify_free_mark(fsn_mark);
			return ret;
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	fsnotify_put_mark(fsn_mark);
	/* new bits on this mark may widen the inode's aggregate mask */
	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);
	return 0;
}
2a3edf86 | 653 | |
/* fanotify syscalls */
/*
 * fanotify_init(2): create a notification group and return an anon fd
 * for it.  Requires CAP_SYS_ADMIN.  @flags selects class/limits/fd flags,
 * @event_f_flags is used when opening fds for events.
 */
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		__func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group))
		return PTR_ERR(group);

	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	mutex_init(&group->fanotify_data.access_mutex);
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
#endif
	/* notification class decides ordering relative to other groups */
	switch (flags & FAN_ALL_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_put_group;
	}

	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_put_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	}

	group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_put_group;

	return fd;

out_put_group:
	fsnotify_put_group(group);
	return fd;
}
bbaa4168 | 722 | |
9bbfc964 HC |
/*
 * fanotify_mark(2): add, remove or flush marks on an inode or mount for
 * the group behind @fanotify_fd.  @flags selects the operation and target
 * kind; @mask is the event mask; (@dfd, @pathname) name the object.
 */
SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
			      __u64 mask, int dfd,
			      const char __user * pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct file *filp;
	struct path path;
	int ret, fput_needed;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	/* exactly one of ADD/REMOVE/FLUSH must be given */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
	case FAN_MARK_REMOVE:
	case FAN_MARK_FLUSH:
		break;
	default:
		return -EINVAL;
	}
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	filp = fget_light(fanotify_fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(filp->f_op != &fanotify_fops))
		goto fput_and_out;
	group = filp->private_data;

	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
	 * allowed to set permissions events.
	 */
	ret = -EINVAL;
	if (mask & FAN_ALL_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;

	/* create/update an inode mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_FLUSH:
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}
816 | ||
9bbfc964 HC |
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
/* arch glue: widen/narrow arguments for the syscall-wrapper ABI */
asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask,
				  long dfd, long pathname)
{
	return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags,
				  mask, (int) dfd,
				  (const char __user *) pathname);
}
SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
#endif
827 | ||
2a3edf86 EP |
/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	/* SLAB_PANIC: cache creation failure panics instead of returning */
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
						   SLAB_PANIC);

	return 0;
}
device_initcall(fanotify_user_setup);