2 * fs/inotify_user.c - inotify support for userspace
5 * John McCutchan <ttb@tentacle.dhs.org>
6 * Robert Love <rml@novell.com>
8 * Copyright (C) 2005 John McCutchan
9 * Copyright 2006 Hewlett-Packard Development Company, L.P.
11 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
12 * inotify was largely rewritten to make use of the fsnotify infrastructure
14 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the
16 * Free Software Foundation; either version 2, or (at your option) any
19 * This program is distributed in the hope that it will be useful, but
20 * WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 * General Public License for more details.
25 #include <linux/fs.h> /* struct inode */
26 #include <linux/fsnotify_backend.h>
27 #include <linux/inotify.h>
28 #include <linux/path.h> /* struct path */
29 #include <linux/slab.h> /* kmem_* */
30 #include <linux/types.h>
31 #include <linux/sched.h>
36 * Check if 2 events contain the same information. We do not compare private data
37 * but at this moment that isn't a problem for any known fsnotify listeners.
/*
 * event_compare - decide whether two queued fsnotify events carry the same
 * information, so the newer one can be dropped as a duplicate (tail merge).
 * Private event data is intentionally not compared (see comment above).
 *
 * NOTE(review): this extract is missing several original source lines —
 * the per-case return statements and closing braces are not visible.
 * Visible tokens are preserved byte-for-byte, including the fused
 * original line numbers at the start of each line.
 */
39 static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new)
41 	if ((old->mask == new->mask) &&
42 	    (old->to_tell == new->to_tell) &&
43 	    (old->data_type == new->data_type) &&
44 	    (old->name_len == new->name_len)) {
45 		switch (old->data_type) {
46 		case (FSNOTIFY_EVENT_INODE):
47 			/* remember, after old was put on the wait_q we aren't
48 			 * allowed to look at the inode any more, only thing
49 			 * left to check was if the file_name is the same */
			/* NOTE(review): the condition's first half (likely a
			 * name_len/file_name presence check on original line 50)
			 * is missing from this extract. */
51 			!strcmp(old->file_name, new->file_name))
54 		case (FSNOTIFY_EVENT_PATH):
			/* path events match when both mount and dentry agree */
55 			if ((old->path.mnt == new->path.mnt) &&
56 			    (old->path.dentry == new->path.dentry))
59 		case (FSNOTIFY_EVENT_NONE):
			/* overflow events always merge; IN_IGNORED never does
			 * (the return values on lines 61/63+ are not visible) */
60 			if (old->mask & FS_Q_OVERFLOW)
62 			else if (old->mask & FS_IN_IGNORED)
/*
 * inotify_merge - try to coalesce @event with the most recent event already
 * on the group's notification @list.  inotify only ever tail-merges: just
 * the last queued event is compared, via event_compare().
 *
 * NOTE(review): lines are missing from this extract (the function's opening
 * brace, the merge return value, and the final return are not visible).
 * Visible tokens preserved byte-for-byte.
 */
70 static int inotify_merge(struct list_head *list,
71 			 struct fsnotify_event *event,
74 	struct fsnotify_event_holder *last_holder;
75 	struct fsnotify_event *last_event;
78 	/* and the list better be locked by something too */
79 	spin_lock(&event->lock);
	/* list->prev is the newest queued event */
81 	last_holder = list_entry(list->prev, struct fsnotify_event_holder, event_list);
82 	last_event = last_holder->event;
83 	if (event_compare(last_event, event))
86 	spin_unlock(&event->lock);
/*
 * inotify_handle_event - fsnotify callback: attach inotify-private data
 * (the watch descriptor) to @event and queue it on @group's notification
 * list, merging with the tail event when possible.
 *
 * NOTE(review): this extract is truncated — early returns, the wd
 * assignment from i_mark (original lines ~107/117), and the closing
 * braces are not visible.  Visible tokens preserved byte-for-byte.
 */
91 static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
93 	struct fsnotify_mark *fsn_mark;
94 	struct inotify_inode_mark *i_mark;
95 	struct inode *to_tell;
96 	struct inotify_event_private_data *event_priv;
97 	struct fsnotify_event_private_data *fsn_event_priv;
100 	to_tell = event->to_tell;
	/* find takes a reference on the mark; dropped below */
102 	fsn_mark = fsnotify_find_inode_mark(group, to_tell);
103 	/* race with watch removal? We already passed should_send */
104 	if (unlikely(!fsn_mark))
106 	i_mark = container_of(fsn_mark, struct inotify_inode_mark,
110 	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
111 	if (unlikely(!event_priv))
114 	fsn_event_priv = &event_priv->fsnotify_event_priv_data;
116 	fsn_event_priv->group = group;
	/* inotify_merge may coalesce this event into the queue tail */
119 	ret = fsnotify_add_notify_event(group, event, fsn_event_priv, inotify_merge, NULL);
		/* private data was not consumed by the queue; free it */
121 		inotify_free_event_priv(fsn_event_priv);
122 	/* EEXIST says we tail matched, EOVERFLOW isn't something
123 	 * to report up the stack. */
124 	if ((ret == -EEXIST) ||
130 	 * If we hold the fsn_mark until after the event is on the queue
131 	 * IN_IGNORED won't be able to pass this event in the queue
	 */
133 	fsnotify_put_mark(fsn_mark);
/*
 * inotify_freeing_mark - fsnotify callback invoked when a mark is being
 * destroyed: send IN_IGNORED to userspace and remove the watch from the
 * group's idr.  (Braces are not visible in this extract.)
 */
138 static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group)
140 	inotify_ignored_and_remove_idr(fsn_mark, group);
/*
 * inotify_should_send_event - fsnotify callback: report whether this
 * group's mark on @inode is interested in @mask.
 *
 * NOTE(review): truncated — the NULL-mark early return and the final
 * "return send;" are not visible.  Visible tokens preserved byte-for-byte.
 */
143 static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode,
144 				      struct vfsmount *mnt, __u32 mask, void *data,
147 	struct fsnotify_mark *fsn_mark;
150 	fsn_mark = fsnotify_find_inode_mark(group, inode);
	/* inotify does not care about the on-child flag itself */
154 	mask = (mask & ~FS_EVENT_ON_CHILD);
155 	send = (fsn_mark->mask & mask);
157 	/* find took a reference */
158 	fsnotify_put_mark(fsn_mark);
/*
164 * This is NEVER supposed to be called.  Inotify marks should either have been
165 * removed from the idr when the watch was removed or in the
166 * fsnotify_destroy_mark_by_group() call when the inotify instance was being
167 * torn down.  This is only called if the idr is about to be freed but there
168 * are still marks in it.
 *
 * NOTE(review): truncated — the fsn_mark assignment from @p, the
 * "warned" guard logic, and the return statement are not visible.
 */
170 static int idr_callback(int id, void *p, void *data)
172 	struct fsnotify_mark *fsn_mark;
173 	struct inotify_inode_mark *i_mark;
	/* warn only once per boot, however many stale marks exist */
174 	static bool warned = false;
181 	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
183 	WARN(1, "inotify closing but id=%d for fsn_mark=%p in group=%p still in "
184 		"idr.  Probably leaking memory\n", id, p, data);
187 	 * I'm taking the liberty of assuming that the mark in question is a
188 	 * valid address and I'm dereferencing it.  This might help to figure
189 	 * out why we got here and the panic is no worse than the original
190 	 * BUG() that was here.
	 */
193 	printk(KERN_WARNING "fsn_mark->group=%p inode=%p wd=%d\n",
194 		fsn_mark->group, fsn_mark->i.inode, i_mark->wd);
/*
 * inotify_free_group_priv - fsnotify callback run at group teardown:
 * warn about (and walk) any marks still in the idr, then free the idr
 * and drop the per-user accounting reference.
 * (Braces are not visible in this extract.)
 */
198 static void inotify_free_group_priv(struct fsnotify_group *group)
200 	/* ideally the idr is empty and we won't hit the BUG in the callback */
201 	idr_for_each(&group->inotify_data.idr, idr_callback, group);
202 	idr_remove_all(&group->inotify_data.idr);
203 	idr_destroy(&group->inotify_data.idr);
204 	free_uid(group->inotify_data.user);
/*
 * inotify_free_event_priv - release the per-event private data (the
 * inotify wd container) back to its kmem cache.
 * (Braces are not visible in this extract.)
 */
207 void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
209 	struct inotify_event_private_data *event_priv;
	/* recover the enclosing inotify structure from the embedded member */
212 	event_priv = container_of(fsn_event_priv, struct inotify_event_private_data,
213 				  fsnotify_event_priv_data);
215 	kmem_cache_free(event_priv_cachep, event_priv);
/*
 * fsnotify backend operations implemented by inotify.
 * (The closing "};" is not visible in this extract.)
 */
218 const struct fsnotify_ops inotify_fsnotify_ops = {
219 	.handle_event = inotify_handle_event,
220 	.should_send_event = inotify_should_send_event,
221 	.free_group_priv = inotify_free_group_priv,
222 	.free_event_priv = inotify_free_event_priv,
223 	.freeing_mark = inotify_freeing_mark,