/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/wait.h>

#include <linux/fsnotify_backend.h>

#include <asm/atomic.h>
/* protects writes to fsnotify_groups and fsnotify_mask */
static DEFINE_MUTEX(fsnotify_grp_mutex);

/* all groups registered to receive inode filesystem notifications */
LIST_HEAD(fsnotify_inode_groups);

/* all groups registered to receive mount point filesystem notifications */
LIST_HEAD(fsnotify_vfsmount_groups);
38 void fsnotify_add_vfsmount_group(struct fsnotify_group *group)
40 struct fsnotify_group *group_iter;
42 mutex_lock(&fsnotify_grp_mutex);
44 if (!group->on_vfsmount_group_list) {
45 list_for_each_entry(group_iter, &fsnotify_vfsmount_groups,
46 vfsmount_group_list) {
47 /* insert in front of this one? */
48 if (group < group_iter) {
49 /* list_add_tail() insert in front of group_iter */
50 list_add_tail_rcu(&group->inode_group_list,
51 &group_iter->inode_group_list);
56 /* apparently we need to be the last entry */
57 list_add_tail_rcu(&group->vfsmount_group_list, &fsnotify_vfsmount_groups);
60 group->on_vfsmount_group_list = 1;
62 mutex_unlock(&fsnotify_grp_mutex);
65 void fsnotify_add_inode_group(struct fsnotify_group *group)
67 struct fsnotify_group *group_iter;
69 mutex_lock(&fsnotify_grp_mutex);
71 /* add to global group list */
72 if (!group->on_inode_group_list) {
73 list_for_each_entry(group_iter, &fsnotify_inode_groups,
75 if (group < group_iter) {
76 /* list_add_tail() insert in front of group_iter */
77 list_add_tail_rcu(&group->inode_group_list,
78 &group_iter->inode_group_list);
83 /* apparently we need to be the last entry */
84 list_add_tail_rcu(&group->inode_group_list, &fsnotify_inode_groups);
87 group->on_inode_group_list = 1;
89 mutex_unlock(&fsnotify_grp_mutex);
93 * Final freeing of a group
95 void fsnotify_final_destroy_group(struct fsnotify_group *group)
97 /* clear the notification queue of all events */
98 fsnotify_flush_notify(group);
100 if (group->ops->free_group_priv)
101 group->ops->free_group_priv(group);
107 * Trying to get rid of a group. We need to first get rid of any outstanding
108 * allocations and then free the group. Remember that fsnotify_clear_marks_by_group
109 * could miss marks that are being freed by inode and those marks could still
110 * hold a reference to this group (via group->num_marks) If we get into that
111 * situtation, the fsnotify_final_destroy_group will get called when that final
114 static void fsnotify_destroy_group(struct fsnotify_group *group)
116 /* clear all inode marks for this group */
117 fsnotify_clear_marks_by_group(group);
119 synchronize_srcu(&fsnotify_mark_srcu);
121 /* past the point of no return, matches the initial value of 1 */
122 if (atomic_dec_and_test(&group->num_marks))
123 fsnotify_final_destroy_group(group);
127 * Remove this group from the global list of groups that will get events
128 * this can be done even if there are still references and things still using
129 * this group. This just stops the group from getting new events.
131 static void __fsnotify_evict_group(struct fsnotify_group *group)
133 BUG_ON(!mutex_is_locked(&fsnotify_grp_mutex));
135 if (group->on_inode_group_list)
136 list_del_rcu(&group->inode_group_list);
137 group->on_inode_group_list = 0;
138 if (group->on_vfsmount_group_list)
139 list_del_rcu(&group->vfsmount_group_list);
140 group->on_vfsmount_group_list = 0;
144 * Called when a group is no longer interested in getting events. This can be
145 * used if a group is misbehaving or if for some reason a group should no longer
146 * get any filesystem events.
148 void fsnotify_evict_group(struct fsnotify_group *group)
150 mutex_lock(&fsnotify_grp_mutex);
151 __fsnotify_evict_group(group);
152 mutex_unlock(&fsnotify_grp_mutex);
156 * Drop a reference to a group. Free it if it's through.
158 void fsnotify_put_group(struct fsnotify_group *group)
160 if (!atomic_dec_and_mutex_lock(&group->refcnt, &fsnotify_grp_mutex))
164 * OK, now we know that there's no other users *and* we hold mutex,
165 * so no new references will appear
167 __fsnotify_evict_group(group);
169 mutex_unlock(&fsnotify_grp_mutex);
171 fsnotify_destroy_group(group);
175 * Create a new fsnotify_group and hold a reference for the group returned.
177 struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
179 struct fsnotify_group *group;
181 group = kzalloc(sizeof(struct fsnotify_group), GFP_KERNEL);
183 return ERR_PTR(-ENOMEM);
185 /* set to 0 when there a no external references to this group */
186 atomic_set(&group->refcnt, 1);
188 * hits 0 when there are no external references AND no marks for
191 atomic_set(&group->num_marks, 1);
193 mutex_init(&group->notification_mutex);
194 INIT_LIST_HEAD(&group->notification_list);
195 init_waitqueue_head(&group->notification_waitq);
196 group->max_events = UINT_MAX;
198 INIT_LIST_HEAD(&group->inode_group_list);
199 INIT_LIST_HEAD(&group->vfsmount_group_list);
201 spin_lock_init(&group->mark_lock);
202 INIT_LIST_HEAD(&group->marks_list);