/*
 * fs/inotify.c - inode-based file event notifications
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Kernel API added by: Amy Griffis <amy.griffis@hp.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/writeback.h>
#include <linux/inotify.h>

static atomic_t inotify_cookie;

/*
 * Lock ordering:
 *
 * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
 * iprune_mutex (synchronize shrink_icache_memory())
 * inode_lock (protects the super_block->s_inodes list)
 * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
 * inotify_handle->mutex (protects inotify_handle and watches->h_list)
 *
 * The inode->inotify_mutex and inotify_handle->mutex are held during execution
 * of a caller's event handler. Thus, the caller must not hold any locks
 * taken in their event handler while calling any of the published inotify
 * interfaces.
 */
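
/*
 * For example (a sketch mirroring inotify_rm_wd() below), any path that
 * needs both of the bottom two mutexes must nest them in this order:
 *
 *	mutex_lock(&inode->inotify_mutex);
 *	mutex_lock(&ih->mutex);
 *	... manipulate watches->i_list and watches->h_list ...
 *	mutex_unlock(&ih->mutex);
 *	mutex_unlock(&inode->inotify_mutex);
 */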

/*
 * Lifetimes of the three main data structures--inotify_handle, inode, and
 * inotify_watch--are managed by reference count.
 *
 * inotify_handle: Lifetime is from inotify_init() to inotify_destroy().
 * Additional references can bump the count via get_inotify_handle() and drop
 * the count via put_inotify_handle().
 *
 * inotify_watch: for inotify's purposes, lifetime is from inotify_add_watch()
 * to remove_watch_no_event(). Additional references can bump the count via
 * get_inotify_watch() and drop the count via put_inotify_watch(). The caller
 * is responsible for the final put after receiving IN_IGNORED, or when using
 * IN_ONESHOT after receiving the first event. Inotify does the final put if
 * inotify_destroy() is called.
 *
 * inode: Pinned so long as the inode is associated with a watch, from
 * inotify_add_watch() to the final put_inotify_watch().
 */
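
/*
 * A minimal consumer sketch of these lifetime rules. The struct my_watch
 * container and the my_* names are hypothetical, and error handling is
 * elided; this is an illustration, not a prescription:
 *
 *	struct my_watch {
 *		struct inotify_watch	watch;
 *	};
 *
 *	static void my_handle_event(struct inotify_watch *watch, u32 wd,
 *				    u32 mask, u32 cookie, const char *name,
 *				    struct inode *inode)
 *	{
 *		if (mask & IN_IGNORED)
 *			put_inotify_watch(watch);  (the caller's final put)
 *	}
 *
 *	static void my_destroy_watch(struct inotify_watch *watch)
 *	{
 *		kfree(container_of(watch, struct my_watch, watch));
 *	}
 *
 *	static const struct inotify_operations my_ops = {
 *		.handle_event	= my_handle_event,
 *		.destroy_watch	= my_destroy_watch,
 *	};
 *
 * A handle then lives from inotify_init(&my_ops) to inotify_destroy(ih),
 * and each watch from inotify_add_watch() to its final put_inotify_watch().
 */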

/*
 * struct inotify_handle - represents an inotify instance
 *
 * This structure is protected by the mutex 'mutex'.
 */
struct inotify_handle {
	struct idr		idr;		/* idr mapping wd -> watch */
	struct mutex		mutex;		/* protects this bad boy */
	struct list_head	watches;	/* list of watches */
	atomic_t		count;		/* reference count */
	u32			last_wd;	/* the last wd allocated */
	const struct inotify_operations *in_ops; /* inotify caller operations */
};

static inline void get_inotify_handle(struct inotify_handle *ih)
{
	atomic_inc(&ih->count);
}

static inline void put_inotify_handle(struct inotify_handle *ih)
{
	if (atomic_dec_and_test(&ih->count)) {
		idr_destroy(&ih->idr);
		kfree(ih);
	}
}

/**
 * get_inotify_watch - grab a reference to an inotify_watch
 * @watch: watch to grab
 */
void get_inotify_watch(struct inotify_watch *watch)
{
	atomic_inc(&watch->count);
}
EXPORT_SYMBOL_GPL(get_inotify_watch);

/**
 * put_inotify_watch - decrements the ref count on a given watch. cleans up
 * watch references if the count reaches zero. inotify_watch is freed by
 * inotify callers via the destroy_watch() op.
 * @watch: watch to release
 */
void put_inotify_watch(struct inotify_watch *watch)
{
	if (atomic_dec_and_test(&watch->count)) {
		struct inotify_handle *ih = watch->ih;

		iput(watch->inode);
		ih->in_ops->destroy_watch(watch);
		put_inotify_handle(ih);
	}
}
EXPORT_SYMBOL_GPL(put_inotify_watch);

/*
 * inotify_handle_get_wd - returns the next WD for use by the given handle
 *
 * Callers must hold ih->mutex. This function can sleep.
 */
static int inotify_handle_get_wd(struct inotify_handle *ih,
				 struct inotify_watch *watch)
{
	int ret;

	do {
		if (unlikely(!idr_pre_get(&ih->idr, GFP_KERNEL)))
			return -ENOSPC;
		ret = idr_get_new_above(&ih->idr, watch, ih->last_wd + 1,
					&watch->wd);
	} while (ret == -EAGAIN);

	if (likely(!ret))
		ih->last_wd = watch->wd;

	return ret;
}

/*
 * inotify_inode_watched - returns nonzero if there are watches on this inode
 * and zero otherwise. We call this lockless, we do not care if we race.
 */
static inline int inotify_inode_watched(struct inode *inode)
{
	return !list_empty(&inode->inotify_watches);
}

/*
 * Get child dentry flag into synch with parent inode.
 * Flag should always be clear for negative dentries.
 */
static void set_dentry_child_flags(struct inode *inode, int watched)
{
	struct dentry *alias;

	spin_lock(&dcache_lock);
	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct dentry *child;

		list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
			if (!child->d_inode)
				continue;

			spin_lock(&child->d_lock);
			if (watched)
				child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
			else
				child->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
			spin_unlock(&child->d_lock);
		}
	}
	spin_unlock(&dcache_lock);
}

/*
 * inode_find_handle - find the watch associated with the given inode and
 * handle
 *
 * Callers must hold inode->inotify_mutex.
 */
static struct inotify_watch *inode_find_handle(struct inode *inode,
					       struct inotify_handle *ih)
{
	struct inotify_watch *watch;

	list_for_each_entry(watch, &inode->inotify_watches, i_list) {
		if (watch->ih == ih)
			return watch;
	}

	return NULL;
}

/*
 * remove_watch_no_event - remove watch without the IN_IGNORED event.
 *
 * Callers must hold both inode->inotify_mutex and ih->mutex.
 */
static void remove_watch_no_event(struct inotify_watch *watch,
				  struct inotify_handle *ih)
{
	list_del(&watch->i_list);
	list_del(&watch->h_list);

	if (!inotify_inode_watched(watch->inode))
		set_dentry_child_flags(watch->inode, 0);

	idr_remove(&ih->idr, watch->wd);
}

/**
 * inotify_remove_watch_locked - Remove a watch from both the handle and the
 * inode. Sends the IN_IGNORED event signifying that the inode is no longer
 * watched. May be invoked from a caller's event handler.
 * @ih: inotify handle associated with watch
 * @watch: watch to remove
 *
 * Callers must hold both inode->inotify_mutex and ih->mutex.
 */
void inotify_remove_watch_locked(struct inotify_handle *ih,
				 struct inotify_watch *watch)
{
	remove_watch_no_event(watch, ih);
	ih->in_ops->handle_event(watch, watch->wd, IN_IGNORED, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(inotify_remove_watch_locked);

/* Kernel API for producing events */

/*
 * inotify_d_instantiate - instantiate dcache entry for inode
 */
void inotify_d_instantiate(struct dentry *entry, struct inode *inode)
{
	struct dentry *parent;

	if (!inode)
		return;

	spin_lock(&entry->d_lock);
	parent = entry->d_parent;
	if (parent->d_inode && inotify_inode_watched(parent->d_inode))
		entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
	spin_unlock(&entry->d_lock);
}

/*
 * inotify_d_move - dcache entry has been moved
 */
void inotify_d_move(struct dentry *entry)
{
	struct dentry *parent;

	parent = entry->d_parent;
	if (inotify_inode_watched(parent->d_inode))
		entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
	else
		entry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
}

/**
 * inotify_inode_queue_event - queue an event to all watches on this inode
 * @inode: inode event is originating from
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 * @n_inode: inode associated with name
 */
void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
			       const char *name, struct inode *n_inode)
{
	struct inotify_watch *watch, *next;

	if (!inotify_inode_watched(inode))
		return;

	mutex_lock(&inode->inotify_mutex);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		u32 watch_mask = watch->mask;
		if (watch_mask & mask) {
			struct inotify_handle *ih = watch->ih;
			mutex_lock(&ih->mutex);
			if (watch_mask & IN_ONESHOT)
				remove_watch_no_event(watch, ih);
			ih->in_ops->handle_event(watch, watch->wd, mask, cookie,
						 name, n_inode);
			mutex_unlock(&ih->mutex);
		}
	}
	mutex_unlock(&inode->inotify_mutex);
}
EXPORT_SYMBOL_GPL(inotify_inode_queue_event);

/**
 * inotify_dentry_parent_queue_event - queue an event to a dentry's parent
 * @dentry: the dentry in question, we queue against this dentry's parent
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
				       u32 cookie, const char *name)
{
	struct dentry *parent;
	struct inode *inode;

	if (!(dentry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED))
		return;

	spin_lock(&dentry->d_lock);
	parent = dentry->d_parent;
	inode = parent->d_inode;

	if (inotify_inode_watched(inode)) {
		dget(parent);
		spin_unlock(&dentry->d_lock);
		inotify_inode_queue_event(inode, mask, cookie, name,
					  dentry->d_inode);
		dput(parent);
	} else
		spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event);

/**
 * inotify_get_cookie - return a unique cookie for use in synchronizing events.
 */
u32 inotify_get_cookie(void)
{
	return atomic_inc_return(&inotify_cookie);
}
EXPORT_SYMBOL_GPL(inotify_get_cookie);
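
/*
 * A hedged sketch of the intended use: a rename queues a pair of events
 * that share one cookie so consumers can correlate them (the exact call
 * sites live in the VFS fsnotify glue, not in this file):
 *
 *	u32 cookie = inotify_get_cookie();
 *
 *	inotify_inode_queue_event(old_dir, IN_MOVED_FROM, cookie, name, source);
 *	inotify_inode_queue_event(new_dir, IN_MOVED_TO, cookie, name, source);
 */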

/**
 * inotify_unmount_inodes - an sb is unmounting. handle any watched inodes.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * Called with inode_lock held, protecting the unmounting super block's list
 * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
 * We temporarily drop inode_lock, however, and CAN block.
 */
void inotify_unmount_inodes(struct list_head *list)
{
	struct inode *inode, *next_i, *need_iput = NULL;

	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
		struct inotify_watch *watch, *next_w;
		struct inode *need_iput_tmp;
		struct list_head *watches;

		/*
		 * If i_count is zero, the inode cannot have any watches and
		 * doing an __iget/iput with MS_ACTIVE clear would actually
		 * evict all inodes with zero i_count from icache which is
		 * unnecessarily violent and may in fact be illegal to do.
		 */
		if (!atomic_read(&inode->i_count))
			continue;

		/*
		 * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or
		 * I_WILL_FREE which is fine because by that point the inode
		 * cannot have any associated watches.
		 */
		if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))
			continue;

		need_iput_tmp = need_iput;
		need_iput = NULL;
		/* In case inotify_remove_watch_locked() drops a reference. */
		if (inode != need_iput_tmp)
			__iget(inode);
		else
			need_iput_tmp = NULL;
		/* In case the dropping of a reference would nuke next_i. */
		if ((&next_i->i_sb_list != list) &&
		    atomic_read(&next_i->i_count) &&
		    !(next_i->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))) {
			__iget(next_i);
			need_iput = next_i;
		}

		/*
		 * We can safely drop inode_lock here because we hold
		 * references on both inode and next_i. Also no new inodes
		 * will be added since the umount has begun. Finally,
		 * iprune_mutex keeps shrink_icache_memory() away.
		 */
		spin_unlock(&inode_lock);

		if (need_iput_tmp)
			iput(need_iput_tmp);

		/* for each watch, send IN_UNMOUNT and then remove it */
		mutex_lock(&inode->inotify_mutex);
		watches = &inode->inotify_watches;
		list_for_each_entry_safe(watch, next_w, watches, i_list) {
			struct inotify_handle *ih = watch->ih;
			mutex_lock(&ih->mutex);
			ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0,
						 NULL, NULL);
			inotify_remove_watch_locked(ih, watch);
			mutex_unlock(&ih->mutex);
		}
		mutex_unlock(&inode->inotify_mutex);
		iput(inode);

		spin_lock(&inode_lock);
	}
}
EXPORT_SYMBOL_GPL(inotify_unmount_inodes);

/**
 * inotify_inode_is_dead - an inode has been deleted, cleanup any watches
 * @inode: inode that is about to be removed
 */
void inotify_inode_is_dead(struct inode *inode)
{
	struct inotify_watch *watch, *next;

	mutex_lock(&inode->inotify_mutex);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		struct inotify_handle *ih = watch->ih;
		mutex_lock(&ih->mutex);
		inotify_remove_watch_locked(ih, watch);
		mutex_unlock(&ih->mutex);
	}
	mutex_unlock(&inode->inotify_mutex);
}
EXPORT_SYMBOL_GPL(inotify_inode_is_dead);

/* Kernel Consumer API */

/**
 * inotify_init - allocate and initialize an inotify instance
 * @ops: caller's inotify operations
 */
struct inotify_handle *inotify_init(const struct inotify_operations *ops)
{
	struct inotify_handle *ih;

	ih = kmalloc(sizeof(struct inotify_handle), GFP_KERNEL);
	if (unlikely(!ih))
		return ERR_PTR(-ENOMEM);

	idr_init(&ih->idr);
	INIT_LIST_HEAD(&ih->watches);
	mutex_init(&ih->mutex);
	ih->last_wd = 0;
	ih->in_ops = ops;
	atomic_set(&ih->count, 0);
	get_inotify_handle(ih);

	return ih;
}
EXPORT_SYMBOL_GPL(inotify_init);

/**
 * inotify_init_watch - initialize an inotify watch
 * @watch: watch to initialize
 */
void inotify_init_watch(struct inotify_watch *watch)
{
	INIT_LIST_HEAD(&watch->h_list);
	INIT_LIST_HEAD(&watch->i_list);
	atomic_set(&watch->count, 0);
	get_inotify_watch(watch); /* initial get */
}
EXPORT_SYMBOL_GPL(inotify_init_watch);

/**
 * inotify_destroy - clean up and destroy an inotify instance
 * @ih: inotify handle
 */
void inotify_destroy(struct inotify_handle *ih)
{
	/*
	 * Destroy all of the watches for this handle. Unfortunately, not very
	 * pretty. We cannot do a simple iteration over the list, because we
	 * do not know the inode until we iterate to the watch. But we need to
	 * hold inode->inotify_mutex before ih->mutex. The following works.
	 */
	while (1) {
		struct inotify_watch *watch;
		struct list_head *watches;
		struct inode *inode;

		mutex_lock(&ih->mutex);
		watches = &ih->watches;
		if (list_empty(watches)) {
			mutex_unlock(&ih->mutex);
			break;
		}
		watch = list_first_entry(watches, struct inotify_watch, h_list);
		get_inotify_watch(watch);
		mutex_unlock(&ih->mutex);

		inode = watch->inode;
		mutex_lock(&inode->inotify_mutex);
		mutex_lock(&ih->mutex);

		/* make sure we didn't race with another list removal */
		if (likely(idr_find(&ih->idr, watch->wd))) {
			remove_watch_no_event(watch, ih);
			put_inotify_watch(watch);
		}

		mutex_unlock(&ih->mutex);
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(watch);
	}

	/* free this handle: the put matching the get in inotify_init() */
	put_inotify_handle(ih);
}
EXPORT_SYMBOL_GPL(inotify_destroy);

/**
 * inotify_find_watch - find an existing watch for an (ih,inode) pair
 * @ih: inotify handle
 * @inode: inode to watch
 * @watchp: pointer to existing inotify_watch
 *
 * Caller must pin given inode (via nameidata).
 */
s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode,
		       struct inotify_watch **watchp)
{
	struct inotify_watch *old;
	int ret = -ENOENT;

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	old = inode_find_handle(inode, ih);
	if (unlikely(old)) {
		get_inotify_watch(old); /* caller must put watch */
		*watchp = old;
		ret = old->wd;
	}

	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(inotify_find_watch);

/**
 * inotify_find_update_watch - find and update the mask of an existing watch
 * @ih: inotify handle
 * @inode: inode's watch to update
 * @mask: mask of events to watch
 *
 * Caller must pin given inode (via nameidata).
 */
s32 inotify_find_update_watch(struct inotify_handle *ih, struct inode *inode,
			      u32 mask)
{
	struct inotify_watch *old;
	int mask_add = 0;
	int ret;

	if (mask & IN_MASK_ADD)
		mask_add = 1;

	/* don't allow invalid bits: we don't want flags set */
	mask &= IN_ALL_EVENTS | IN_ONESHOT;
	if (unlikely(!mask))
		return -EINVAL;

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	/*
	 * Handle the case of re-adding a watch on an (inode,ih) pair that we
	 * are already watching. We just update the mask and return its wd.
	 */
	old = inode_find_handle(inode, ih);
	if (unlikely(!old)) {
		ret = -ENOENT;
		goto out;
	}

	if (mask_add)
		old->mask |= mask;
	else
		old->mask = mask;
	ret = old->wd;
out:
	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(inotify_find_update_watch);

/**
 * inotify_add_watch - add a watch to an inotify instance
 * @ih: inotify handle
 * @watch: caller allocated watch structure
 * @inode: inode to watch
 * @mask: mask of events to watch
 *
 * Caller must pin given inode (via nameidata).
 * Caller must ensure it only calls inotify_add_watch() once per watch.
 * Calls inotify_handle_get_wd() so may sleep.
 */
s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
		      struct inode *inode, u32 mask)
{
	int ret = 0;
	int newly_watched;

	/* don't allow invalid bits: we don't want flags set */
	mask &= IN_ALL_EVENTS | IN_ONESHOT;
	if (unlikely(!mask))
		return -EINVAL;
	watch->mask = mask;

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	/* Initialize a new watch */
	ret = inotify_handle_get_wd(ih, watch);
	if (unlikely(ret))
		goto out;
	ret = watch->wd;

	/* save a reference to handle and bump the count to make it official */
	get_inotify_handle(ih);
	watch->ih = ih;

	/*
	 * Save a reference to the inode and bump the ref count to make it
	 * official. We hold a reference to nameidata, which makes this safe.
	 */
	watch->inode = igrab(inode);

	/* Add the watch to the handle's and the inode's list */
	newly_watched = !inotify_inode_watched(inode);
	list_add(&watch->h_list, &ih->watches);
	list_add(&watch->i_list, &inode->inotify_watches);
	/*
	 * Set child flags _after_ adding the watch, so there is no race
	 * window where newly instantiated children could miss their parent's
	 * watched flag.
	 */
	if (newly_watched)
		set_dentry_child_flags(inode, 1);

out:
	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(inotify_add_watch);
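
/*
 * A hedged usage sketch (the struct my_watch container from the example
 * near the top of this file; the caller has already pinned @inode via path
 * lookup; error handling abbreviated):
 *
 *	struct my_watch *w = kmalloc(sizeof(*w), GFP_KERNEL);
 *	s32 wd;
 *
 *	inotify_init_watch(&w->watch);
 *	wd = inotify_add_watch(ih, &w->watch, inode, IN_CREATE | IN_DELETE);
 *	if (wd < 0)
 *		kfree(w);  (never installed: free it directly)
 */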

/**
 * inotify_clone_watch - put the watch next to the existing one
 * @old: already installed watch
 * @new: new watch
 *
 * Caller must hold the inotify_mutex of the inode we are dealing with;
 * it is expected to remove the old watch before unlocking the inode.
 */
s32 inotify_clone_watch(struct inotify_watch *old, struct inotify_watch *new)
{
	struct inotify_handle *ih = old->ih;
	int ret = 0;

	new->mask = old->mask;
	new->ih = ih;

	mutex_lock(&ih->mutex);

	/* Initialize a new watch */
	ret = inotify_handle_get_wd(ih, new);
	if (unlikely(ret))
		goto out;
	ret = new->wd;

	get_inotify_handle(ih);

	new->inode = igrab(old->inode);

	list_add(&new->h_list, &ih->watches);
	list_add(&new->i_list, &old->inode->inotify_watches);
out:
	mutex_unlock(&ih->mutex);
	return ret;
}

703
455434d4
AV
704void inotify_evict_watch(struct inotify_watch *watch)
705{
706 get_inotify_watch(watch);
707 mutex_lock(&watch->ih->mutex);
708 inotify_remove_watch_locked(watch->ih, watch);
709 mutex_unlock(&watch->ih->mutex);
710}
711
/**
 * inotify_rm_wd - remove a watch from an inotify instance
 * @ih: inotify handle
 * @wd: watch descriptor to remove
 *
 * Can sleep.
 */
int inotify_rm_wd(struct inotify_handle *ih, u32 wd)
{
	struct inotify_watch *watch;
	struct inode *inode;

	mutex_lock(&ih->mutex);
	watch = idr_find(&ih->idr, wd);
	if (unlikely(!watch)) {
		mutex_unlock(&ih->mutex);
		return -EINVAL;
	}
	get_inotify_watch(watch);
	inode = watch->inode;
	mutex_unlock(&ih->mutex);

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	/* make sure that we did not race */
	if (likely(idr_find(&ih->idr, wd) == watch))
		inotify_remove_watch_locked(ih, watch);

	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);
	put_inotify_watch(watch);

	return 0;
}
EXPORT_SYMBOL_GPL(inotify_rm_wd);

/**
 * inotify_rm_watch - remove a watch from an inotify instance
 * @ih: inotify handle
 * @watch: watch to remove
 *
 * Can sleep.
 */
int inotify_rm_watch(struct inotify_handle *ih,
		     struct inotify_watch *watch)
{
	return inotify_rm_wd(ih, watch->wd);
}
EXPORT_SYMBOL_GPL(inotify_rm_watch);

/*
 * inotify_setup - core initialization function
 */
static int __init inotify_setup(void)
{
	atomic_set(&inotify_cookie, 0);

	return 0;
}

module_init(inotify_setup);