/* kernel/audit_tree.c */
#include "audit.h"
#include <linux/inotify.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct inotify_watch watch;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows us to get from node.list to the containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference. Some.
 */
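
/*
 * Added commentary, a worked example of the index encoding above: in a
 * chunk watching three trees the slots look like
 *
 *	owners[0].index == 0
 *	owners[1].index == 1 | (1U<<31)		(marked "will prune")
 *	owners[2].index == 2
 *
 * so (index & ~(1U<<31)) always recovers the slot number - the
 * invariant find_chunk() below depends on.
 */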

static struct inotify_handle *rtree_ih;

static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}
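
/*
 * Note (added): pathname[] is a flexible array member, so
 * sizeof(struct audit_tree) does not include it; the kmalloc() above
 * sizes the header plus strlen(s) + 1 bytes for the NUL-terminated
 * copy made by strcpy().
 */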

static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static void __put_tree(struct rcu_head *rcu)
{
	struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
	kfree(tree);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		call_rcu(&tree->head, __put_tree);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	inotify_init_watch(&chunk->watch);
	return chunk;
}
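
/*
 * Note (added): owners[] is sized like tree->pathname above - header
 * via offsetof() plus count slots in a single allocation. The chunk
 * starts with .refs == 1; per the comment near the top of the file,
 * that is the reference contributed by the embedded watch while the
 * watch's own refcount is non-zero.
 */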

static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}
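
/*
 * Added commentary: inodes are aligned slab objects, so the low bits
 * of the pointer carry little information; dividing by L1_CACHE_BYTES
 * discards them, and with HASH_SIZE a power of two the modulo is just
 * a mask over the "middle bits of watch.inode" that the comment at
 * the top mentions. E.g. with L1_CACHE_BYTES == 64, an inode at
 * ...0x1234d40 selects bucket (0x1234d40 / 64) % 128 == 0x35.
 */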

/* hash_lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list = chunk_hash(chunk->watch.inode);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		if (p->watch.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}
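
/*
 * Sketch (added; the caller side is an assumption based on the
 * comments above): a lookup runs under RCU and the reference it takes
 * is dropped with audit_put_chunk() once the caller is done:
 *
 *	rcu_read_lock();
 *	chunk = audit_tree_lookup(inode);
 *	rcu_read_unlock();
 *	if (chunk) {
 *		...match against rules...
 *		audit_put_chunk(chunk);
 *	}
 */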

int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return 1;
	return 0;
}

/* tagging and untagging inodes with trees */

static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}
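
/*
 * Added walk-through: if p points at chunk->owners[2], then
 * p->index & ~(1U<<31) == 2 (the slot invariant kept by alloc_chunk()
 * and the copy loops below), p -= 2 lands on owners[0], and
 * container_of() steps back to the enclosing audit_chunk.
 */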

/*
 * Added note on the locking contract: untag_chunk() is entered and
 * left with hash_lock held, but drops it around the allocation and
 * inotify work in the middle - which is why its callers re-check
 * their list heads on every iteration instead of using
 * list_for_each_entry().
 */
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct audit_chunk *new;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	if (!pin_inotify_watch(&chunk->watch)) {
		/*
		 * Filesystem is shutting down; all watches are getting
		 * evicted, just take it off the node list for this
		 * tree and let the eviction logic take care of the
		 * rest.
		 */
		owner = p->owner;
		if (owner->root == chunk) {
			list_del_init(&owner->same_root);
			owner->root = NULL;
		}
		list_del_init(&p->list);
		p->owner = NULL;
		put_tree(owner);
		return;
	}

	spin_unlock(&hash_lock);

	/*
	 * pin_inotify_watch() succeeded, so the watch won't go away
	 * from under us.
	 */
	mutex_lock(&chunk->watch.inode->inotify_mutex);
	if (chunk->dead) {
		mutex_unlock(&chunk->watch.inode->inotify_mutex);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&chunk->watch.inode->inotify_mutex);
		put_inotify_watch(&chunk->watch);
		goto out;
	}

	new = alloc_chunk(size);
	if (!new)
		goto Fallback;
	if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
		free_chunk(new);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	inotify_evict_watch(&chunk->watch);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
	put_inotify_watch(&chunk->watch);
	goto out;

Fallback:
	/* do the best we can */
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
out:
	unpin_inotify_watch(&chunk->watch);
	spin_lock(&hash_lock);
}
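
/*
 * Added example of the copy-down above: untagging owners[1] of a
 * three-slot chunk copies slots 0 and 2 into a new two-slot chunk;
 * "index - j + i" turns an old index of 2 (slot 2) into 1 (slot 1)
 * while leaving bit 31, if set, alone - so both the slot invariant
 * and any "will prune" marks survive the replacement.
 */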

static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
		free_chunk(chunk);
		return -ENOSPC;
	}

	mutex_lock(&inode->inotify_mutex);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&chunk->watch);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&inode->inotify_mutex);
	return 0;
}
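
/*
 * Added note: the watch mask requests IN_DELETE_SELF and IN_IGNORED.
 * When the watched inode goes away (or the watch is evicted), inotify
 * finishes up by delivering IN_IGNORED - the one event that
 * handle_event() below actually acts on, by evicting the chunk.
 */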

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct inotify_watch *watch;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
		return create_chunk(inode, tree);

	old = container_of(watch, struct audit_chunk, watch);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			put_inotify_watch(&old->watch);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		put_inotify_watch(&old->watch);
		return -ENOMEM;
	}

	mutex_lock(&inode->inotify_mutex);
	if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&old->watch);
		free_chunk(chunk);
		return -ENOSPC;
	}
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&old->watch);
		put_inotify_watch(&chunk->watch);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	inotify_evict_watch(&old->watch);
	mutex_unlock(&inode->inotify_mutex);
	put_inotify_watch(&old->watch); /* pair to inotify_find_watch */
	put_inotify_watch(&old->watch); /* and kill it */
	return 0;
}
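
/*
 * Added summary: a live chunk is never resized in place. tag_chunk()
 * and untag_chunk() both follow the "replace struct chunk on
 * tagging/untagging" rule from the top of the file: build a complete
 * replacement, then swap it into the hash with list_replace_rcu(), so
 * RCU lookups see either the old chunk or the new one, never a
 * half-updated one.
 */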

static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;
	struct audit_buffer *ab;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
			audit_log_format(ab, "op=");
			audit_log_string(ab, "remove rule");
			audit_log_format(ab, " dir=");
			audit_log_untrustedstring(ab, rule->tree->pathname);
			audit_log_key(ab, rule->filterkey);
			audit_log_format(ab, " list=%d res=1", rule->listnr);
			audit_log_end(ab);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}
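
/*
 * Added note: each rule killed above gets an AUDIT_CONFIG_CHANGE
 * record roughly of the form
 *
 *	op="remove rule" dir="/watched/path" key=... list=4 res=1
 *
 * (list=4 being AUDIT_FILTER_EXIT; the exact rendering of each field
 * is up to the audit_log_* helpers).
 */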

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder: move the marked ("will prune") nodes to the front */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}
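
/*
 * Added summary of the marking protocol: nodes created by tag_chunk()
 * start with bit 31 set, and audit_trim_trees() sets it explicitly on
 * everything it revisits. Callers clear the bit on nodes confirmed to
 * stay and then call trim_marked(), so whatever kept the mark gets
 * untagged again - see audit_add_tree_rule(), audit_trim_trees() and
 * audit_tag_tree() below.
 */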

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
	return mnt->mnt_root->d_inode == arg;
}

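/*
 * Added note on the traversal trick used below and in audit_tag_tree():
 * "cursor" is a dummy list_head spliced into tree_list to remember our
 * position, letting us drop audit_filter_mutex while doing path
 * lookups and retake it without trusting any tree pointer to still be
 * on the list.
 */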
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (!root_mnt)
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct inode *inode = find_chunk(node)->watch.inode;
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root, inode, root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		put_tree(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(mnt->mnt_root->d_inode, arg);
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (!mnt) {
		err = -ENOMEM;
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

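/*
 * Added note: audit_tag_tree() extends the cursor trick with a second
 * dummy entry, "barrier". Trees accepted by the first loop are moved
 * in front of the barrier; the second loop then revisits exactly
 * those, clearing the bit-31 marks on success or calling
 * trim_marked() on failure.
 */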
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (!tagged)
		return -ENOMEM;

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(&prune_list)) {
		struct audit_tree *victim;

		victim = list_entry(prune_list.next, struct audit_tree, list);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
	return 0;
}

static void audit_schedule_prune(void)
{
	kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

/* inode->inotify_mutex is locked */
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	if (need_prune)
		audit_schedule_prune();
	mutex_unlock(&audit_filter_mutex);
}
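
/*
 * Added note: evict_chunk() has two disposal paths for dying trees.
 * When audit_killed_trees() hands back a list (syscall context), the
 * trees are queued on it for the synchronous audit_kill_trees() pass
 * at syscall end; otherwise they go onto prune_list and a prune
 * thread is kicked off via audit_schedule_prune().
 */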

static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
			 u32 cookie, const char *dname, struct inode *inode)
{
	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);

	if (mask & IN_IGNORED) {
		evict_chunk(chunk);
		put_inotify_watch(watch);
	}
}

static void destroy_watch(struct inotify_watch *watch)
{
	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
	call_rcu(&chunk->head, __put_chunk);
}

static const struct inotify_operations rtree_inotify_ops = {
	.handle_event	= handle_event,
	.destroy_watch	= destroy_watch,
};

static int __init audit_tree_init(void)
{
	int i;

	rtree_ih = inotify_init(&rtree_inotify_ops);
	if (IS_ERR(rtree_ih))
		audit_panic("cannot initialize inotify handle for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);