--- /dev/null
+++ b/kernel/audit_tree.c
+#include "audit.h"
+#include <linux/inotify.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+
+struct audit_tree;
+struct audit_chunk;
+
+struct audit_tree {
+       atomic_t count;
+       int goner;
+       struct audit_chunk *root;
+       struct list_head chunks;
+       struct list_head rules;
+       struct list_head list;
+       struct list_head same_root;
+       struct rcu_head head;
+       char pathname[];
+};
+
+struct audit_chunk {
+       struct list_head hash;
+       struct inotify_watch watch;
+       struct list_head trees;         /* trees that have their root here */
+       int dead;
+       int count;
+       struct rcu_head head;
+       struct node {
+               struct list_head list;
+               struct audit_tree *owner;
+               unsigned index;         /* index; upper bit indicates 'will prune' */
+       } owners[];
+};
+
+static LIST_HEAD(tree_list);
+static LIST_HEAD(prune_list);
+
+/*
+ * One struct chunk is attached to each inode of interest.
+ * We replace struct chunk on tagging/untagging.
+ * Rules have a pointer to struct audit_tree.
+ * Rules have struct list_head rlist forming a list of rules over
+ * the same tree.
+ * References to struct chunk are collected at audit_inode{,_child}()
+ * time and used in AUDIT_TREE rule matching.
+ * These references are dropped at the same time we are calling
+ * audit_free_names(), etc.
+ *
+ * Cyclic lists galore:
+ * tree.chunks anchors chunk.owners[].list                     hash_lock
+ * tree.rules anchors rule.rlist                               audit_filter_mutex
+ * chunk.trees anchors tree.same_root                          hash_lock
+ * chunk.hash is a hash with middle bits of watch.inode as
+ * a hash function.                                            RCU, hash_lock
+ *
+ * tree is refcounted; one reference for "some rules on rules_list refer to
+ * it", one for each chunk with pointer to it.
+ *
+ * chunk is refcounted by embedded inotify_watch.
+ *
+ * node.index allows us to get from node.list to the containing chunk.
+ * The MSB of that field is stolen to mark taggings that we might have to
+ * revert - several operations have very unpleasant cleanup logic and
+ * that makes a difference.  Some.
+ */
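+
+/*
+ * A minimal sketch (illustration only, not part of the interface) of how
+ * a node is mapped back to its chunk - this is what find_chunk() below
+ * relies on:
+ *
+ *	struct node *p = ...;			   taken off tree.chunks
+ *	unsigned slot = p->index & ~(1U << 31);	   strip the mark bit
+ *	struct audit_chunk *c =
+ *		container_of(p - slot, struct audit_chunk, owners[0]);
+ *
+ * For that to work, tag/untag keep (index & ~(1U<<31)) equal to the
+ * node's slot number in owners[].
+ */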
+
+static struct inotify_handle *rtree_ih;
+
+static struct audit_tree *alloc_tree(const char *s)
+{
+       struct audit_tree *tree;
+
+       tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
+       if (tree) {
+               atomic_set(&tree->count, 1);
+               tree->goner = 0;
+               INIT_LIST_HEAD(&tree->chunks);
+               INIT_LIST_HEAD(&tree->rules);
+               INIT_LIST_HEAD(&tree->list);
+               INIT_LIST_HEAD(&tree->same_root);
+               tree->root = NULL;
+               strcpy(tree->pathname, s);
+       }
+       return tree;
+}
+
+static inline void get_tree(struct audit_tree *tree)
+{
+       atomic_inc(&tree->count);
+}
+
+static void __put_tree(struct rcu_head *rcu)
+{
+       struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
+       kfree(tree);
+}
+
+static inline void put_tree(struct audit_tree *tree)
+{
+       if (atomic_dec_and_test(&tree->count))
+               call_rcu(&tree->head, __put_tree);
+}
+
+/* to avoid bringing the entire thing into audit.h */
+const char *audit_tree_path(struct audit_tree *tree)
+{
+       return tree->pathname;
+}
+
+static struct audit_chunk *alloc_chunk(int count)
+{
+       struct audit_chunk *chunk;
+       size_t size;
+       int i;
+
+       size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
+       chunk = kzalloc(size, GFP_KERNEL);
+       if (!chunk)
+               return NULL;
+
+       INIT_LIST_HEAD(&chunk->hash);
+       INIT_LIST_HEAD(&chunk->trees);
+       chunk->count = count;
+       for (i = 0; i < count; i++) {
+               INIT_LIST_HEAD(&chunk->owners[i].list);
+               chunk->owners[i].index = i;
+       }
+       inotify_init_watch(&chunk->watch);
+       return chunk;
+}
+
+static void __free_chunk(struct rcu_head *rcu)
+{
+       struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
+       int i;
+
+       for (i = 0; i < chunk->count; i++) {
+               if (chunk->owners[i].owner)
+                       put_tree(chunk->owners[i].owner);
+       }
+       kfree(chunk);
+}
+
+static inline void free_chunk(struct audit_chunk *chunk)
+{
+       call_rcu(&chunk->head, __free_chunk);
+}
+
+void audit_put_chunk(struct audit_chunk *chunk)
+{
+       put_inotify_watch(&chunk->watch);
+}
+
+enum {HASH_SIZE = 128};
+static struct list_head chunk_hash_heads[HASH_SIZE];
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
+
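+/*
+ * Inodes come out of the inode slab, so the low bits of the pointer carry
+ * next to no entropy; dividing by L1_CACHE_BYTES shifts them out before
+ * the modulus picks a bucket - hence "middle bits" in the comment above.
+ */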
+static inline struct list_head *chunk_hash(const struct inode *inode)
+{
+       unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
+       return chunk_hash_heads + n % HASH_SIZE;
+}
+
+/* hash_lock is held by caller */
+static void insert_hash(struct audit_chunk *chunk)
+{
+       struct list_head *list = chunk_hash(chunk->watch.inode);
+       list_add_rcu(&chunk->hash, list);
+}
+
+/* called under rcu_read_lock */
+struct audit_chunk *audit_tree_lookup(const struct inode *inode)
+{
+       struct list_head *list = chunk_hash(inode);
+       struct list_head *pos;
+
+       list_for_each_rcu(pos, list) {
+               struct audit_chunk *p = container_of(pos, struct audit_chunk, hash);
+               if (p->watch.inode == inode) {
+                       get_inotify_watch(&p->watch);
+                       return p;
+               }
+       }
+       return NULL;
+}
+
+int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
+{
+       int n;
+       for (n = 0; n < chunk->count; n++)
+               if (chunk->owners[n].owner == tree)
+                       return 1;
+       return 0;
+}
+
+/* tagging and untagging inodes with trees */
+
+static void untag_chunk(struct audit_chunk *chunk, struct node *p)
+{
+       struct audit_chunk *new;
+       struct audit_tree *owner;
+       int size = chunk->count - 1;
+       int i, j;
+
+       mutex_lock(&chunk->watch.inode->inotify_mutex);
+       if (chunk->dead) {
+               mutex_unlock(&chunk->watch.inode->inotify_mutex);
+               return;
+       }
+
+       owner = p->owner;
+
+       if (!size) {
+               chunk->dead = 1;
+               spin_lock(&hash_lock);
+               list_del_init(&chunk->trees);
+               if (owner->root == chunk)
+                       owner->root = NULL;
+               list_del_init(&p->list);
+               list_del_rcu(&chunk->hash);
+               spin_unlock(&hash_lock);
+               inotify_evict_watch(&chunk->watch);
+               mutex_unlock(&chunk->watch.inode->inotify_mutex);
+               put_inotify_watch(&chunk->watch);
+               return;
+       }
+
+       new = alloc_chunk(size);
+       if (!new)
+               goto Fallback;
+       if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
+               free_chunk(new);
+               goto Fallback;
+       }
+
+       chunk->dead = 1;
+       spin_lock(&hash_lock);
+       list_replace_init(&chunk->trees, &new->trees);
+       if (owner->root == chunk) {
+               list_del_init(&owner->same_root);
+               owner->root = NULL;
+       }
+
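+       /*
+        * i indexes slots in the new (smaller) chunk, j slots in the old
+        * one; the slot owned by p is dropped, so i lags j by one past it.
+        */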
+       for (i = j = 0; j <= size; i++, j++) {
+               struct audit_tree *s;
+               if (&chunk->owners[j] == p) {
+                       list_del_init(&p->list);
+                       i--;
+                       continue;
+               }
+               s = chunk->owners[j].owner;
+               new->owners[i].owner = s;
+               new->owners[i].index = chunk->owners[j].index - j + i;
+               if (!s) /* result of earlier fallback */
+                       continue;
+               get_tree(s);
+               list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
+       }
+
+       list_replace_rcu(&chunk->hash, &new->hash);
+       list_for_each_entry(owner, &new->trees, same_root)
+               owner->root = new;
+       spin_unlock(&hash_lock);
+       inotify_evict_watch(&chunk->watch);
+       mutex_unlock(&chunk->watch.inode->inotify_mutex);
+       put_inotify_watch(&chunk->watch);
+       return;
+
+Fallback:
+       /* do the best we can */
+       spin_lock(&hash_lock);
+       if (owner->root == chunk) {
+               list_del_init(&owner->same_root);
+               owner->root = NULL;
+       }
+       list_del_init(&p->list);
+       p->owner = NULL;
+       put_tree(owner);
+       spin_unlock(&hash_lock);
+       mutex_unlock(&chunk->watch.inode->inotify_mutex);
+}
+
+static int create_chunk(struct inode *inode, struct audit_tree *tree)
+{
+       struct audit_chunk *chunk = alloc_chunk(1);
+       if (!chunk)
+               return -ENOMEM;
+
+       if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
+               free_chunk(chunk);
+               return -ENOSPC;
+       }
+
+       mutex_lock(&inode->inotify_mutex);
+       spin_lock(&hash_lock);
+       if (tree->goner) {
+               spin_unlock(&hash_lock);
+               chunk->dead = 1;
+               inotify_evict_watch(&chunk->watch);
+               mutex_unlock(&inode->inotify_mutex);
+               put_inotify_watch(&chunk->watch);
+               return 0;
+       }
+       chunk->owners[0].index = (1U << 31);
+       chunk->owners[0].owner = tree;
+       get_tree(tree);
+       list_add(&chunk->owners[0].list, &tree->chunks);
+       if (!tree->root) {
+               tree->root = chunk;
+               list_add(&tree->same_root, &chunk->trees);
+       }
+       insert_hash(chunk);
+       spin_unlock(&hash_lock);
+       mutex_unlock(&inode->inotify_mutex);
+       return 0;
+}
+
+/* the first tagged inode becomes root of tree */
+static int tag_chunk(struct inode *inode, struct audit_tree *tree)
+{
+       struct inotify_watch *watch;
+       struct audit_tree *owner;
+       struct audit_chunk *chunk, *old;
+       struct node *p;
+       int n;
+
+       if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
+               return create_chunk(inode, tree);
+
+       old = container_of(watch, struct audit_chunk, watch);
+
+       /* are we already there? */
+       spin_lock(&hash_lock);
+       for (n = 0; n < old->count; n++) {
+               if (old->owners[n].owner == tree) {
+                       spin_unlock(&hash_lock);
+                       put_inotify_watch(watch);
+                       return 0;
+               }
+       }
+       spin_unlock(&hash_lock);
+
+       chunk = alloc_chunk(old->count + 1);
+       if (!chunk)
+               return -ENOMEM;
+
+       mutex_lock(&inode->inotify_mutex);
+       if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
+               mutex_unlock(&inode->inotify_mutex);
+               free_chunk(chunk);
+               return -ENOSPC;
+       }
+       spin_lock(&hash_lock);
+       if (tree->goner) {
+               spin_unlock(&hash_lock);
+               chunk->dead = 1;
+               inotify_evict_watch(&chunk->watch);
+               mutex_unlock(&inode->inotify_mutex);
+               put_inotify_watch(&chunk->watch);
+               return 0;
+       }
+       list_replace_init(&old->trees, &chunk->trees);
+       for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
+               struct audit_tree *s = old->owners[n].owner;
+               p->owner = s;
+               p->index = old->owners[n].index;
+               if (!s) /* result of fallback in untag */
+                       continue;
+               get_tree(s);
+               list_replace_init(&old->owners[n].list, &p->list);
+       }
+       p->index = (chunk->count - 1) | (1U<<31);
+       p->owner = tree;
+       get_tree(tree);
+       list_add(&p->list, &tree->chunks);
+       list_replace_rcu(&old->hash, &chunk->hash);
+       list_for_each_entry(owner, &chunk->trees, same_root)
+               owner->root = chunk;
+       old->dead = 1;
+       if (!tree->root) {
+               tree->root = chunk;
+               list_add(&tree->same_root, &chunk->trees);
+       }
+       spin_unlock(&hash_lock);
+       inotify_evict_watch(&old->watch);
+       mutex_unlock(&inode->inotify_mutex);
+       put_inotify_watch(&old->watch);
+       return 0;
+}
+
+static struct audit_chunk *find_chunk(struct node *p)
+{
+       int index = p->index & ~(1U<<31);
+       p -= index;
+       return container_of(p, struct audit_chunk, owners[0]);
+}
+
+static void kill_rules(struct audit_tree *tree)
+{
+       struct audit_krule *rule, *next;
+       struct audit_entry *entry;
+       struct audit_buffer *ab;
+
+       list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
+               entry = container_of(rule, struct audit_entry, rule);
+
+               list_del_init(&rule->rlist);
+               if (rule->tree) {
+                       /* not a half-baked one */
+                       ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
+                       audit_log_format(ab, "op=remove rule dir=");
+                       audit_log_untrustedstring(ab, rule->tree->pathname);
+                       if (rule->filterkey) {
+                               audit_log_format(ab, " key=");
+                               audit_log_untrustedstring(ab, rule->filterkey);
+                       } else
+                               audit_log_format(ab, " key=(null)");
+                       audit_log_format(ab, " list=%d res=1", rule->listnr);
+                       audit_log_end(ab);
+                       rule->tree = NULL;
+                       list_del_rcu(&entry->list);
+                       call_rcu(&entry->rcu, audit_free_rule_rcu);
+               }
+       }
+}
+
+/*
+ * finish killing struct audit_tree
+ */
+static void prune_one(struct audit_tree *victim)
+{
+       spin_lock(&hash_lock);
+       while (!list_empty(&victim->chunks)) {
+               struct node *p;
+               struct audit_chunk *chunk;
+
+               p = list_entry(victim->chunks.next, struct node, list);
+               chunk = find_chunk(p);
+               get_inotify_watch(&chunk->watch);
+               spin_unlock(&hash_lock);
+
+               untag_chunk(chunk, p);
+
+               put_inotify_watch(&chunk->watch);
+               spin_lock(&hash_lock);
+       }
+       spin_unlock(&hash_lock);
+       put_tree(victim);
+}
+
+/* trim the uncommitted chunks from tree */
+
+static void trim_marked(struct audit_tree *tree)
+{
+       struct list_head *p, *q;
+       spin_lock(&hash_lock);
+       if (tree->goner) {
+               spin_unlock(&hash_lock);
+               return;
+       }
+       /* reorder: bring the marked chunks to the front of the list */
+       for (p = tree->chunks.next; p != &tree->chunks; p = q) {
+               struct node *node = list_entry(p, struct node, list);
+               q = p->next;
+               if (node->index & (1U<<31)) {
+                       list_del_init(p);
+                       list_add(p, &tree->chunks);
+               }
+       }
+
+       while (!list_empty(&tree->chunks)) {
+               struct node *node;
+               struct audit_chunk *chunk;
+
+               node = list_entry(tree->chunks.next, struct node, list);
+
+               /* have we run out of marked? */
+               if (!(node->index & (1U<<31)))
+                       break;
+
+               chunk = find_chunk(node);
+               get_inotify_watch(&chunk->watch);
+               spin_unlock(&hash_lock);
+
+               untag_chunk(chunk, node);
+
+               put_inotify_watch(&chunk->watch);
+               spin_lock(&hash_lock);
+       }
+       if (!tree->root && !tree->goner) {
+               tree->goner = 1;
+               spin_unlock(&hash_lock);
+               mutex_lock(&audit_filter_mutex);
+               kill_rules(tree);
+               list_del_init(&tree->list);
+               mutex_unlock(&audit_filter_mutex);
+               prune_one(tree);
+       } else {
+               spin_unlock(&hash_lock);
+       }
+}
+
+/* called with audit_filter_mutex held */
+int audit_remove_tree_rule(struct audit_krule *rule)
+{
+       struct audit_tree *tree;
+       tree = rule->tree;
+       if (tree) {
+               spin_lock(&hash_lock);
+               list_del_init(&rule->rlist);
+               if (list_empty(&tree->rules) && !tree->goner) {
+                       tree->root = NULL;
+                       list_del_init(&tree->same_root);
+                       tree->goner = 1;
+                       list_move(&tree->list, &prune_list);
+                       rule->tree = NULL;
+                       spin_unlock(&hash_lock);
+                       audit_schedule_prune();
+                       return 1;
+               }
+               rule->tree = NULL;
+               spin_unlock(&hash_lock);
+               return 1;
+       }
+       return 0;
+}
+
+void audit_trim_trees(void)
+{
+       struct list_head cursor;
+
+       mutex_lock(&audit_filter_mutex);
+       list_add(&cursor, &tree_list);
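+       /*
+        * cursor is a dummy element marking our position in tree_list, so
+        * that we can drop audit_filter_mutex for each tree without losing
+        * our place.
+        */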
+       while (cursor.next != &tree_list) {
+               struct audit_tree *tree;
+               struct nameidata nd;
+               struct vfsmount *root_mnt;
+               struct node *node;
+               struct list_head list;
+               int err;
+
+               tree = container_of(cursor.next, struct audit_tree, list);
+               get_tree(tree);
+               list_del(&cursor);
+               list_add(&cursor, &tree->list);
+               mutex_unlock(&audit_filter_mutex);
+
+               err = path_lookup(tree->pathname, 0, &nd);
+               if (err)
+                       goto skip_it;
+
+               root_mnt = collect_mounts(nd.mnt, nd.dentry);
+               path_release(&nd);
+               if (!root_mnt)
+                       goto skip_it;
+
+               list_add_tail(&list, &root_mnt->mnt_list);
+               spin_lock(&hash_lock);
+               list_for_each_entry(node, &tree->chunks, list) {
+                       struct audit_chunk *chunk = find_chunk(node);
+                       struct inode *inode = chunk->watch.inode;
+                       struct vfsmount *mnt;
+                       node->index |= 1U<<31;
+                       list_for_each_entry(mnt, &list, mnt_list) {
+                               if (mnt->mnt_root->d_inode == inode) {
+                                       node->index &= ~(1U<<31);
+                                       break;
+                               }
+                       }
+               }
+               spin_unlock(&hash_lock);
+               trim_marked(tree);
+               put_tree(tree);
+               list_del_init(&list);
+               drop_collected_mounts(root_mnt);
+skip_it:
+               mutex_lock(&audit_filter_mutex);
+       }
+       list_del(&cursor);
+       mutex_unlock(&audit_filter_mutex);
+}
+
+static int is_under(struct vfsmount *mnt, struct dentry *dentry,
+                   struct nameidata *nd)
+{
+       if (mnt != nd->mnt) {
+               for (;;) {
+                       if (mnt->mnt_parent == mnt)
+                               return 0;
+                       if (mnt->mnt_parent == nd->mnt)
+                               break;
+                       mnt = mnt->mnt_parent;
+               }
+               dentry = mnt->mnt_mountpoint;
+       }
+       return is_subdir(dentry, nd->dentry);
+}
+
+int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
+{
+       if (pathname[0] != '/' ||
+           rule->listnr != AUDIT_FILTER_EXIT ||
+           op & ~AUDIT_EQUAL ||
+           rule->inode_f || rule->watch || rule->tree)
+               return -EINVAL;
+       rule->tree = alloc_tree(pathname);
+       if (!rule->tree)
+               return -ENOMEM;
+       return 0;
+}
+
+void audit_put_tree(struct audit_tree *tree)
+{
+       put_tree(tree);
+}
+
+/* called with audit_filter_mutex held */
+int audit_add_tree_rule(struct audit_krule *rule)
+{
+       struct audit_tree *seed = rule->tree, *tree;
+       struct nameidata nd;
+       struct vfsmount *mnt, *p;
+       struct list_head list;
+       int err;
+
+       list_for_each_entry(tree, &tree_list, list) {
+               if (!strcmp(seed->pathname, tree->pathname)) {
+                       put_tree(seed);
+                       rule->tree = tree;
+                       list_add(&rule->rlist, &tree->rules);
+                       return 0;
+               }
+       }
+       tree = seed;
+       list_add(&tree->list, &tree_list);
+       list_add(&rule->rlist, &tree->rules);
+       /* do not set rule->tree yet */
+       mutex_unlock(&audit_filter_mutex);
+
+       err = path_lookup(tree->pathname, 0, &nd);
+       if (err)
+               goto Err;
+       mnt = collect_mounts(nd.mnt, nd.dentry);
+       path_release(&nd);
+       if (!mnt) {
+               err = -ENOMEM;
+               goto Err;
+       }
+       list_add_tail(&list, &mnt->mnt_list);
+
+       get_tree(tree);
+       list_for_each_entry(p, &list, mnt_list) {
+               err = tag_chunk(p->mnt_root->d_inode, tree);
+               if (err)
+                       break;
+       }
+
+       list_del(&list);
+       drop_collected_mounts(mnt);
+
+       if (!err) {
+               struct node *node;
+               spin_lock(&hash_lock);
+               list_for_each_entry(node, &tree->chunks, list)
+                       node->index &= ~(1U<<31);
+               spin_unlock(&hash_lock);
+       } else {
+               trim_marked(tree);
+               goto Err;
+       }
+
+       mutex_lock(&audit_filter_mutex);
+       if (list_empty(&rule->rlist)) {
+               put_tree(tree);
+               return -ENOENT;
+       }
+       rule->tree = tree;
+       put_tree(tree);
+
+       return 0;
+Err:
+       mutex_lock(&audit_filter_mutex);
+       list_del_init(&tree->list);
+       list_del_init(&tree->rules);
+       put_tree(tree);
+       return err;
+}
+
+int audit_tag_tree(char *old, char *new)
+{
+       struct list_head cursor, barrier;
+       int failed = 0;
+       struct nameidata nd;
+       struct vfsmount *tagged;
+       struct list_head list;
+       struct vfsmount *mnt;
+       struct dentry *dentry;
+       int err;
+
+       err = path_lookup(new, 0, &nd);
+       if (err)
+               return err;
+       tagged = collect_mounts(nd.mnt, nd.dentry);
+       path_release(&nd);
+       if (!tagged)
+               return -ENOMEM;
+
+       err = path_lookup(old, 0, &nd);
+       if (err) {
+               drop_collected_mounts(tagged);
+               return err;
+       }
+       mnt = mntget(nd.mnt);
+       dentry = dget(nd.dentry);
+       path_release(&nd);
+
+       if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
+               follow_up(&mnt, &dentry);
+
+       list_add_tail(&list, &tagged->mnt_list);
+
+       mutex_lock(&audit_filter_mutex);
+       list_add(&barrier, &tree_list);
+       list_add(&cursor, &barrier);
+
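+       /*
+        * As in audit_trim_trees(), cursor marks our place in tree_list
+        * while the mutex is dropped.  Trees handled by this loop are moved
+        * in front of barrier; the second loop below walks them back to
+        * commit (clear the marks) or revert (trim) the new tags.
+        */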
+       while (cursor.next != &tree_list) {
+               struct audit_tree *tree;
+               struct vfsmount *p;
+
+               tree = container_of(cursor.next, struct audit_tree, list);
+               get_tree(tree);
+               list_del(&cursor);
+               list_add(&cursor, &tree->list);
+               mutex_unlock(&audit_filter_mutex);
+
+               err = path_lookup(tree->pathname, 0, &nd);
+               if (err) {
+                       put_tree(tree);
+                       mutex_lock(&audit_filter_mutex);
+                       continue;
+               }
+
+               spin_lock(&vfsmount_lock);
+               if (!is_under(mnt, dentry, &nd)) {
+                       spin_unlock(&vfsmount_lock);
+                       path_release(&nd);
+                       put_tree(tree);
+                       mutex_lock(&audit_filter_mutex);
+                       continue;
+               }
+               spin_unlock(&vfsmount_lock);
+               path_release(&nd);
+
+               list_for_each_entry(p, &list, mnt_list) {
+                       failed = tag_chunk(p->mnt_root->d_inode, tree);
+                       if (failed)
+                               break;
+               }
+
+               if (failed) {
+                       put_tree(tree);
+                       mutex_lock(&audit_filter_mutex);
+                       break;
+               }
+
+               mutex_lock(&audit_filter_mutex);
+               spin_lock(&hash_lock);
+               if (!tree->goner) {
+                       list_del(&tree->list);
+                       list_add(&tree->list, &tree_list);
+               }
+               spin_unlock(&hash_lock);
+               put_tree(tree);
+       }
+
+       while (barrier.prev != &tree_list) {
+               struct audit_tree *tree;
+
+               tree = container_of(barrier.prev, struct audit_tree, list);
+               get_tree(tree);
+               list_del(&tree->list);
+               list_add(&tree->list, &barrier);
+               mutex_unlock(&audit_filter_mutex);
+
+               if (!failed) {
+                       struct node *node;
+                       spin_lock(&hash_lock);
+                       list_for_each_entry(node, &tree->chunks, list)
+                               node->index &= ~(1U<<31);
+                       spin_unlock(&hash_lock);
+               } else {
+                       trim_marked(tree);
+               }
+
+               put_tree(tree);
+               mutex_lock(&audit_filter_mutex);
+       }
+       list_del(&barrier);
+       list_del(&cursor);
+       list_del(&list);
+       mutex_unlock(&audit_filter_mutex);
+       dput(dentry);
+       mntput(mnt);
+       drop_collected_mounts(tagged);
+       return failed;
+}
+
+/*
+ * That gets run when evict_chunk() ends up needing to kill audit_tree.
+ * Runs from a separate thread, with audit_cmd_mutex held.
+ */
+void audit_prune_trees(void)
+{
+       mutex_lock(&audit_filter_mutex);
+
+       while (!list_empty(&prune_list)) {
+               struct audit_tree *victim;
+
+               victim = list_entry(prune_list.next, struct audit_tree, list);
+               list_del_init(&victim->list);
+
+               mutex_unlock(&audit_filter_mutex);
+
+               prune_one(victim);
+
+               mutex_lock(&audit_filter_mutex);
+       }
+
+       mutex_unlock(&audit_filter_mutex);
+}
+
+/*
+ *  Here comes the stuff asynchronous to auditctl operations
+ */
+
+/* inode->inotify_mutex is locked */
+static void evict_chunk(struct audit_chunk *chunk)
+{
+       struct audit_tree *owner;
+       int n;
+
+       if (chunk->dead)
+               return;
+
+       chunk->dead = 1;
+       mutex_lock(&audit_filter_mutex);
+       spin_lock(&hash_lock);
+       while (!list_empty(&chunk->trees)) {
+               owner = list_entry(chunk->trees.next,
+                                  struct audit_tree, same_root);
+               owner->goner = 1;
+               owner->root = NULL;
+               list_del_init(&owner->same_root);
+               spin_unlock(&hash_lock);
+               kill_rules(owner);
+               list_move(&owner->list, &prune_list);
+               audit_schedule_prune();
+               spin_lock(&hash_lock);
+       }
+       list_del_rcu(&chunk->hash);
+       for (n = 0; n < chunk->count; n++)
+               list_del_init(&chunk->owners[n].list);
+       spin_unlock(&hash_lock);
+       mutex_unlock(&audit_filter_mutex);
+}
+
+static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
+                         u32 cookie, const char *dname, struct inode *inode)
+{
+       struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
+
+       if (mask & IN_IGNORED) {
+               evict_chunk(chunk);
+               put_inotify_watch(watch);
+       }
+}
+
+static void destroy_watch(struct inotify_watch *watch)
+{
+       struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
+       free_chunk(chunk);
+}
+
+static const struct inotify_operations rtree_inotify_ops = {
+       .handle_event   = handle_event,
+       .destroy_watch  = destroy_watch,
+};
+
+static int __init audit_tree_init(void)
+{
+       int i;
+
+       rtree_ih = inotify_init(&rtree_inotify_ops);
+       if (IS_ERR(rtree_ih))
+               audit_panic("cannot initialize inotify handle for rectree watches");
+
+       for (i = 0; i < HASH_SIZE; i++)
+               INIT_LIST_HEAD(&chunk_hash_heads[i]);
+
+       return 0;
+}
+__initcall(audit_tree_init);
 
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
 #endif
 };
 
-static DEFINE_MUTEX(audit_filter_mutex);
+DEFINE_MUTEX(audit_filter_mutex);
 
 /* Inotify handle */
 extern struct inotify_handle *audit_ih;
        kfree(e);
 }
 
-static inline void audit_free_rule_rcu(struct rcu_head *head)
+void audit_free_rule_rcu(struct rcu_head *head)
 {
        struct audit_entry *e = container_of(head, struct audit_entry, rcu);
        audit_free_rule(e);
 
 /* Unpack a filter field's string representation from user-space
  * buffer. */
-static char *audit_unpack_string(void **bufp, size_t *remain, size_t len)
+char *audit_unpack_string(void **bufp, size_t *remain, size_t len)
 {
        char *str;
 
                                 struct audit_field *f)
 {
        if (krule->listnr != AUDIT_FILTER_EXIT ||
-           krule->watch || krule->inode_f)
+           krule->watch || krule->inode_f || krule->tree)
                return -EINVAL;
 
        krule->inode_f = f;
        if (path[0] != '/' || path[len-1] == '/' ||
            krule->listnr != AUDIT_FILTER_EXIT ||
            op & ~AUDIT_EQUAL ||
-           krule->inode_f || krule->watch) /* 1 inode # per rule, for hash */
+           krule->inode_f || krule->watch || krule->tree)
                return -EINVAL;
 
        watch = audit_init_watch(path);
                                goto exit_free;
                        }
                        break;
+               case AUDIT_DIR:
+                       str = audit_unpack_string(&bufp, &remain, f->val);
+                       if (IS_ERR(str))
+                               goto exit_free;
+                       entry->rule.buflen += f->val;
+
+                       err = audit_make_tree(&entry->rule, str, f->op);
+                       kfree(str);
+                       if (err)
+                               goto exit_free;
+                       break;
                case AUDIT_INODE:
                        err = audit_to_inode(&entry->rule, f);
                        if (err)
 }
 
 /* Pack a filter field's string representation into data block. */
-static inline size_t audit_pack_string(void **bufp, char *str)
+static inline size_t audit_pack_string(void **bufp, const char *str)
 {
        size_t len = strlen(str);
 
                        data->buflen += data->values[i] =
                                audit_pack_string(&bufp, krule->watch->path);
                        break;
+               case AUDIT_DIR:
+                       data->buflen += data->values[i] =
+                               audit_pack_string(&bufp,
+                                                 audit_tree_path(krule->tree));
+                       break;
                case AUDIT_FILTERKEY:
                        data->buflen += data->values[i] =
                                audit_pack_string(&bufp, krule->filterkey);
                        if (strcmp(a->watch->path, b->watch->path))
                                return 1;
                        break;
+               case AUDIT_DIR:
+                       if (strcmp(audit_tree_path(a->tree),
+                                  audit_tree_path(b->tree)))
+                               return 1;
+                       break;
                case AUDIT_FILTERKEY:
                        /* both filterkeys exist based on above type compare */
                        if (strcmp(a->filterkey, b->filterkey))
        new->inode_f = old->inode_f;
        new->watch = NULL;
        new->field_count = old->field_count;
+       /*
+        * note that we are OK with not refcounting here; audit_match_tree()
+        * never dereferences tree and we can't get false positives there
+        * since we'd have to have rule gone from the list *and* removed
+        * before the chunks found by lookup had been allocated, i.e. before
+        * the beginning of list scan.
+        */
+       new->tree = old->tree;
        memcpy(new->fields, old->fields, sizeof(struct audit_field) * fcount);
 
        /* deep copy this information, updating the se_rule fields, because
        struct audit_entry *e;
        struct audit_field *inode_f = entry->rule.inode_f;
        struct audit_watch *watch = entry->rule.watch;
+       struct audit_tree *tree = entry->rule.tree;
        struct nameidata *ndp = NULL, *ndw = NULL;
        int h, err;
 #ifdef CONFIG_AUDITSYSCALL
        mutex_unlock(&audit_filter_mutex);
        if (e) {
                err = -EEXIST;
+               /* normally audit_add_tree_rule() will free it on failure */
+               if (tree)
+                       audit_put_tree(tree);
                goto error;
        }
 
                h = audit_hash_ino((u32)watch->ino);
                list = &audit_inode_hash[h];
        }
+       if (tree) {
+               err = audit_add_tree_rule(&entry->rule);
+               if (err) {
+                       mutex_unlock(&audit_filter_mutex);
+                       goto error;
+               }
+       }
 
        if (entry->rule.flags & AUDIT_FILTER_PREPEND) {
                list_add_rcu(&entry->list, list);
        struct audit_entry  *e;
        struct audit_field *inode_f = entry->rule.inode_f;
        struct audit_watch *watch, *tmp_watch = entry->rule.watch;
+       struct audit_tree *tree = entry->rule.tree;
        LIST_HEAD(inotify_list);
        int h, ret = 0;
 #ifdef CONFIG_AUDITSYSCALL
                }
        }
 
+       if (e->rule.tree)
+               audit_remove_tree_rule(&e->rule);
+
        list_del_rcu(&e->list);
        call_rcu(&e->rcu, audit_free_rule_rcu);
 
 out:
        if (tmp_watch)
                audit_put_watch(tmp_watch); /* match initial get */
+       if (tree)
+               audit_put_tree(tree);   /* that's the temporary one */
 
        return ret;
 }
 {
        struct audit_entry *entry, *n, *nentry;
        struct audit_watch *watch;
+       struct audit_tree *tree;
        int i, err = 0;
 
        /* audit_filter_mutex synchronizes the writers */
                                continue;
 
                        watch = entry->rule.watch;
+                       tree = entry->rule.tree;
                        nentry = audit_dupe_rule(&entry->rule, watch);
                        if (unlikely(IS_ERR(nentry))) {
                                /* save the first error encountered for the
                                        list_add(&nentry->rule.rlist,
                                                 &watch->rules);
                                        list_del(&entry->rule.rlist);
-                               }
+                               } else if (tree)
+                                       list_replace_init(&entry->rule.rlist,
+                                                    &nentry->rule.rlist);
                                list_replace_rcu(&entry->list, &nentry->list);
                        }
                        call_rcu(&entry->rcu, audit_free_rule_rcu);
 
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
 #include <linux/binfmts.h>
 #include <linux/highmem.h>
 #include <linux/syscalls.h>
+#include <linux/inotify.h>
 
 #include "audit.h"
 
        int                     pid_count;
 };
 
+struct audit_tree_refs {
+       struct audit_tree_refs *next;
+       struct audit_chunk *c[31];
+};
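+/*
+ * 31 slots plus the next pointer make the struct 32 pointers long; that
+ * is presumably meant to keep the allocation at a power-of-two size (an
+ * assumption - the choice is not spelled out anywhere).
+ */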
+
 /* The per-task audit context. */
 struct audit_context {
        int                 dummy;      /* must be the first element */
        pid_t               target_pid;
        u32                 target_sid;
 
+       struct audit_tree_refs *trees, *first_trees;
+       int tree_count;
+
 #if AUDIT_DEBUG
        int                 put_count;
        int                 ino_count;
        }
 }
 
+/*
+ * We keep a linked list of fixed-size (31 pointer) arrays of audit_chunk *;
+ * ->first_trees points to its beginning, ->trees to the current end of data.
+ * ->tree_count is the number of free entries in the array pointed to by
+ * ->trees.  The initial state is (NULL, NULL, 0); once the list grows we
+ * never revert to NULL - "empty" becomes (p, p, 31) afterwards.  We don't
+ * shrink the list (and seriously, it's going to remain 1-element for almost
+ * any setup) until we free the context itself.  References in it _are_
+ * dropped - at the same time we free/drop aux stuff.
+ */
+
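+/*
+ * A sketch of the intended calling pattern (the real callers are
+ * handle_one() and handle_path() below; p and count are ->trees and
+ * ->tree_count as sampled before the collection started):
+ *
+ *	if (!put_tree_ref(ctx, chunk)) {
+ *		if (grow_tree_refs(ctx))
+ *			put_tree_ref(ctx, chunk);	cannot fail now
+ *		else {
+ *			audit_put_chunk(chunk);		give the ref back
+ *			unroll_tree_refs(ctx, p, count);
+ *		}
+ *	}
+ */
+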
+#ifdef CONFIG_AUDIT_TREE
+static int put_tree_ref(struct audit_context *ctx, struct audit_chunk *chunk)
+{
+       struct audit_tree_refs *p = ctx->trees;
+       int left = ctx->tree_count;
+       if (likely(left)) {
+               p->c[--left] = chunk;
+               ctx->tree_count = left;
+               return 1;
+       }
+       if (!p)
+               return 0;
+       p = p->next;
+       if (p) {
+               p->c[30] = chunk;
+               ctx->trees = p;
+               ctx->tree_count = 30;
+               return 1;
+       }
+       return 0;
+}
+
+static int grow_tree_refs(struct audit_context *ctx)
+{
+       struct audit_tree_refs *p = ctx->trees;
+       ctx->trees = kzalloc(sizeof(struct audit_tree_refs), GFP_KERNEL);
+       if (!ctx->trees) {
+               ctx->trees = p;
+               return 0;
+       }
+       if (p)
+               p->next = ctx->trees;
+       else
+               ctx->first_trees = ctx->trees;
+       ctx->tree_count = 31;
+       return 1;
+}
+#endif
+
+static void unroll_tree_refs(struct audit_context *ctx,
+                     struct audit_tree_refs *p, int count)
+{
+#ifdef CONFIG_AUDIT_TREE
+       struct audit_tree_refs *q;
+       int n;
+       if (!p) {
+               /* we started with empty chain */
+               p = ctx->first_trees;
+               count = 31;
+               /* if the very first allocation has failed, nothing to do */
+               if (!p)
+                       return;
+       }
+       n = count;
+       for (q = p; q != ctx->trees; q = q->next, n = 31) {
+               while (n--) {
+                       audit_put_chunk(q->c[n]);
+                       q->c[n] = NULL;
+               }
+       }
+       while (n-- > ctx->tree_count) {
+               audit_put_chunk(q->c[n]);
+               q->c[n] = NULL;
+       }
+       ctx->trees = p;
+       ctx->tree_count = count;
+#endif
+}
+
+static void free_tree_refs(struct audit_context *ctx)
+{
+       struct audit_tree_refs *p, *q;
+       for (p = ctx->first_trees; p; p = q) {
+               q = p->next;
+               kfree(p);
+       }
+}
+
+static int match_tree_refs(struct audit_context *ctx, struct audit_tree *tree)
+{
+#ifdef CONFIG_AUDIT_TREE
+       struct audit_tree_refs *p;
+       int n;
+       if (!tree)
+               return 0;
+       /* full ones */
+       for (p = ctx->first_trees; p != ctx->trees; p = p->next) {
+               for (n = 0; n < 31; n++)
+                       if (audit_tree_match(p->c[n], tree))
+                               return 1;
+       }
+       /* partial */
+       if (p) {
+               for (n = ctx->tree_count; n < 31; n++)
+                       if (audit_tree_match(p->c[n], tree))
+                               return 1;
+       }
+#endif
+       return 0;
+}
+
 /* Determine if any context name data matches a rule's watch data */
 /* Compare a task_struct with an audit_rule.  Return 1 on match, 0
  * otherwise. */
                                result = (name->dev == rule->watch->dev &&
                                          name->ino == rule->watch->ino);
                        break;
+               case AUDIT_DIR:
+                       if (ctx)
+                               result = match_tree_refs(ctx, rule->tree);
+                       break;
                case AUDIT_LOGINUID:
                        result = 0;
                        if (ctx)
                               context->name_count, count);
                }
                audit_free_names(context);
+               unroll_tree_refs(context, NULL, 0);
+               free_tree_refs(context);
                audit_free_aux(context);
                kfree(context->filterkey);
                kfree(context);
                tsk->audit_context = new_context;
        } else {
                audit_free_names(context);
+               unroll_tree_refs(context, NULL, 0);
                audit_free_aux(context);
                context->aux = NULL;
                context->aux_pids = NULL;
        }
 }
 
+static inline void handle_one(const struct inode *inode)
+{
+#ifdef CONFIG_AUDIT_TREE
+       struct audit_context *context;
+       struct audit_tree_refs *p;
+       struct audit_chunk *chunk;
+       int count;
+       if (likely(list_empty(&inode->inotify_watches)))
+               return;
+       context = current->audit_context;
+       p = context->trees;
+       count = context->tree_count;
+       rcu_read_lock();
+       chunk = audit_tree_lookup(inode);
+       rcu_read_unlock();
+       if (!chunk)
+               return;
+       if (likely(put_tree_ref(context, chunk)))
+               return;
+       if (unlikely(!grow_tree_refs(context))) {
+               printk(KERN_WARNING "out of memory, audit has lost a tree reference\n");
+               audit_set_auditable(context);
+               audit_put_chunk(chunk);
+               unroll_tree_refs(context, p, count);
+               return;
+       }
+       put_tree_ref(context, chunk);
+#endif
+}
+
+static void handle_path(const struct dentry *dentry)
+{
+#ifdef CONFIG_AUDIT_TREE
+       struct audit_context *context;
+       struct audit_tree_refs *p;
+       const struct dentry *d, *parent;
+       struct audit_chunk *drop;
+       unsigned long seq;
+       int count;
+
+       context = current->audit_context;
+       p = context->trees;
+       count = context->tree_count;
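+       /*
+        * Walk from dentry towards the root, collecting a chunk reference
+        * for every watched inode on the way; if rename_lock changes under
+        * us, unroll what we collected so far and retry.
+        */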
+retry:
+       drop = NULL;
+       d = dentry;
+       rcu_read_lock();
+       seq = read_seqbegin(&rename_lock);
+       for (;;) {
+               struct inode *inode = d->d_inode;
+               if (inode && unlikely(!list_empty(&inode->inotify_watches))) {
+                       struct audit_chunk *chunk;
+                       chunk = audit_tree_lookup(inode);
+                       if (chunk) {
+                               if (unlikely(!put_tree_ref(context, chunk))) {
+                                       drop = chunk;
+                                       break;
+                               }
+                       }
+               }
+               parent = d->d_parent;
+               if (parent == d)
+                       break;
+               d = parent;
+       }
+       if (unlikely(read_seqretry(&rename_lock, seq) || drop)) {  /* in this order */
+               rcu_read_unlock();
+               if (!drop) {
+                       /* just a race with rename */
+                       unroll_tree_refs(context, p, count);
+                       goto retry;
+               }
+               audit_put_chunk(drop);
+               if (grow_tree_refs(context)) {
+                       /* OK, got more space */
+                       unroll_tree_refs(context, p, count);
+                       goto retry;
+               }
+               /* too bad */
+               printk(KERN_WARNING
+                       "out of memory, audit has lost a tree reference\n");
+               unroll_tree_refs(context, p, count);
+               audit_set_auditable(context);
+               return;
+       }
+       rcu_read_unlock();
+#endif
+}
+
 /**
  * audit_getname - add a name to the list
  * @name: name to add
 {
        int idx;
        struct audit_context *context = current->audit_context;
-       const struct inode *inode = inode = dentry->d_inode;
+       const struct inode *inode = dentry->d_inode;
 
        if (!context->in_syscall)
                return;
                idx = context->name_count - 1;
                context->names[idx].name = NULL;
        }
+       handle_path(dentry);
        audit_copy_inode(&context->names[idx], inode);
 }
 
        if (!context->in_syscall)
                return;
 
+       if (inode)
+               handle_one(inode);
        /* determine matching parent */
        if (!dname)
                goto add_names;