author | Amir Goldstein <amir73il@gmail.com> | 2018-06-23 17:54:49 +0300
---|---|---
committer | Jan Kara <jack@suse.cz> | 2018-06-27 13:45:05 +0200
commit | 36f10f55ff1d2867bfc48ed898a9cc0dc6b49dd2 (patch) |
tree | 0b7a840c5c8e0b2bd44aeca2c21a144b25d89457 /kernel/audit_tree.c |
parent | b812a9f5896379b6cff2ac168ddb5b89037d8e78 (diff) |
download | linux-36f10f55ff1d2867bfc48ed898a9cc0dc6b49dd2.tar.bz2 |
fsnotify: let connector point to an abstract object
Make the code that attaches/detaches a connector to an object more generic
by letting the fsnotify connector point to an abstract fsnotify_connp_t.
Code that needs to dereference an inode or mount object now uses the
helpers fsnotify_conn_{inode,mount}.
Signed-off-by: Amir Goldstein <amir73il@gmail.com>
Signed-off-by: Jan Kara <jack@suse.cz>
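
The fsnotify_conn_{inode,mount} helpers mentioned in the message are not part of this diff. The sketch below is an assumption about their shape, not code taken from the patch: it shows how such helpers can be built with container_of(), given that connector->obj points at the mark list embedded in the owning object. The field name i_fsnotify_marks appears in the diff below; mnt_fsnotify_marks for struct mount is assumed here.

```c
/*
 * Sketch only, not taken from this diff: recover the owning object from
 * the abstract connector->obj pointer.  Assumes ->obj points at the mark
 * list embedded in the object (i_fsnotify_marks in struct inode,
 * mnt_fsnotify_marks in struct mount -- the latter is an assumption).
 */
static inline struct inode *fsnotify_conn_inode(
				struct fsnotify_mark_connector *conn)
{
	return container_of(conn->obj, struct inode, i_fsnotify_marks);
}

static inline struct mount *fsnotify_conn_mount(
				struct fsnotify_mark_connector *conn)
{
	return container_of(conn->obj, struct mount, mnt_fsnotify_marks);
}
```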
Diffstat (limited to 'kernel/audit_tree.c')
-rw-r--r-- | kernel/audit_tree.c | 17
1 file changed, 9 insertions, 8 deletions
```diff
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index c99ebaae5abc..02feef939560 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -168,7 +168,8 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
 /* Function to return search key in our hash from inode. */
 static unsigned long inode_to_key(const struct inode *inode)
 {
-	return (unsigned long)inode;
+	/* Use address pointed to by connector->obj as the key */
+	return (unsigned long)&inode->i_fsnotify_marks;
 }
 
 /*
@@ -183,7 +184,7 @@ static unsigned long chunk_to_key(struct audit_chunk *chunk)
 	 */
 	if (WARN_ON_ONCE(!chunk->mark.connector))
 		return 0;
-	return (unsigned long)chunk->mark.connector->inode;
+	return (unsigned long)chunk->mark.connector->obj;
 }
 
 static inline struct list_head *chunk_hash(unsigned long key)
@@ -258,7 +259,7 @@ static void untag_chunk(struct node *p)
 	spin_lock(&entry->lock);
 	/*
 	 * mark_mutex protects mark from getting detached and thus also from
-	 * mark->connector->inode getting NULL.
+	 * mark->connector->obj getting NULL.
 	 */
 	if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
 		spin_unlock(&entry->lock);
@@ -288,8 +289,8 @@ static void untag_chunk(struct node *p)
 	if (!new)
 		goto Fallback;
 
-	if (fsnotify_add_inode_mark_locked(&new->mark, entry->connector->inode,
-					   1)) {
+	if (fsnotify_add_mark_locked(&new->mark, entry->connector->obj,
+				     FSNOTIFY_OBJ_TYPE_INODE, 1)) {
 		fsnotify_put_mark(&new->mark);
 		goto Fallback;
 	}
@@ -423,7 +424,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	spin_lock(&old_entry->lock);
 	/*
 	 * mark_mutex protects mark from getting detached and thus also from
-	 * mark->connector->inode getting NULL.
+	 * mark->connector->obj getting NULL.
 	 */
 	if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
 		/* old_entry is being shot, lets just lie */
@@ -434,8 +435,8 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 		return -ENOENT;
 	}
 
-	if (fsnotify_add_inode_mark_locked(chunk_entry,
-					   old_entry->connector->inode, 1)) {
+	if (fsnotify_add_mark_locked(chunk_entry, old_entry->connector->obj,
+				     FSNOTIFY_OBJ_TYPE_INODE, 1)) {
 		spin_unlock(&old_entry->lock);
 		mutex_unlock(&old_entry->group->mark_mutex);
 		fsnotify_put_mark(chunk_entry);
```
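
To see why inode_to_key() now returns &inode->i_fsnotify_marks, note that chunk_to_key() keys on connector->obj, and after this change the connector stores the address of the object's embedded mark list rather than the inode itself; both lookup paths must keep hashing the same value. Below is a minimal userspace-style sketch of that invariant. The structures are simplified stand-ins, and chunk_to_key() is shown taking the connector directly, unlike the kernel function, which reaches it through the audit_chunk's mark.

```c
#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures touched by this patch. */
struct fsnotify_connp { void *first_mark; };        /* plays fsnotify_connp_t */
struct inode { unsigned long i_ino; struct fsnotify_connp i_fsnotify_marks; };
struct fsnotify_mark_connector { struct fsnotify_connp *obj; };

/* After the patch: the hash key is the address of the embedded mark list. */
static unsigned long inode_to_key(const struct inode *inode)
{
	return (unsigned long)&inode->i_fsnotify_marks;
}

/* Simplified: the real chunk_to_key() reaches the connector via the chunk's mark. */
static unsigned long chunk_to_key(const struct fsnotify_mark_connector *conn)
{
	return (unsigned long)conn->obj;
}

int main(void)
{
	struct inode inode = { .i_ino = 42 };
	/* Attaching a mark records where the object's mark list lives. */
	struct fsnotify_mark_connector conn = { .obj = &inode.i_fsnotify_marks };

	/* Both lookup paths must compute the same key, or tag/untag breaks. */
	assert(inode_to_key(&inode) == chunk_to_key(&conn));
	printf("hash key = %#lx\n", inode_to_key(&inode));
	return 0;
}
```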