Merge branch 'for-next' of git://git.infradead.org/users/eparis/notify

Pull filesystem notification updates from Eric Paris:
 "This pull mostly is about locking changes in the fsnotify system.  By
  switching the group lock from a spin_lock() to a mutex() we can now
  hold the lock across things like iput().  This fixes a problem
  involving unmounting a fs and having inodes be busy, first pointed out
  by FAT, but reproducible with tmpfs.

  This also restores signal driven I/O for inotify, which has been
  broken since about 2.6.32."

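For context, the "signal driven I/O" mentioned above is the classic FASYNC/SIGIO
path on the inotify descriptor. A minimal userspace sketch of what this series
makes work again (the watched path and the handler are purely illustrative, and
error handling is omitted):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/inotify.h>

    static volatile sig_atomic_t got_sigio;

    static void on_sigio(int sig)
    {
            got_sigio = 1;
    }

    int main(void)
    {
            int fd = inotify_init1(IN_NONBLOCK);

            inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);

            /* ask the kernel to send SIGIO when events are queued */
            signal(SIGIO, on_sigio);
            fcntl(fd, F_SETOWN, getpid());
            fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);

            for (;;) {
                    pause();        /* sleep until SIGIO arrives */
                    if (got_sigio) {
                            char buf[4096];
                            ssize_t len = read(fd, buf, sizeof(buf));

                            printf("SIGIO: drained %zd bytes of events\n", len);
                            got_sigio = 0;
                    }
            }
    }

Before this series the SIGIO simply never arrived; afterwards inotify and
fanotify share the generic fsnotify_fasync()/group->fsn_fa plumbing visible in
the diff below.
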
Ugh.  I *hate* the timing of this.  It was rebased after the merge
window opened, and then left to sit with the pull request coming the day
before the merge window closes.  That's just crap.  But apparently the
patches themselves have been around for over a year, just gathering
dust, so now it's suddenly critical.

Fixed up semantic conflict in fs/notify/fdinfo.c as per Stephen
Rothwell's fixes from -next.

* 'for-next' of git://git.infradead.org/users/eparis/notify:
  inotify: automatically restart syscalls
  inotify: dont skip removal of watch descriptor if creation of ignored event failed
  fanotify: dont merge permission events
  fsnotify: make fasync generic for both inotify and fanotify
  fsnotify: change locking order
  fsnotify: dont put marks on temporary list when clearing marks by group
  fsnotify: introduce locked versions of fsnotify_add_mark() and fsnotify_remove_mark()
  fsnotify: pass group to fsnotify_destroy_mark()
  fsnotify: use a mutex instead of a spinlock to protect a groups mark list
  fanotify: add an extra flag to mark_remove_from_mask that indicates wheather a mark should be destroyed
  fsnotify: take groups mark_lock before mark lock
  fsnotify: use reference counting for groups
  fsnotify: introduce fsnotify_get_group()
  inotify, fanotify: replace fsnotify_put_group() with fsnotify_destroy_group()
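
The practical upshot for backends is sketched below. This is a hypothetical
helper, not code from the series, built only from the interfaces that appear in
the diff: the group is reference counted, group->mark_mutex is taken ahead of
mark->lock and may be held across blocking operations (ultimately the iput()
that unpins an inode), and fsnotify_destroy_mark() now takes the group
explicitly.

    /* Hypothetical usage sketch for a kernel-side fsnotify backend. */
    #include <linux/fs.h>
    #include <linux/mutex.h>
    #include <linux/fsnotify_backend.h>

    static int example_add_then_remove(struct fsnotify_group *group,
                                       struct inode *inode,
                                       struct fsnotify_mark *mark)
    {
            int ret;

            /* lock order: group->mark_mutex, then mark->lock, then i_lock */
            mutex_lock(&group->mark_mutex);
            ret = fsnotify_add_mark_locked(mark, group, inode, NULL, 0);
            mutex_unlock(&group->mark_mutex);
            if (ret)
                    return ret;

            /*
             * ... later: the group is passed explicitly, and the mutex
             * (unlike the old spinlock) may be held across the iput()
             * that unpins the inode.
             */
            fsnotify_destroy_mark(mark, group);
            fsnotify_put_mark(mark);        /* drop the caller's reference */
            return 0;
    }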
Linus Torvalds 2012-12-20 20:11:52 -08:00
commit 96680d2b91
14 changed files with 180 additions and 121 deletions

fs/notify/dnotify/dnotify.c

@@ -201,7 +201,7 @@ void dnotify_flush(struct file *filp, fl_owner_t id)
/* nothing else could have found us thanks to the dnotify_mark_mutex */
if (dn_mark->dn == NULL)
fsnotify_destroy_mark(fsn_mark);
fsnotify_destroy_mark(fsn_mark, dnotify_group);
mutex_unlock(&dnotify_mark_mutex);
@@ -385,7 +385,7 @@ out:
spin_unlock(&fsn_mark->lock);
if (destroy)
fsnotify_destroy_mark(fsn_mark);
fsnotify_destroy_mark(fsn_mark, dnotify_group);
mutex_unlock(&dnotify_mark_mutex);
fsnotify_put_mark(fsn_mark);

fs/notify/fanotify/fanotify.c

@@ -18,6 +18,12 @@ static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new)
old->tgid == new->tgid) {
switch (old->data_type) {
case (FSNOTIFY_EVENT_PATH):
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
/* dont merge two permission events */
if ((old->mask & FAN_ALL_PERM_EVENTS) &&
(new->mask & FAN_ALL_PERM_EVENTS))
return false;
#endif
if ((old->path.mnt == new->path.mnt) &&
(old->path.dentry == new->path.dentry))
return true;

fs/notify/fanotify/fanotify_user.c

@@ -397,8 +397,12 @@ static int fanotify_release(struct inode *ignored, struct file *file)
wake_up(&group->fanotify_data.access_waitq);
#endif
if (file->f_flags & FASYNC)
fsnotify_fasync(-1, file, 0);
/* matches the fanotify_init->fsnotify_alloc_group */
fsnotify_put_group(group);
fsnotify_destroy_group(group);
return 0;
}
@@ -493,7 +497,8 @@ out:
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
__u32 mask,
unsigned int flags)
unsigned int flags,
int *destroy)
{
__u32 oldmask;
@@ -507,8 +512,7 @@ static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
}
spin_unlock(&fsn_mark->lock);
if (!(oldmask & ~mask))
fsnotify_destroy_mark(fsn_mark);
*destroy = !(oldmask & ~mask);
return mask & oldmask;
}
@@ -519,12 +523,17 @@ static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
{
struct fsnotify_mark *fsn_mark = NULL;
__u32 removed;
int destroy_mark;
fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
if (!fsn_mark)
return -ENOENT;
removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
&destroy_mark);
if (destroy_mark)
fsnotify_destroy_mark(fsn_mark, group);
fsnotify_put_mark(fsn_mark);
if (removed & real_mount(mnt)->mnt_fsnotify_mask)
fsnotify_recalc_vfsmount_mask(mnt);
@@ -538,12 +547,16 @@ static int fanotify_remove_inode_mark(struct fsnotify_group *group,
{
struct fsnotify_mark *fsn_mark = NULL;
__u32 removed;
int destroy_mark;
fsn_mark = fsnotify_find_inode_mark(group, inode);
if (!fsn_mark)
return -ENOENT;
removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
&destroy_mark);
if (destroy_mark)
fsnotify_destroy_mark(fsn_mark, group);
/* matches the fsnotify_find_inode_mark() */
fsnotify_put_mark(fsn_mark);
if (removed & inode->i_fsnotify_mask)
@@ -710,13 +723,13 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
break;
default:
fd = -EINVAL;
goto out_put_group;
goto out_destroy_group;
}
if (flags & FAN_UNLIMITED_QUEUE) {
fd = -EPERM;
if (!capable(CAP_SYS_ADMIN))
goto out_put_group;
goto out_destroy_group;
group->max_events = UINT_MAX;
} else {
group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
@@ -725,7 +738,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
if (flags & FAN_UNLIMITED_MARKS) {
fd = -EPERM;
if (!capable(CAP_SYS_ADMIN))
goto out_put_group;
goto out_destroy_group;
group->fanotify_data.max_marks = UINT_MAX;
} else {
group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
@@ -733,12 +746,12 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
if (fd < 0)
goto out_put_group;
goto out_destroy_group;
return fd;
out_put_group:
fsnotify_put_group(group);
out_destroy_group:
fsnotify_destroy_group(group);
return fd;
}

fs/notify/fdinfo.c

@@ -27,13 +27,13 @@ static int show_fdinfo(struct seq_file *m, struct file *f,
struct fsnotify_mark *mark;
int ret = 0;
spin_lock(&group->mark_lock);
mutex_lock(&group->mark_mutex);
list_for_each_entry(mark, &group->marks_list, g_list) {
ret = show(m, mark);
if (ret)
break;
}
spin_unlock(&group->mark_lock);
mutex_unlock(&group->mark_mutex);
return ret;
}

fs/notify/group.c

@@ -33,9 +33,6 @@
*/
void fsnotify_final_destroy_group(struct fsnotify_group *group)
{
/* clear the notification queue of all events */
fsnotify_flush_notify(group);
if (group->ops->free_group_priv)
group->ops->free_group_priv(group);
@@ -43,23 +40,30 @@ void fsnotify_final_destroy_group(struct fsnotify_group *group)
}
/*
* Trying to get rid of a group. We need to first get rid of any outstanding
* allocations and then free the group. Remember that fsnotify_clear_marks_by_group
* could miss marks that are being freed by inode and those marks could still
* hold a reference to this group (via group->num_marks) If we get into that
* situtation, the fsnotify_final_destroy_group will get called when that final
* mark is freed.
* Trying to get rid of a group. Remove all marks, flush all events and release
* the group reference.
* Note that another thread calling fsnotify_clear_marks_by_group() may still
* hold a ref to the group.
*/
static void fsnotify_destroy_group(struct fsnotify_group *group)
void fsnotify_destroy_group(struct fsnotify_group *group)
{
/* clear all inode marks for this group */
fsnotify_clear_marks_by_group(group);
synchronize_srcu(&fsnotify_mark_srcu);
/* past the point of no return, matches the initial value of 1 */
if (atomic_dec_and_test(&group->num_marks))
fsnotify_final_destroy_group(group);
/* clear the notification queue of all events */
fsnotify_flush_notify(group);
fsnotify_put_group(group);
}
/*
* Get reference to a group.
*/
void fsnotify_get_group(struct fsnotify_group *group)
{
atomic_inc(&group->refcnt);
}
/*
@@ -68,7 +72,7 @@ static void fsnotify_destroy_group(struct fsnotify_group *group)
void fsnotify_put_group(struct fsnotify_group *group)
{
if (atomic_dec_and_test(&group->refcnt))
fsnotify_destroy_group(group);
fsnotify_final_destroy_group(group);
}
/*
@@ -84,21 +88,24 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
/* set to 0 when there a no external references to this group */
atomic_set(&group->refcnt, 1);
/*
* hits 0 when there are no external references AND no marks for
* this group
*/
atomic_set(&group->num_marks, 1);
atomic_set(&group->num_marks, 0);
mutex_init(&group->notification_mutex);
INIT_LIST_HEAD(&group->notification_list);
init_waitqueue_head(&group->notification_waitq);
group->max_events = UINT_MAX;
spin_lock_init(&group->mark_lock);
mutex_init(&group->mark_mutex);
INIT_LIST_HEAD(&group->marks_list);
group->ops = ops;
return group;
}
int fsnotify_fasync(int fd, struct file *file, int on)
{
struct fsnotify_group *group = file->private_data;
return fasync_helper(fd, file, on, &group->fsn_fa) >= 0 ? 0 : -EIO;
}

fs/notify/inode_mark.c

@@ -63,8 +63,8 @@ void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
{
struct inode *inode = mark->i.inode;
BUG_ON(!mutex_is_locked(&mark->group->mark_mutex));
assert_spin_locked(&mark->lock);
assert_spin_locked(&mark->group->mark_lock);
spin_lock(&inode->i_lock);
@@ -99,8 +99,16 @@ void fsnotify_clear_marks_by_inode(struct inode *inode)
spin_unlock(&inode->i_lock);
list_for_each_entry_safe(mark, lmark, &free_list, i.free_i_list) {
fsnotify_destroy_mark(mark);
struct fsnotify_group *group;
spin_lock(&mark->lock);
fsnotify_get_group(mark->group);
group = mark->group;
spin_unlock(&mark->lock);
fsnotify_destroy_mark(mark, group);
fsnotify_put_mark(mark);
fsnotify_put_group(group);
}
}
@@ -192,8 +200,8 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
mark->flags |= FSNOTIFY_MARK_FLAG_INODE;
BUG_ON(!mutex_is_locked(&group->mark_mutex));
assert_spin_locked(&mark->lock);
assert_spin_locked(&group->mark_lock);
spin_lock(&inode->i_lock);

fs/notify/inotify/inotify_fsnotify.c

@@ -118,6 +118,7 @@ static int inotify_handle_event(struct fsnotify_group *group,
fsn_event_priv = &event_priv->fsnotify_event_priv_data;
fsnotify_get_group(group);
fsn_event_priv->group = group;
event_priv->wd = wd;
@@ -131,7 +132,7 @@
}
if (inode_mark->mask & IN_ONESHOT)
fsnotify_destroy_mark(inode_mark);
fsnotify_destroy_mark(inode_mark, group);
return ret;
}
@@ -210,6 +211,7 @@ void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
event_priv = container_of(fsn_event_priv, struct inotify_event_private_data,
fsnotify_event_priv_data);
fsnotify_put_group(fsn_event_priv->group);
kmem_cache_free(event_priv_cachep, event_priv);
}

fs/notify/inotify/inotify_user.c

@@ -265,7 +265,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
ret = -EAGAIN;
if (file->f_flags & O_NONBLOCK)
break;
ret = -EINTR;
ret = -ERESTARTSYS;
if (signal_pending(current))
break;
@@ -281,23 +281,17 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
return ret;
}
static int inotify_fasync(int fd, struct file *file, int on)
{
struct fsnotify_group *group = file->private_data;
return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
}
static int inotify_release(struct inode *ignored, struct file *file)
{
struct fsnotify_group *group = file->private_data;
pr_debug("%s: group=%p\n", __func__, group);
fsnotify_clear_marks_by_group(group);
if (file->f_flags & FASYNC)
fsnotify_fasync(-1, file, 0);
/* free this group, matching get was inotify_init->fsnotify_obtain_group */
fsnotify_put_group(group);
fsnotify_destroy_group(group);
return 0;
}
@@ -339,7 +333,7 @@ static const struct file_operations inotify_fops = {
.show_fdinfo = inotify_show_fdinfo,
.poll = inotify_poll,
.read = inotify_read,
.fasync = inotify_fasync,
.fasync = fsnotify_fasync,
.release = inotify_release,
.unlocked_ioctl = inotify_ioctl,
.compat_ioctl = inotify_ioctl,
@@ -521,13 +515,13 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
struct fsnotify_event_private_data *fsn_event_priv;
int ret;
i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
FSNOTIFY_EVENT_NONE, NULL, 0,
GFP_NOFS);
if (!ignored_event)
return;
i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
goto skip_send_ignore;
event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
if (unlikely(!event_priv))
@@ -535,6 +529,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
fsn_event_priv = &event_priv->fsnotify_event_priv_data;
fsnotify_get_group(group);
fsn_event_priv->group = group;
event_priv->wd = i_mark->wd;
@@ -548,9 +543,9 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
}
skip_send_ignore:
/* matches the reference taken when the event was created */
fsnotify_put_event(ignored_event);
if (ignored_event)
fsnotify_put_event(ignored_event);
/* remove this mark from the idr */
inotify_remove_from_idr(group, i_mark);
@@ -709,12 +704,11 @@ static struct fsnotify_group *inotify_new_group(unsigned int max_events)
spin_lock_init(&group->inotify_data.idr_lock);
idr_init(&group->inotify_data.idr);
group->inotify_data.last_wd = 0;
group->inotify_data.fa = NULL;
group->inotify_data.user = get_current_user();
if (atomic_inc_return(&group->inotify_data.user->inotify_devs) >
inotify_max_user_instances) {
fsnotify_put_group(group);
fsnotify_destroy_group(group);
return ERR_PTR(-EMFILE);
}
@@ -743,7 +737,7 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
ret = anon_inode_getfd("inotify", &inotify_fops, group,
O_RDONLY | flags);
if (ret < 0)
fsnotify_put_group(group);
fsnotify_destroy_group(group);
return ret;
}
@@ -819,7 +813,7 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
ret = 0;
fsnotify_destroy_mark(&i_mark->fsn_mark);
fsnotify_destroy_mark(&i_mark->fsn_mark, group);
/* match ref taken by inotify_idr_find */
fsnotify_put_mark(&i_mark->fsn_mark);

fs/notify/mark.c

@@ -109,8 +109,11 @@ void fsnotify_get_mark(struct fsnotify_mark *mark)
void fsnotify_put_mark(struct fsnotify_mark *mark)
{
if (atomic_dec_and_test(&mark->refcnt))
if (atomic_dec_and_test(&mark->refcnt)) {
if (mark->group)
fsnotify_put_group(mark->group);
mark->free_mark(mark);
}
}
/*
@@ -118,14 +121,14 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
* The caller had better be holding a reference to this mark so we don't actually
* do the final put under the mark->lock
*/
void fsnotify_destroy_mark(struct fsnotify_mark *mark)
void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
struct fsnotify_group *group)
{
struct fsnotify_group *group;
struct inode *inode = NULL;
spin_lock(&mark->lock);
BUG_ON(!mutex_is_locked(&group->mark_mutex));
group = mark->group;
spin_lock(&mark->lock);
/* something else already called this function on this mark */
if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
@@ -135,8 +138,6 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
spin_lock(&group->mark_lock);
if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
inode = mark->i.inode;
fsnotify_destroy_inode_mark(mark);
@@ -147,13 +148,22 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
list_del_init(&mark->g_list);
spin_unlock(&group->mark_lock);
spin_unlock(&mark->lock);
if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
iput(inode);
/* release lock temporarily */
mutex_unlock(&group->mark_mutex);
spin_lock(&destroy_lock);
list_add(&mark->destroy_list, &destroy_list);
spin_unlock(&destroy_lock);
wake_up(&destroy_waitq);
/*
* We don't necessarily have a ref on mark from caller so the above destroy
* may have actually freed it, unless this group provides a 'freeing_mark'
* function which must be holding a reference.
*/
/*
* Some groups like to know that marks are being freed. This is a
@@ -175,21 +185,17 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
* is just a lazy update (and could be a perf win...)
*/
if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
iput(inode);
atomic_dec(&group->num_marks);
/*
* We don't necessarily have a ref on mark from caller so the above iput
* may have already destroyed it. Don't touch from now on.
*/
mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
}
/*
* it's possible that this group tried to destroy itself, but this
* this mark was simultaneously being freed by inode. If that's the
* case, we finish freeing the group here.
*/
if (unlikely(atomic_dec_and_test(&group->num_marks)))
fsnotify_final_destroy_group(group);
void fsnotify_destroy_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group)
{
mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
fsnotify_destroy_mark_locked(mark, group);
mutex_unlock(&group->mark_mutex);
}
void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask)
@@ -214,26 +220,26 @@ void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mas
* These marks may be used for the fsnotify backend to determine which
* event types should be delivered to which group.
*/
int fsnotify_add_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group, struct inode *inode,
struct vfsmount *mnt, int allow_dups)
int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
struct fsnotify_group *group, struct inode *inode,
struct vfsmount *mnt, int allow_dups)
{
int ret = 0;
BUG_ON(inode && mnt);
BUG_ON(!inode && !mnt);
BUG_ON(!mutex_is_locked(&group->mark_mutex));
/*
* LOCKING ORDER!!!!
* group->mark_mutex
* mark->lock
* group->mark_lock
* inode->i_lock
*/
spin_lock(&mark->lock);
spin_lock(&group->mark_lock);
mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE;
fsnotify_get_group(group);
mark->group = group;
list_add(&mark->g_list, &group->marks_list);
atomic_inc(&group->num_marks);
@@ -251,11 +257,8 @@ int fsnotify_add_mark(struct fsnotify_mark *mark,
BUG();
}
spin_unlock(&group->mark_lock);
/* this will pin the object if appropriate */
fsnotify_set_mark_mask_locked(mark, mark->mask);
spin_unlock(&mark->lock);
if (inode)
@@ -265,10 +268,10 @@ int fsnotify_add_mark(struct fsnotify_mark *mark,
err:
mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
list_del_init(&mark->g_list);
fsnotify_put_group(group);
mark->group = NULL;
atomic_dec(&group->num_marks);
spin_unlock(&group->mark_lock);
spin_unlock(&mark->lock);
spin_lock(&destroy_lock);
@@ -279,6 +282,16 @@ err:
return ret;
}
int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
struct inode *inode, struct vfsmount *mnt, int allow_dups)
{
int ret;
mutex_lock(&group->mark_mutex);
ret = fsnotify_add_mark_locked(mark, group, inode, mnt, allow_dups);
mutex_unlock(&group->mark_mutex);
return ret;
}
/*
* clear any marks in a group in which mark->flags & flags is true
*/
@@ -286,22 +299,16 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
unsigned int flags)
{
struct fsnotify_mark *lmark, *mark;
LIST_HEAD(free_list);
spin_lock(&group->mark_lock);
mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
if (mark->flags & flags) {
list_add(&mark->free_g_list, &free_list);
list_del_init(&mark->g_list);
fsnotify_get_mark(mark);
fsnotify_destroy_mark_locked(mark, group);
fsnotify_put_mark(mark);
}
}
spin_unlock(&group->mark_lock);
list_for_each_entry_safe(mark, lmark, &free_list, free_g_list) {
fsnotify_destroy_mark(mark);
fsnotify_put_mark(mark);
}
mutex_unlock(&group->mark_mutex);
}
/*
@@ -317,6 +324,8 @@ void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *ol
assert_spin_locked(&old->lock);
new->i.inode = old->i.inode;
new->m.mnt = old->m.mnt;
if (old->group)
fsnotify_get_group(old->group);
new->group = old->group;
new->mask = old->mask;
new->free_mark = old->free_mark;

fs/notify/notification.c

@@ -225,6 +225,7 @@ alloc_holder:
mutex_unlock(&group->notification_mutex);
wake_up(&group->notification_waitq);
kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
return return_event;
}

fs/notify/vfsmount_mark.c

@@ -46,8 +46,16 @@ void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
spin_unlock(&mnt->mnt_root->d_lock);
list_for_each_entry_safe(mark, lmark, &free_list, m.free_m_list) {
fsnotify_destroy_mark(mark);
struct fsnotify_group *group;
spin_lock(&mark->lock);
fsnotify_get_group(mark->group);
group = mark->group;
spin_unlock(&mark->lock);
fsnotify_destroy_mark(mark, group);
fsnotify_put_mark(mark);
fsnotify_put_group(group);
}
}
@@ -88,8 +96,8 @@ void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark)
{
struct vfsmount *mnt = mark->m.mnt;
BUG_ON(!mutex_is_locked(&mark->group->mark_mutex));
assert_spin_locked(&mark->lock);
assert_spin_locked(&mark->group->mark_lock);
spin_lock(&mnt->mnt_root->d_lock);
@@ -151,8 +159,8 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT;
BUG_ON(!mutex_is_locked(&group->mark_mutex));
assert_spin_locked(&mark->lock);
assert_spin_locked(&group->mark_lock);
spin_lock(&mnt->mnt_root->d_lock);

include/linux/fsnotify_backend.h

@@ -88,9 +88,10 @@ struct fsnotify_event_private_data;
* if the group is interested in this event.
* handle_event - main call for a group to handle an fs event
* free_group_priv - called when a group refcnt hits 0 to clean up the private union
* freeing-mark - this means that a mark has been flagged to die when everything
* finishes using it. The function is supplied with what must be a
* valid group and inode to use to clean up.
* freeing_mark - called when a mark is being destroyed for some reason. The group
* MUST be holding a reference on each mark and that reference must be
* dropped in this function. inotify uses this function to send
* userspace messages that marks have been removed.
*/
struct fsnotify_ops {
bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode,
@@ -141,12 +142,14 @@ struct fsnotify_group {
unsigned int priority;
/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
spinlock_t mark_lock; /* protect marks_list */
struct mutex mark_mutex; /* protect marks_list */
atomic_t num_marks; /* 1 for each mark and 1 for not being
* past the point of no return when freeing
* a group */
struct list_head marks_list; /* all inode marks for this group */
struct fasync_struct *fsn_fa; /* async notification */
/* groups can define private fields here or use the void *private */
union {
void *private;
@@ -155,7 +158,6 @@ struct fsnotify_group {
spinlock_t idr_lock;
struct idr idr;
u32 last_wd;
struct fasync_struct *fa; /* async notification */
struct user_struct *user;
} inotify_data;
#endif
@@ -287,7 +289,6 @@ struct fsnotify_mark {
struct fsnotify_inode_mark i;
struct fsnotify_vfsmount_mark m;
};
struct list_head free_g_list; /* tmp list used when freeing this mark */
__u32 ignored_mask; /* events types to ignore */
#define FSNOTIFY_MARK_FLAG_INODE 0x01
#define FSNOTIFY_MARK_FLAG_VFSMOUNT 0x02
@@ -360,11 +361,16 @@ static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode
/* called from fsnotify listeners, such as fanotify or dnotify */
/* get a reference to an existing or create a new group */
/* create a new group */
extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops);
/* get reference to a group */
extern void fsnotify_get_group(struct fsnotify_group *group);
/* drop reference on a group from fsnotify_alloc_group */
extern void fsnotify_put_group(struct fsnotify_group *group);
/* destroy group */
extern void fsnotify_destroy_group(struct fsnotify_group *group);
/* fasync handler function */
extern int fsnotify_fasync(int fd, struct file *file, int on);
/* take a reference to an event */
extern void fsnotify_get_event(struct fsnotify_event *event);
extern void fsnotify_put_event(struct fsnotify_event *event);
@@ -405,8 +411,13 @@ extern void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask
/* attach the mark to both the group and the inode */
extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
struct inode *inode, struct vfsmount *mnt, int allow_dups);
/* given a mark, flag it to be freed when all references are dropped */
extern void fsnotify_destroy_mark(struct fsnotify_mark *mark);
extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, struct fsnotify_group *group,
struct inode *inode, struct vfsmount *mnt, int allow_dups);
/* given a group and a mark, flag mark to be freed when all references are dropped */
extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group);
extern void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
struct fsnotify_group *group);
/* run all the marks in a group, and clear all of the vfsmount marks */
extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group);
/* run all the marks in a group, and clear all of the inode marks */

kernel/audit_tree.c

@@ -249,7 +249,7 @@ static void untag_chunk(struct node *p)
list_del_rcu(&chunk->hash);
spin_unlock(&hash_lock);
spin_unlock(&entry->lock);
fsnotify_destroy_mark(entry);
fsnotify_destroy_mark(entry, audit_tree_group);
goto out;
}
@@ -291,7 +291,7 @@ static void untag_chunk(struct node *p)
owner->root = new;
spin_unlock(&hash_lock);
spin_unlock(&entry->lock);
fsnotify_destroy_mark(entry);
fsnotify_destroy_mark(entry, audit_tree_group);
fsnotify_put_mark(&new->mark); /* drop initial reference */
goto out;
@@ -331,7 +331,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
spin_unlock(&hash_lock);
chunk->dead = 1;
spin_unlock(&entry->lock);
fsnotify_destroy_mark(entry);
fsnotify_destroy_mark(entry, audit_tree_group);
fsnotify_put_mark(entry);
return 0;
}
@@ -412,7 +412,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
spin_unlock(&chunk_entry->lock);
spin_unlock(&old_entry->lock);
fsnotify_destroy_mark(chunk_entry);
fsnotify_destroy_mark(chunk_entry, audit_tree_group);
fsnotify_put_mark(chunk_entry);
fsnotify_put_mark(old_entry);
@@ -443,7 +443,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
spin_unlock(&hash_lock);
spin_unlock(&chunk_entry->lock);
spin_unlock(&old_entry->lock);
fsnotify_destroy_mark(old_entry);
fsnotify_destroy_mark(old_entry, audit_tree_group);
fsnotify_put_mark(chunk_entry); /* drop initial reference */
fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
return 0;

kernel/audit_watch.c

@@ -350,7 +350,7 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
}
mutex_unlock(&audit_filter_mutex);
fsnotify_destroy_mark(&parent->mark);
fsnotify_destroy_mark(&parent->mark, audit_watch_group);
}
/* Get path information necessary for adding watches. */
@@ -457,7 +457,7 @@ void audit_remove_watch_rule(struct audit_krule *krule)
if (list_empty(&parent->watches)) {
audit_get_parent(parent);
fsnotify_destroy_mark(&parent->mark);
fsnotify_destroy_mark(&parent->mark, audit_watch_group);
audit_put_parent(parent);
}
}