media: v4l: event: Prevent freeing event subscriptions while accessed
commit ad608fbcf166fec809e402d548761768f602702c upstream.
The event subscriptions are added to the subscribed event list while
holding a spinlock, but that lock is subsequently released while still
accessing the subscription object. This makes it possible to unsubscribe
the event --- and free the subscription object's memory --- while the
subscription object is simultaneously accessed.
Prevent this by adding a mutex to serialise the event subscription and
unsubscription. This also gives a guarantee to the callback ops that the
add op has returned before the del op is called.
This change also results in making the elems field less special:
subscriptions are only added to the event list once they are fully
initialised.
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Reviewed-by: Hans Verkuil <hans.verkuil@cisco.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Cc: stable@vger.kernel.org # for 4.14 and up
Fixes: c3b5b0241f ("V4L/DVB: V4L: Events: Add backend")
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
a488247ff6
commit
0daf8a201b
|
@ -119,14 +119,6 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
|
||||||
if (sev == NULL)
|
if (sev == NULL)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
/*
|
|
||||||
* If the event has been added to the fh->subscribed list, but its
|
|
||||||
* add op has not completed yet elems will be 0, treat this as
|
|
||||||
* not being subscribed.
|
|
||||||
*/
|
|
||||||
if (!sev->elems)
|
|
||||||
return;
|
|
||||||
|
|
||||||
/* Increase event sequence number on fh. */
|
/* Increase event sequence number on fh. */
|
||||||
fh->sequence++;
|
fh->sequence++;
|
||||||
|
|
||||||
|
@ -209,6 +201,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
|
||||||
struct v4l2_subscribed_event *sev, *found_ev;
|
struct v4l2_subscribed_event *sev, *found_ev;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
unsigned i;
|
unsigned i;
|
||||||
|
int ret = 0;
|
||||||
|
|
||||||
if (sub->type == V4L2_EVENT_ALL)
|
if (sub->type == V4L2_EVENT_ALL)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
@ -226,31 +219,36 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
|
||||||
sev->flags = sub->flags;
|
sev->flags = sub->flags;
|
||||||
sev->fh = fh;
|
sev->fh = fh;
|
||||||
sev->ops = ops;
|
sev->ops = ops;
|
||||||
|
sev->elems = elems;
|
||||||
|
|
||||||
|
mutex_lock(&fh->subscribe_lock);
|
||||||
|
|
||||||
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
|
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
|
||||||
found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
|
found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
|
||||||
if (!found_ev)
|
|
||||||
list_add(&sev->list, &fh->subscribed);
|
|
||||||
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
|
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
|
||||||
|
|
||||||
if (found_ev) {
|
if (found_ev) {
|
||||||
|
/* Already listening */
|
||||||
kfree(sev);
|
kfree(sev);
|
||||||
return 0; /* Already listening */
|
goto out_unlock;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (sev->ops && sev->ops->add) {
|
if (sev->ops && sev->ops->add) {
|
||||||
int ret = sev->ops->add(sev, elems);
|
ret = sev->ops->add(sev, elems);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
sev->ops = NULL;
|
kfree(sev);
|
||||||
v4l2_event_unsubscribe(fh, sub);
|
goto out_unlock;
|
||||||
return ret;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Mark as ready for use */
|
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
|
||||||
sev->elems = elems;
|
list_add(&sev->list, &fh->subscribed);
|
||||||
|
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
|
||||||
|
|
||||||
return 0;
|
out_unlock:
|
||||||
|
mutex_unlock(&fh->subscribe_lock);
|
||||||
|
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
|
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
|
||||||
|
|
||||||
|
@ -289,6 +287,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
mutex_lock(&fh->subscribe_lock);
|
||||||
|
|
||||||
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
|
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
|
||||||
|
|
||||||
sev = v4l2_event_subscribed(fh, sub->type, sub->id);
|
sev = v4l2_event_subscribed(fh, sub->type, sub->id);
|
||||||
|
@ -307,6 +307,7 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
|
||||||
sev->ops->del(sev);
|
sev->ops->del(sev);
|
||||||
|
|
||||||
kfree(sev);
|
kfree(sev);
|
||||||
|
mutex_unlock(&fh->subscribe_lock);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -42,6 +42,7 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
|
||||||
INIT_LIST_HEAD(&fh->available);
|
INIT_LIST_HEAD(&fh->available);
|
||||||
INIT_LIST_HEAD(&fh->subscribed);
|
INIT_LIST_HEAD(&fh->subscribed);
|
||||||
fh->sequence = -1;
|
fh->sequence = -1;
|
||||||
|
mutex_init(&fh->subscribe_lock);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(v4l2_fh_init);
|
EXPORT_SYMBOL_GPL(v4l2_fh_init);
|
||||||
|
|
||||||
|
@ -88,6 +89,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
|
||||||
if (fh->vdev == NULL)
|
if (fh->vdev == NULL)
|
||||||
return;
|
return;
|
||||||
v4l2_event_unsubscribe_all(fh);
|
v4l2_event_unsubscribe_all(fh);
|
||||||
|
mutex_destroy(&fh->subscribe_lock);
|
||||||
fh->vdev = NULL;
|
fh->vdev = NULL;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(v4l2_fh_exit);
|
EXPORT_SYMBOL_GPL(v4l2_fh_exit);
|
||||||
|
|
|
@ -41,6 +41,7 @@ struct v4l2_fh {
|
||||||
wait_queue_head_t wait;
|
wait_queue_head_t wait;
|
||||||
struct list_head subscribed; /* Subscribed events */
|
struct list_head subscribed; /* Subscribed events */
|
||||||
struct list_head available; /* Dequeueable event */
|
struct list_head available; /* Dequeueable event */
|
||||||
|
struct mutex subscribe_lock;
|
||||||
unsigned int navailable;
|
unsigned int navailable;
|
||||||
u32 sequence;
|
u32 sequence;
|
||||||
};
|
};
|
||||||
|
|
Loading…
Reference in New Issue