mirror of
https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
synced 2024-09-20 19:34:46 +00:00
bdi: avoid oops on device removal
commit 5acda9d12dcf1ad0d9a5a2a7c646de3472fa7555 upstream. After commit 839a8e8660
("writeback: replace custom worker pool implementation with unbound workqueue") when a device is removed while we are writing to it we crash in bdi_writeback_workfn() -> set_worker_desc() because bdi->dev is NULL. This can happen because even though bdi_unregister() cancels all pending flushing work, nothing really prevents new ones from being queued from balance_dirty_pages() or other places. Fix the problem by clearing the BDI_registered bit in bdi_unregister() and checking it before scheduling any flushing work. Fixes: 839a8e8660
Reviewed-by: Tejun Heo <tj@kernel.org> Signed-off-by: Jan Kara <jack@suse.cz> Cc: Derek Basehore <dbasehore@chromium.org> Cc: Jens Axboe <axboe@kernel.dk> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
39305a6ac7
commit
bf0972039d
|
@ -87,16 +87,29 @@ static inline struct inode *wb_inode(struct list_head *head)
|
|||
#define CREATE_TRACE_POINTS
|
||||
#include <trace/events/writeback.h>
|
||||
|
||||
static void bdi_wakeup_thread(struct backing_dev_info *bdi)
|
||||
{
|
||||
spin_lock_bh(&bdi->wb_lock);
|
||||
if (test_bit(BDI_registered, &bdi->state))
|
||||
mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
|
||||
spin_unlock_bh(&bdi->wb_lock);
|
||||
}
|
||||
|
||||
static void bdi_queue_work(struct backing_dev_info *bdi,
|
||||
struct wb_writeback_work *work)
|
||||
{
|
||||
trace_writeback_queue(bdi, work);
|
||||
|
||||
spin_lock_bh(&bdi->wb_lock);
|
||||
if (!test_bit(BDI_registered, &bdi->state)) {
|
||||
if (work->done)
|
||||
complete(work->done);
|
||||
goto out_unlock;
|
||||
}
|
||||
list_add_tail(&work->list, &bdi->work_list);
|
||||
spin_unlock_bh(&bdi->wb_lock);
|
||||
|
||||
mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
|
||||
out_unlock:
|
||||
spin_unlock_bh(&bdi->wb_lock);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -112,7 +125,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
|
|||
work = kzalloc(sizeof(*work), GFP_ATOMIC);
|
||||
if (!work) {
|
||||
trace_writeback_nowork(bdi);
|
||||
mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
|
||||
bdi_wakeup_thread(bdi);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -159,7 +172,7 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi)
|
|||
* writeback as soon as there is no other work to do.
|
||||
*/
|
||||
trace_writeback_wake_background(bdi);
|
||||
mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
|
||||
bdi_wakeup_thread(bdi);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1016,7 +1029,7 @@ void bdi_writeback_workfn(struct work_struct *work)
|
|||
current->flags |= PF_SWAPWRITE;
|
||||
|
||||
if (likely(!current_is_workqueue_rescuer() ||
|
||||
list_empty(&bdi->bdi_list))) {
|
||||
!test_bit(BDI_registered, &bdi->state))) {
|
||||
/*
|
||||
* The normal path. Keep writing back @bdi until its
|
||||
* work_list is empty. Note that this path is also taken
|
||||
|
|
|
@ -95,7 +95,7 @@ struct backing_dev_info {
|
|||
unsigned int max_ratio, max_prop_frac;
|
||||
|
||||
struct bdi_writeback wb; /* default writeback info for this bdi */
|
||||
spinlock_t wb_lock; /* protects work_list */
|
||||
spinlock_t wb_lock; /* protects work_list & wb.dwork scheduling */
|
||||
|
||||
struct list_head work_list;
|
||||
|
||||
|
|
|
@ -296,7 +296,10 @@ void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
|
|||
unsigned long timeout;
|
||||
|
||||
timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
|
||||
queue_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
|
||||
spin_lock_bh(&bdi->wb_lock);
|
||||
if (test_bit(BDI_registered, &bdi->state))
|
||||
queue_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
|
||||
spin_unlock_bh(&bdi->wb_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -309,9 +312,6 @@ static void bdi_remove_from_list(struct backing_dev_info *bdi)
|
|||
spin_unlock_bh(&bdi_lock);
|
||||
|
||||
synchronize_rcu_expedited();
|
||||
|
||||
/* bdi_list is now unused, clear it to mark @bdi dying */
|
||||
INIT_LIST_HEAD(&bdi->bdi_list);
|
||||
}
|
||||
|
||||
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
|
||||
|
@ -362,6 +362,11 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
|
|||
*/
|
||||
bdi_remove_from_list(bdi);
|
||||
|
||||
/* Make sure nobody queues further work */
|
||||
spin_lock_bh(&bdi->wb_lock);
|
||||
clear_bit(BDI_registered, &bdi->state);
|
||||
spin_unlock_bh(&bdi->wb_lock);
|
||||
|
||||
/*
|
||||
* Drain work list and shutdown the delayed_work. At this point,
|
||||
* @bdi->bdi_list is empty telling bdi_writeback_workfn() that @bdi
|
||||
|
|
Loading…
Reference in a new issue