mm: vmscan: take page buffers dirty and locked state into account
Page reclaim keeps track of dirty and under-writeback pages and uses this to determine whether wait_iff_congested() should stall or whether kswapd should begin writing back pages. This fails to account for buffer pages that can be under writeback without PageWriteback being set, which is the case for filesystems such as ext3 in ordered mode. Furthermore, PageDirty buffer pages can have all their buffers clean, in which case writepage does no IO and the page should not be accounted as congested.

This patch adds an address_space operation that filesystems may optionally use to check whether a page is really dirty or really under writeback. An implementation for buffer_heads is provided and used for block operations and ext3 in ordered mode. By default the page flags are obeyed.

Credit goes to Jan Kara for identifying that the page flags alone are not sufficient for ext3 and for sanity checking a number of ideas on how the problem could be addressed.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Cc: Zlatko Calusic <zcalusic@bitsync.net>
Cc: dormando <dormando@rydia.net>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Git-commit: b45972265f823ed01eae0867a176320071665787
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Change-Id: Idabea6f388eddcf5acf4725975d51119169da211
[vinmenon@codeaurora.org: resolve trivial merge conflicts]
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
This commit is contained in:
parent f7d25bdff9
commit 0c08500607

6 changed files with 50 additions and 0 deletions
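For context on how this information gets used: the sketch below illustrates, under the assumption that the caller looks like the shrink_page_list() accounting in the surrounding reclaim series, how the per-page answers from page_check_dirty_writeback() could be tallied before deciding whether to stall in wait_iff_congested(). The helper name tally_page_state() and the counter names are illustrative only and are not lines added by this patch.

/*
 * Illustrative sketch only: how a shrink_page_list()-style caller in
 * mm/vmscan.c might consume the dirty/writeback answers. The helper and
 * counter names are assumptions for illustration, not part of this patch.
 */
static void tally_page_state(struct page *page, unsigned long *nr_dirty,
			     unsigned long *nr_unqueued_dirty,
			     unsigned long *nr_writeback)
{
	bool dirty, writeback;

	page_check_dirty_writeback(page, &dirty, &writeback);

	if (dirty || writeback)
		(*nr_dirty)++;		/* page still has work outstanding */

	if (dirty && !writeback)
		(*nr_unqueued_dirty)++;	/* dirty but no IO queued yet */

	if (writeback)
		(*nr_writeback)++;	/* IO already in flight */
}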
fs/block_dev.c (+1)
@@ -1590,6 +1590,7 @@ static const struct address_space_operations def_blk_aops = {
 	.writepages	= generic_writepages,
 	.releasepage	= blkdev_releasepage,
 	.direct_IO	= blkdev_direct_IO,
+	.is_dirty_writeback = buffer_check_dirty_writeback,
 };
 
 const struct file_operations def_blk_fops = {
fs/buffer.c (+34)
@@ -82,6 +82,40 @@ void unlock_buffer(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(unlock_buffer);
 
+/*
+ * Returns if the page has dirty or writeback buffers. If all the buffers
+ * are unlocked and clean then the PageDirty information is stale. If
+ * any of the pages are locked, it is assumed they are locked for IO.
+ */
+void buffer_check_dirty_writeback(struct page *page,
+				     bool *dirty, bool *writeback)
+{
+	struct buffer_head *head, *bh;
+	*dirty = false;
+	*writeback = false;
+
+	BUG_ON(!PageLocked(page));
+
+	if (!page_has_buffers(page))
+		return;
+
+	if (PageWriteback(page))
+		*writeback = true;
+
+	head = page_buffers(page);
+	bh = head;
+	do {
+		if (buffer_locked(bh))
+			*writeback = true;
+
+		if (buffer_dirty(bh))
+			*dirty = true;
+
+		bh = bh->b_this_page;
+	} while (bh != head);
+}
+EXPORT_SYMBOL(buffer_check_dirty_writeback);
+
 /*
  * Block until a buffer comes unlocked. This doesn't stop it
  * from becoming locked again - you have to lock it yourself
fs/ext3/inode.c (+1)
@@ -1984,6 +1984,7 @@ static const struct address_space_operations ext3_ordered_aops = {
 	.direct_IO		= ext3_direct_IO,
 	.migratepage		= buffer_migrate_page,
 	.is_partially_uptodate  = block_is_partially_uptodate,
+	.is_dirty_writeback	= buffer_check_dirty_writeback,
 	.error_remove_page	= generic_error_remove_page,
 };
 
include/linux/buffer_head.h (+3)
@@ -139,6 +139,9 @@ BUFFER_FNS(Prio, prio)
 	})
 #define page_has_buffers(page)	PagePrivate(page)
 
+void buffer_check_dirty_writeback(struct page *page,
+				     bool *dirty, bool *writeback);
+
 /*
  * Declarations
  */
include/linux/fs.h (+1)
@@ -380,6 +380,7 @@ struct address_space_operations {
 	int (*launder_page) (struct page *);
 	int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
 					unsigned long);
+	void (*is_dirty_writeback) (struct page *, bool *, bool *);
 	int (*error_remove_page)(struct address_space *, struct page *);
 
 	/* swapfile support */
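Beyond the buffer_head helper, a filesystem that keeps its own per-page write state could implement the new operation directly. The sketch below is hypothetical: myfs_is_dirty_writeback, struct myfs_page_state, and its fields are invented names for illustration and are not part of this patch. As the mm/vmscan.c hunk below shows, reclaim only consults the hook for pages with private state, so the callback can safely inspect page_private().

/*
 * Hypothetical example (names invented for illustration): a filesystem
 * that tracks dirty data and in-flight IO in its own per-page state can
 * report that through ->is_dirty_writeback instead of relying on the
 * PageDirty/PageWriteback flags alone.
 */
struct myfs_page_state {
	bool has_unwritten_data;	/* dirty data not yet submitted */
	bool io_in_flight;		/* writeout submitted but not completed */
};

static void myfs_is_dirty_writeback(struct page *page,
				    bool *dirty, bool *writeback)
{
	struct myfs_page_state *state;

	/* Start from the generic page flags */
	*dirty = PageDirty(page);
	*writeback = PageWriteback(page);

	if (!page_has_private(page))
		return;

	/* Refine using the filesystem's own bookkeeping */
	state = (struct myfs_page_state *)page_private(page);
	*dirty = state->has_unwritten_data;
	*writeback = state->io_in_flight;
}

static const struct address_space_operations myfs_aops = {
	/* ... other operations ... */
	.is_dirty_writeback	= myfs_is_dirty_writeback,
};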
mm/vmscan.c (+10)
@@ -761,6 +761,8 @@ static enum page_references page_check_references(struct page *page,
 static void page_check_dirty_writeback(struct page *page,
 				       bool *dirty, bool *writeback)
 {
+	struct address_space *mapping;
+
 	/*
 	 * Anonymous pages are not handled by flushers and must be written
 	 * from reclaim context. Do not stall reclaim based on them
@@ -774,6 +776,14 @@ static void page_check_dirty_writeback(struct page *page,
 	/* By default assume that the page flags are accurate */
 	*dirty = PageDirty(page);
 	*writeback = PageWriteback(page);
+
+	/* Verify dirty/writeback state if the filesystem supports it */
+	if (!page_has_private(page))
+		return;
+
+	mapping = page_mapping(page);
+	if (mapping && mapping->a_ops->is_dirty_writeback)
+		mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
 }
 
 /*