Mirror of https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git (synced 2024-10-31 18:09:19 +00:00)
mm: more rmap checking
Re-introduce rmap verification patches that Hugh removed when he removed PG_map_lock. PG_map_lock actually isn't needed to synchronise access to anonymous pages, because PG_locked and PTL together already do. These checks were important in discovering and fixing a rare rmap corruption in SLES9.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ea125892a1
commit c97a9e10ea

3 changed files with 62 additions and 11 deletions
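Background for the checks in the diff below (a minimal sketch, not code from this commit): anonymous pages reuse the page->mapping field to store their anon_vma pointer, tagged with the low bit PAGE_MAPPING_ANON so it can be told apart from a file page's address_space pointer. A small userspace illustration of that tagging scheme, using simplified stand-in types:

#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures involved. */
struct anon_vma { int dummy; };
struct address_space { int dummy; };

#define PAGE_MAPPING_ANON 1UL   /* low tag bit of page->mapping */

struct page {
        struct address_space *mapping;  /* tagged pointer */
        unsigned long index;
};

/* Tag the anon_vma pointer the way __page_set_anon_rmap does. */
static void set_anon(struct page *page, struct anon_vma *anon_vma)
{
        page->mapping = (struct address_space *)
                        ((unsigned long)anon_vma | PAGE_MAPPING_ANON);
}

/* PageAnon(): the tag bit distinguishes anon pages from file pages. */
static int page_is_anon(const struct page *page)
{
        return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

int main(void)
{
        struct anon_vma av;
        struct page page = { 0 };

        set_anon(&page, &av);
        assert(page_is_anon(&page));
        /* The first invariant __page_check_anon_rmap asserts below: the
         * tagged mapping must point back at the vma's anon_vma. */
        assert((unsigned long)page.mapping ==
               ((unsigned long)&av | PAGE_MAPPING_ANON));
        printf("anon tagging invariant holds\n");
        return 0;
}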
include/linux/rmap.h
@@ -74,17 +74,14 @@ void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_file_rmap(struct page *);
 void page_remove_rmap(struct page *, struct vm_area_struct *);
 
-/**
- * page_dup_rmap - duplicate pte mapping to a page
- * @page: the page to add the mapping to
- *
- * For copy_page_range only: minimal extract from page_add_rmap,
- * avoiding unnecessary tests (already checked) so it's quicker.
- */
-static inline void page_dup_rmap(struct page *page)
+#ifdef CONFIG_DEBUG_VM
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address);
+#else
+static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
 {
        atomic_inc(&page->_mapcount);
 }
+#endif
 
 /*
  * Called from mm/vmscan.c to handle paging out
mm/memory.c
@@ -481,7 +481,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 
        page = vm_normal_page(vma, addr, pte);
        if (page) {
                get_page(page);
-               page_dup_rmap(page);
+               page_dup_rmap(page, vma, addr);
                rss[!!PageAnon(page)]++;
        }
mm/rmap.c
@@ -529,20 +529,52 @@ static void __page_set_anon_rmap(struct page *page,
        __inc_zone_page_state(page, NR_ANON_PAGES);
 }
 
+/**
+ * page_set_anon_rmap - sanity check anonymous rmap addition
+ * @page: the page to add the mapping to
+ * @vma: the vm area in which the mapping is added
+ * @address: the user virtual address mapped
+ */
+static void __page_check_anon_rmap(struct page *page,
+       struct vm_area_struct *vma, unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+       /*
+        * The page's anon-rmap details (mapping and index) are guaranteed to
+        * be set up correctly at this point.
+        *
+        * We have exclusion against page_add_anon_rmap because the caller
+        * always holds the page locked, except if called from page_dup_rmap,
+        * in which case the page is already known to be setup.
+        *
+        * We have exclusion against page_add_new_anon_rmap because those pages
+        * are initially only visible via the pagetables, and the pte is locked
+        * over the call to page_add_new_anon_rmap.
+        */
+       struct anon_vma *anon_vma = vma->anon_vma;
+       anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+       BUG_ON(page->mapping != (struct address_space *)anon_vma);
+       BUG_ON(page->index != linear_page_index(vma, address));
+#endif
+}
+
 /**
  * page_add_anon_rmap - add pte mapping to an anonymous page
  * @page: the page to add the mapping to
  * @vma: the vm area in which the mapping is added
  * @address: the user virtual address mapped
  *
- * The caller needs to hold the pte lock.
+ * The caller needs to hold the pte lock and the page must be locked.
  */
 void page_add_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
 {
+       VM_BUG_ON(!PageLocked(page));
+       VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
        if (atomic_inc_and_test(&page->_mapcount))
                __page_set_anon_rmap(page, vma, address);
-       /* else checking page index and mapping is racy */
+       else
+               __page_check_anon_rmap(page, vma, address);
 }
 
 /*
@@ -553,10 +585,12 @@ void page_add_anon_rmap(struct page *page,
  *
  * Same as page_add_anon_rmap but must only be called on *new* pages.
  * This means the inc-and-test can be bypassed.
+ * Page does not have to be locked.
  */
 void page_add_new_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
 {
+       BUG_ON(address < vma->vm_start || address >= vma->vm_end);
        atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
        __page_set_anon_rmap(page, vma, address);
 }
@@ -573,6 +607,26 @@ void page_add_file_rmap(struct page *page)
                __inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
+#ifdef CONFIG_DEBUG_VM
+/**
+ * page_dup_rmap - duplicate pte mapping to a page
+ * @page: the page to add the mapping to
+ *
+ * For copy_page_range only: minimal extract from page_add_file_rmap /
+ * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
+ * quicker.
+ *
+ * The caller needs to hold the pte lock.
+ */
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
+{
+       BUG_ON(page_mapcount(page) == 0);
+       if (PageAnon(page))
+               __page_check_anon_rmap(page, vma, address);
+       atomic_inc(&page->_mapcount);
+}
+#endif
+
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
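__page_check_anon_rmap's second BUG_ON compares page->index against linear_page_index(), i.e. the page offset the faulting address would have in a linear mapping. A rough userspace sketch of that relationship (simplified to the common case where page-cache and page sizes match; field names follow the kernel's vm_area_struct, everything else is a stand-in):

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12UL                 /* assume 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Only the two fields linear_page_index() actually reads. */
struct vm_area_struct {
        unsigned long vm_start;         /* first address of the mapping */
        unsigned long vm_pgoff;         /* page offset of vm_start in the object */
};

/* Expected page->index for a page mapped linearly at 'address'. */
static unsigned long linear_page_index(const struct vm_area_struct *vma,
                                       unsigned long address)
{
        return ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
}

int main(void)
{
        /* Hypothetical anonymous vma; for anon vmas, vm_pgoff is set so
         * that the index equals the virtual page number of the address. */
        struct vm_area_struct vma = {
                .vm_start = 0x40000000UL,
                .vm_pgoff = 0x40000000UL >> PAGE_SHIFT,
        };
        unsigned long addr = vma.vm_start + 5 * PAGE_SIZE;

        /* 6th page of the vma -> index is vm_pgoff + 5. */
        assert(linear_page_index(&vma, addr) == vma.vm_pgoff + 5);
        printf("index for %#lx is %lu\n", addr, linear_page_index(&vma, addr));
        return 0;
}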