mirror of https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
mm: add CONFIG_DEBUG_VM_RB build option
Add a CONFIG_DEBUG_VM_RB build option for the previously existing DEBUG_MM_RB code. Now that Andi Kleen modified it to avoid using recursive algorithms, we can expose it a bit more.

Also extend this code to validate_mm() after stack expansion, and to check that the vma's start and last pgoffs have not changed since the nodes were inserted on the anon vma interval tree (as it is important that the nodes be reindexed after each such update).

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Daniel Santos <daniel.santos@pobox.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 86c2ad1995
commit ed8ea81501

5 changed files with 64 additions and 11 deletions
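To make the invariant concrete before the diff: under CONFIG_DEBUG_VM_RB the node's start/last page offsets are cached when it is inserted into the anon vma interval tree, and a later verify pass warns if the VMA's offsets have drifted without the node being removed and re-inserted. A minimal userspace sketch of that idea (illustrative names only, not kernel code; the kernel uses WARN_ON_ONCE rather than assert):

/* sketch.c -- build with: cc -Wall -o sketch sketch.c && ./sketch */
#include <assert.h>
#include <stdio.h>

/* Illustrative stand-in for struct anon_vma_chain plus its VMA's offsets. */
struct node {
	unsigned long start, last;               /* current interval */
	unsigned long cached_start, cached_last; /* recorded at insert time */
};

static void tree_insert(struct node *n)
{
	/* What the patch caches under CONFIG_DEBUG_VM_RB. */
	n->cached_start = n->start;
	n->cached_last = n->last;
	/* ...the real code also links the node into the rbtree here... */
}

static void tree_verify(const struct node *n)
{
	/* Fires if the interval changed without a remove + re-insert. */
	assert(n->cached_start == n->start);
	assert(n->cached_last == n->last);
}

int main(void)
{
	struct node n = { .start = 0, .last = 9 };

	tree_insert(&n);
	tree_verify(&n);        /* passes: interval unchanged since insert */

	n.start = 16;           /* like changing vm_pgoff without reindexing... */
	/* tree_verify(&n); */  /* ...this call would now trip the assertion */

	printf("ok\n");
	return 0;
}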
include/linux/mm.h

@@ -1386,6 +1386,9 @@ struct anon_vma_chain *anon_vma_interval_tree_iter_first(
 	struct rb_root *root, unsigned long start, unsigned long last);
 struct anon_vma_chain *anon_vma_interval_tree_iter_next(
 	struct anon_vma_chain *node, unsigned long start, unsigned long last);
+#ifdef CONFIG_DEBUG_VM_RB
+void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
+#endif
 
 #define anon_vma_interval_tree_foreach(avc, root, start, last) \
 	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
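For context, the foreach macro above is how rmap-style code walks every VMA whose interval covers a given page offset. A hedged sketch of such a walk (the helper name is made up; field names — rb_root on struct anon_vma, vma on struct anon_vma_chain — follow this kernel tree, and locking plus the pgoff computation are elided):

/*
 * Sketch only, not part of this patch: visit each VMA mapping a given anon
 * page offset.  Assumes the caller holds the anon_vma lock; "pgoff" is the
 * page's offset within the mapping.
 */
static void walk_anon_mappings(struct anon_vma *anon_vma, unsigned long pgoff)
{
	struct anon_vma_chain *avc;

	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		struct vm_area_struct *vma = avc->vma;

		/* ...act on each VMA whose interval covers pgoff... */
		(void)vma;
	}
}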
include/linux/rmap.h

@@ -66,6 +66,9 @@ struct anon_vma_chain {
 	struct list_head same_vma;	/* locked by mmap_sem & page_table_lock */
 	struct rb_node rb;		/* locked by anon_vma->mutex */
 	unsigned long rb_subtree_last;
+#ifdef CONFIG_DEBUG_VM_RB
+	unsigned long cached_vma_start, cached_vma_last;
+#endif
 };
 
 #ifdef CONFIG_MMU
lib/Kconfig.debug

@@ -798,6 +798,15 @@ config DEBUG_VM
 
 	  If unsure, say N.
 
+config DEBUG_VM_RB
+	bool "Debug VM red-black trees"
+	depends on DEBUG_VM
+	help
+	  Enable this to turn on more extended checks in the virtual-memory
+	  system that may impact performance.
+
+	  If unsure, say N.
+
 config DEBUG_VIRTUAL
 	bool "Debug VM translations"
 	depends on DEBUG_KERNEL && X86
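To exercise the new checks, the option just needs to be selected alongside its DEBUG_VM prerequisite, e.g. via a .config fragment:

CONFIG_DEBUG_VM=y
CONFIG_DEBUG_VM_RB=y

As the help text says, the extra validation (the browse_rb() walk and the interval-tree verify below) costs performance, so it is meant for debug builds only.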
mm/interval_tree.c

@@ -70,4 +70,43 @@ static inline unsigned long avc_last_pgoff(struct anon_vma_chain *avc)
 }
 
 INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
-		     avc_start_pgoff, avc_last_pgoff,, anon_vma_interval_tree)
+		     avc_start_pgoff, avc_last_pgoff,
+		     static inline, __anon_vma_interval_tree)
+
+void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
+				   struct rb_root *root)
+{
+#ifdef CONFIG_DEBUG_VM_RB
+	node->cached_vma_start = avc_start_pgoff(node);
+	node->cached_vma_last = avc_last_pgoff(node);
+#endif
+	__anon_vma_interval_tree_insert(node, root);
+}
+
+void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
+				   struct rb_root *root)
+{
+	__anon_vma_interval_tree_remove(node, root);
+}
+
+struct anon_vma_chain *
+anon_vma_interval_tree_iter_first(struct rb_root *root,
+				  unsigned long first, unsigned long last)
+{
+	return __anon_vma_interval_tree_iter_first(root, first, last);
+}
+
+struct anon_vma_chain *
+anon_vma_interval_tree_iter_next(struct anon_vma_chain *node,
+				 unsigned long first, unsigned long last)
+{
+	return __anon_vma_interval_tree_iter_next(node, first, last);
+}
+
+#ifdef CONFIG_DEBUG_VM_RB
+void anon_vma_interval_tree_verify(struct anon_vma_chain *node)
+{
+	WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node));
+	WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node));
+}
+#endif
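The wrappers above exist so the cached offsets are refreshed on every insertion: the tree is keyed on values derived from the VMA's vm_start/vm_pgoff, so any update to those fields has to be bracketed by a remove and a re-insert of every node on the VMA's anon_vma_chain. A hedged sketch of that ordering (the helper name is illustrative, not a kernel symbol; the anon_vma lock is assumed held):

/*
 * Sketch only: reindex a VMA's anon vma interval tree nodes around a
 * vm_pgoff change, so a later anon_vma_interval_tree_verify() stays quiet.
 */
static void reindex_anon_vma_nodes(struct vm_area_struct *vma,
				   unsigned long new_pgoff)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);

	vma->vm_pgoff = new_pgoff;	/* the interval keys change here */

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}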
mm/mmap.c (19 lines changed)
@@ -51,12 +51,6 @@ static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
 		unsigned long start, unsigned long end);
 
-/*
- * WARNING: the debugging will use recursive algorithms so never enable this
- * unless you know what you are doing.
- */
-#undef DEBUG_MM_RB
-
 /* description of effects of mapping type and prot in current implementation.
  * this is due to the limited x86 page protection hardware. The expected
  * behavior is in parens:
@@ -303,7 +297,7 @@ out:
 	return retval;
 }
 
-#ifdef DEBUG_MM_RB
+#ifdef CONFIG_DEBUG_VM_RB
 static int browse_rb(struct rb_root *root)
 {
 	int i = 0, j;
@@ -337,9 +331,12 @@ void validate_mm(struct mm_struct *mm)
 {
 	int bug = 0;
 	int i = 0;
-	struct vm_area_struct *tmp = mm->mmap;
-	while (tmp) {
-		tmp = tmp->vm_next;
+	struct vm_area_struct *vma = mm->mmap;
+	while (vma) {
+		struct anon_vma_chain *avc;
+		list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+			anon_vma_interval_tree_verify(avc);
+		vma = vma->vm_next;
 		i++;
 	}
 	if (i != mm->map_count)
@@ -1790,6 +1787,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 	}
 	vma_unlock_anon_vma(vma);
 	khugepaged_enter_vma_merge(vma);
+	validate_mm(vma->vm_mm);
 	return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -1843,6 +1841,7 @@ int expand_downwards(struct vm_area_struct *vma,
 	}
 	vma_unlock_anon_vma(vma);
 	khugepaged_enter_vma_merge(vma);
+	validate_mm(vma->vm_mm);
 	return error;
 }
 