diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index d25bd224d370..567d43f29a10 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -28,6 +28,9 @@ struct anon_vma {
 	spinlock_t lock;	/* Serialize access to vma list */
 #ifdef CONFIG_KSM
 	atomic_t ksm_refcount;
+#endif
+#ifdef CONFIG_MIGRATION
+	atomic_t migrate_refcount;
 #endif
 	/*
 	 * NOTE: the LSB of the head.next is set by
@@ -81,6 +84,26 @@ static inline int ksm_refcount(struct anon_vma *anon_vma)
 	return 0;
 }
 #endif /* CONFIG_KSM */
+#ifdef CONFIG_MIGRATION
+static inline void migrate_refcount_init(struct anon_vma *anon_vma)
+{
+	atomic_set(&anon_vma->migrate_refcount, 0);
+}
+
+static inline int migrate_refcount(struct anon_vma *anon_vma)
+{
+	return atomic_read(&anon_vma->migrate_refcount);
+}
+#else
+static inline void migrate_refcount_init(struct anon_vma *anon_vma)
+{
+}
+
+static inline int migrate_refcount(struct anon_vma *anon_vma)
+{
+	return 0;
+}
+#endif /* CONFIG_MIGRATION */
 
 static inline struct anon_vma *page_anon_vma(struct page *page)
 {
diff --git a/mm/migrate.c b/mm/migrate.c
index 5938db54e1d7..b768a1d4fa43 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -543,6 +543,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	int rcu_locked = 0;
 	int charge = 0;
 	struct mem_cgroup *mem = NULL;
+	struct anon_vma *anon_vma = NULL;
 
 	if (!newpage)
 		return -ENOMEM;
@@ -599,6 +600,8 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	if (PageAnon(page)) {
 		rcu_read_lock();
 		rcu_locked = 1;
+		anon_vma = page_anon_vma(page);
+		atomic_inc(&anon_vma->migrate_refcount);
 	}
 
 	/*
@@ -638,6 +641,15 @@ skip_unmap:
 	if (rc)
 		remove_migration_ptes(page, page);
 rcu_unlock:
+
+	/* Drop an anon_vma reference if we took one */
+	if (anon_vma && atomic_dec_and_lock(&anon_vma->migrate_refcount, &anon_vma->lock)) {
+		int empty = list_empty(&anon_vma->head);
+		spin_unlock(&anon_vma->lock);
+		if (empty)
+			anon_vma_free(anon_vma);
+	}
+
 	if (rcu_locked)
 		rcu_read_unlock();
 uncharge:
diff --git a/mm/rmap.c b/mm/rmap.c
index 0feeef860a8f..f522cb008646 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -250,7 +250,8 @@ static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
 	list_del(&anon_vma_chain->same_anon_vma);
 
 	/* We must garbage collect the anon_vma if it's empty */
-	empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma);
+	empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma) &&
+					!migrate_refcount(anon_vma);
 
 	spin_unlock(&anon_vma->lock);
 	if (empty)
@@ -275,6 +276,7 @@ static void anon_vma_ctor(void *data)
 
 	spin_lock_init(&anon_vma->lock);
 	ksm_refcount_init(anon_vma);
+	migrate_refcount_init(anon_vma);
 	INIT_LIST_HEAD(&anon_vma->head);
 }
 
@@ -1355,10 +1357,8 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 	/*
 	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
 	 * because that depends on page_mapped(); but not all its usages
-	 * are holding mmap_sem, which also gave the necessary guarantee
-	 * (that this anon_vma's slab has not already been destroyed).
-	 * This needs to be reviewed later: avoiding page_lock_anon_vma()
-	 * is risky, and currently limits the usefulness of rmap_walk().
+	 * are holding mmap_sem. Users without mmap_sem are required to
+	 * take a reference count to prevent the anon_vma from disappearing.
 	 */
 	anon_vma = page_anon_vma(page);
 	if (!anon_vma)
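
For illustration, the take/drop pattern the patch introduces can be read in isolation. The sketch below is a minimal condensation of the logic that the patch open-codes in unmap_and_move(); the helper names migrate_anon_vma_get()/migrate_anon_vma_put() are hypothetical and do not appear in the patch, and the sketch assumes CONFIG_MIGRATION=y, since the migrate_refcount field only exists in that configuration.

	#include <linux/rmap.h>
	#include <linux/spinlock.h>

	/*
	 * Pin the anon_vma of an anonymous page. Must be called under
	 * rcu_read_lock(), as in unmap_and_move(), so the anon_vma
	 * cannot go away between page_anon_vma() and the atomic_inc().
	 */
	static struct anon_vma *migrate_anon_vma_get(struct page *page)
	{
		struct anon_vma *anon_vma = page_anon_vma(page);

		if (anon_vma)
			atomic_inc(&anon_vma->migrate_refcount);
		return anon_vma;
	}

	/*
	 * Drop the pin. If we were the last holder, re-check the VMA
	 * list under anon_vma->lock and free the structure if
	 * anon_vma_unlink() already emptied it while we held our
	 * reference.
	 */
	static void migrate_anon_vma_put(struct anon_vma *anon_vma)
	{
		if (atomic_dec_and_lock(&anon_vma->migrate_refcount,
					&anon_vma->lock)) {
			int empty = list_empty(&anon_vma->head);

			spin_unlock(&anon_vma->lock);
			if (empty)
				anon_vma_free(anon_vma);
		}
	}

The atomic_dec_and_lock() pairing mirrors the garbage-collection check in anon_vma_unlink(), which now also consults migrate_refcount() before declaring the anon_vma empty, so whichever path last observes the list empty and the refcount zero under anon_vma->lock performs the free. Note also that the patch drops the reference at the rcu_unlock label, i.e. while rcu_read_lock() is still held; the anon_vma cache is created with SLAB_DESTROY_BY_RCU, so the object remains safe to touch for the rest of the read-side critical section.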