Mirror of https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git, synced 2024-10-31 18:09:19 +00:00
[PATCH] mm: update comments to pte lock
Updated several references to page_table_lock in common code comments.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent f412ac08c9
commit b8072f099b
5 changed files with 11 additions and 13 deletions
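For orientation: the "pte lock" these comments now name is taken through the pte_offset_map_lock()/pte_unmap_unlock() helpers added earlier in this patch series. A minimal sketch of the idiom, assuming those helpers; examine_pte() itself is hypothetical, not part of this commit:

#include <linux/mm.h>

/* Sketch only: how code takes the pte lock these comments refer to. */
static void examine_pte(struct mm_struct *mm, pmd_t *pmd,
			unsigned long address)
{
	spinlock_t *ptl;	/* the lock guarding this page table */
	pte_t *pte;

	/* map the pte and acquire its lock in one step */
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (pte_present(*pte)) {
		/* the pte may only be inspected or changed while ptl is held */
	}
	pte_unmap_unlock(pte, ptl);	/* release the lock, unmap the pte */
}

Depending on configuration, ptl may simply be mm->page_table_lock, which is why several of the comments below read "page_table_lock or pte_lock".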
@@ -8,7 +8,7 @@
  * - update the page tables
  * - inform the TLB about the new one
  *
- * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock.
+ * We hold the mm semaphore for reading, and the pte lock.
  *
  * Note: the old pte is known to not be writable, so we don't need to
  * worry about dirty bits etc getting lost.
@@ -47,8 +47,7 @@ struct vm_area_struct;
  * Locking policy for interlave:
  * In process context there is no locking because only the process accesses
  * its own state. All vma manipulation is somewhat protected by a down_read on
- * mmap_sem. For allocating in the interleave policy the page_table_lock
- * must be also aquired to protect il_next.
+ * mmap_sem.
  *
  * Freeing policy:
  * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
@@ -66,7 +66,7 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
  *
  *  ->mmap_sem
  *    ->i_mmap_lock
- *      ->page_table_lock		(various places, mainly in mmap.c)
+ *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
  *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
  *
  *  ->mmap_sem
@@ -86,9 +86,9 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
  *    ->anon_vma.lock	(vma_adjust)
  *
  *  ->anon_vma.lock
- *    ->page_table_lock		(anon_vma_prepare and various)
+ *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
  *
- *  ->page_table_lock
+ *  ->page_table_lock or pte_lock
  *    ->swap_lock		(try_to_unmap_one)
  *    ->private_lock		(try_to_unmap_one)
  *    ->tree_lock		(try_to_unmap_one)
mm/rmap.c
@@ -32,7 +32,7 @@
  *   page->flags PG_locked (lock_page)
  *     mapping->i_mmap_lock
  *       anon_vma->lock
- *         mm->page_table_lock
+ *         mm->page_table_lock or pte_lock
  *           zone->lru_lock (in mark_page_accessed)
  *           swap_lock (in swap_duplicate, swap_info_get)
  *             mmlist_lock (in mmput, drain_mmlist and others)
@@ -244,7 +244,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 /*
  * Check that @page is mapped at @address into @mm.
  *
- * On success returns with mapped pte and locked mm->page_table_lock.
+ * On success returns with pte mapped and locked.
  */
 pte_t *page_check_address(struct page *page, struct mm_struct *mm,
 			  unsigned long address, spinlock_t **ptlp)
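The rewritten comment above changes the caller contract: success means the pte comes back mapped, with *ptlp pointing at the held lock. A sketch of the caller-side pattern, assuming only what the comment states; check_mapping() is a hypothetical example, not kernel code:

/* Hypothetical caller of page_check_address(); sketch only. */
static int check_mapping(struct page *page, struct mm_struct *mm,
			 unsigned long address)
{
	spinlock_t *ptl;
	pte_t *pte;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		return 0;		/* page not mapped at this address */

	/* *pte may be examined here, under the pte lock */
	pte_unmap_unlock(pte, ptl);	/* undo the map + lock from success */
	return 1;
}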
@@ -445,7 +445,7 @@ int page_referenced(struct page *page, int is_locked, int ignore_token)
  * @vma: the vm area in which the mapping is added
  * @address: the user virtual address mapped
  *
- * The caller needs to hold the mm->page_table_lock.
+ * The caller needs to hold the pte lock.
  */
 void page_add_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
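The new rule in this hunk, that page_add_anon_rmap() runs under the pte lock rather than page_table_lock, reflects how the anonymous-fault path of this era drives rmap. A rough, heavily simplified sketch of that shape; map_anon_page() is hypothetical, not the literal do_anonymous_page():

/* Simplified, hypothetical fault-path caller honoring the new rule. */
static void map_anon_page(struct mm_struct *mm, struct vm_area_struct *vma,
			  pmd_t *pmd, unsigned long address, struct page *page)
{
	spinlock_t *ptl;
	pte_t *pte, entry;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (pte_none(*pte)) {		/* still unmapped? */
		entry = mk_pte(page, vma->vm_page_prot);
		page_add_anon_rmap(page, vma, address);	/* pte lock held */
		set_pte_at(mm, address, pte, entry);
	}
	pte_unmap_unlock(pte, ptl);
}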
@@ -468,7 +468,7 @@ void page_add_anon_rmap(struct page *page,
  * page_add_file_rmap - add pte mapping to a file page
  * @page: the page to add the mapping to
  *
- * The caller needs to hold the mm->page_table_lock.
+ * The caller needs to hold the pte lock.
  */
 void page_add_file_rmap(struct page *page)
 {
@@ -483,7 +483,7 @@ void page_add_file_rmap(struct page *page)
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
  *
- * Caller needs to hold the mm->page_table_lock.
+ * The caller needs to hold the pte lock.
  */
 void page_remove_rmap(struct page *page)
 {
@@ -259,8 +259,7 @@ static inline void free_swap_cache(struct page *page)
 
 /*
  * Perform a free_page(), also freeing any swap cache associated with
- * this page if it is the last user of the page. Can not do a lock_page,
- * as we are holding the page_table_lock spinlock.
+ * this page if it is the last user of the page.
  */
 void free_page_and_swap_cache(struct page *page)
 {