x86: PAT: hooks in generic vm code to help archs to track pfnmap regions - v3
Impact: Introduces new hooks, which are currently null.

Introduce generic hooks in remap_pfn_range and vm_insert_pfn and
corresponding copy and free routines with reserve and free tracking.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent e121e41844
commit 2ab640379a

2 changed files with 81 additions and 1 deletion
include/linux/mm.h
@@ -155,6 +155,12 @@ static inline int is_pfn_mapping(struct vm_area_struct *vma)
 	return (vma->vm_flags & VM_PFNMAP);
 }
 
+extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+				unsigned long pfn, unsigned long size);
+extern int track_pfn_vma_copy(struct vm_area_struct *vma);
+extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
+				unsigned long size);
+
 /*
  * vm_fault is filled by the the pagefault handler and passed to the vma's
  * ->fault function. The vma's ->fault is responsible for returning a bitmask
mm/memory.c (76 changes)
@@ -99,6 +99,50 @@ int randomize_va_space __read_mostly =
 					2;
 #endif
 
+#ifndef track_pfn_vma_new
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * track_pfn_vma_new is called when a _new_ pfn mapping is being established
+ * for physical range indicated by pfn and size.
+ */
+int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+			unsigned long pfn, unsigned long size)
+{
+	return 0;
+}
+#endif
+
+#ifndef track_pfn_vma_copy
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
+ * copied through copy_page_range().
+ */
+int track_pfn_vma_copy(struct vm_area_struct *vma)
+{
+	return 0;
+}
+#endif
+
+#ifndef untrack_pfn_vma
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * untrack_pfn_vma is called while unmapping a pfnmap for a region.
+ * untrack can be called for a specific region indicated by pfn and size or
+ * can be for the entire vma (in which case size can be zero).
+ */
+void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
+			unsigned long size)
+{
+}
+#endif
+
 static int __init disable_randmaps(char *s)
 {
 	randomize_va_space = 0;
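The #ifndef guards above are the override point: the null defaults are compiled only when the hook names are not already defined as macros, so an architecture opts in by defining the macros in a header visible to mm/memory.c and supplying real implementations. A minimal sketch of what such an override could look like; the arch_reserve_pfn_range/arch_free_pfn_range helpers are hypothetical placeholders, not part of this commit:

/* Hypothetical arch header -- illustration only. Defining the names as
 * macros suppresses the generic null hooks in mm/memory.c. */
#define track_pfn_vma_new track_pfn_vma_new
#define untrack_pfn_vma untrack_pfn_vma

/* Hypothetical arch implementation -- illustration only. */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
			unsigned long pfn, unsigned long size)
{
	/*
	 * Reserve a memory type for the physical range; a nonzero return
	 * makes vm_insert_pfn()/remap_pfn_range() fail with -EINVAL, so
	 * the mapping is never established.
	 */
	return arch_reserve_pfn_range(pfn, size, prot);	/* placeholder */
}

void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size)
{
	/* size == 0 means "the entire vma" (see unmap_vmas() below). */
	arch_free_pfn_range(vma, pfn, size);		/* placeholder */
}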
@@ -669,6 +713,16 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	if (is_vm_hugetlb_page(vma))
 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
+	if (is_pfn_mapping(vma)) {
+		/*
+		 * We do not free on error cases below as remove_vma
+		 * gets called on error from higher level routine
+		 */
+		ret = track_pfn_vma_copy(vma);
+		if (ret)
+			return ret;
+	}
+
 	/*
 	 * We need to invalidate the secondary MMU mappings only when
 	 * there could be a permission downgrade on the ptes of the
@@ -915,6 +969,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 		if (vma->vm_flags & VM_ACCOUNT)
 			*nr_accounted += (end - start) >> PAGE_SHIFT;
 
+		if (is_pfn_mapping(vma))
+			untrack_pfn_vma(vma, 0, 0);
+
 		while (start != end) {
 			if (!tlb_start_valid) {
 				tlb_start = start;
@@ -1473,6 +1530,7 @@ out:
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
 {
+	int ret;
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range). However we would like
@@ -1487,7 +1545,15 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
-	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+	if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+		return -EINVAL;
+
+	ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+
+	if (ret)
+		untrack_pfn_vma(vma, pfn, PAGE_SIZE);
+
+	return ret;
 }
 EXPORT_SYMBOL(vm_insert_pfn);
 
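With this change, every pfn inserted through vm_insert_pfn() is tracked at PAGE_SIZE granularity, and untracked again if insert_pfn() fails. A sketch of the typical caller, a driver ->fault handler; the my_dev_* names are hypothetical, for illustration only:

/* Sketch of a driver fault handler using vm_insert_pfn(). */
static int my_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long pfn = my_dev_pfn(vma, vmf);	/* hypothetical helper */

	/* track_pfn_vma_new() now runs inside vm_insert_pfn(). */
	if (vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}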
@@ -1625,6 +1691,10 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
+	err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
+	if (err)
+		return -EINVAL;
+
 	BUG_ON(addr >= end);
 	pfn -= addr >> PAGE_SHIFT;
 	pgd = pgd_offset(mm, addr);
@@ -1636,6 +1706,10 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
+
+	if (err)
+		untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
+
 	return err;
 }
 EXPORT_SYMBOL(remap_pfn_range);
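On the remap_pfn_range() side, the whole PAGE_ALIGN(size) range is reserved up front and released again if the page-table walk fails part way. A minimal sketch of how a driver mmap handler would hit this path; my_dev_mmap and MY_DEV_PHYS_BASE are hypothetical names for illustration:

/* Sketch of a driver mmap handler mapping device memory. */
static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/*
	 * track_pfn_vma_new() is called once for the whole range inside
	 * remap_pfn_range(); untrack_pfn_vma() cleans up on failure here
	 * and later from unmap_vmas() when the mapping goes away.
	 */
	return remap_pfn_range(vma, vma->vm_start,
			       MY_DEV_PHYS_BASE >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}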