Mirror of https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
mm: add support for direct_IO to highmem pages
The patch "mm: add support for a filesystem to activate swap files and use direct_IO for writing swap pages" added support for using direct_IO to write swap pages but it is insufficient for highmem pages. To support highmem pages, this patch kmaps() the page before calling the direct_IO() handler. As direct_IO deals with virtual addresses an additional helper is necessary for get_kernel_pages() to lookup the struct page for a kmap virtual address. Signed-off-by: Mel Gorman <mgorman@suse.de> Acked-by: Rik van Riel <riel@redhat.com> Cc: Christoph Hellwig <hch@infradead.org> Cc: David S. Miller <davem@davemloft.net> Cc: Eric B Munson <emunson@mgebm.net> Cc: Eric Paris <eparis@redhat.com> Cc: James Morris <jmorris@namei.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Mike Christie <michaelc@cs.wisc.edu> Cc: Neil Brown <neilb@suse.de> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Sebastian Andrzej Siewior <sebastian@breakpoint.cc> Cc: Trond Myklebust <Trond.Myklebust@netapp.com> Cc: Xiaotian Feng <dfeng@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a509bc1a9e
commit 5a178119b0

4 changed files with 22 additions and 3 deletions
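A highmem page has no permanent kernel virtual address, so page_address() may return NULL for it; the page must be temporarily mapped with kmap() before its contents can be handed to a ->direct_IO() handler that expects a kernel virtual address. A minimal sketch of that pattern follows (the example function name is hypothetical, not part of the commit):

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/*
 * Hypothetical illustration only: obtain a usable kernel virtual address
 * for a page regardless of whether it lives in highmem, touch it, then
 * drop the temporary mapping.  For lowmem pages kmap() simply returns the
 * linear-map address; for highmem pages it installs a pkmap entry.
 */
static void example_touch_page(struct page *page)
{
	void *vaddr = kmap(page);	/* valid for lowmem and highmem pages */

	memset(vaddr, 0, PAGE_SIZE);	/* stand-in for passing vaddr to ->direct_IO() */

	kunmap(page);			/* release the pkmap slot if one was used */
}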
include/linux/highmem.h

@@ -39,10 +39,17 @@ extern unsigned long totalhigh_pages;
 
 void kmap_flush_unused(void);
 
+struct page *kmap_to_page(void *addr);
+
 #else /* CONFIG_HIGHMEM */
 
 static inline unsigned int nr_free_highpages(void) { return 0; }
 
+static inline struct page *kmap_to_page(void *addr)
+{
+	return virt_to_page(addr);
+}
+
 #define totalhigh_pages 0UL
 
 #ifndef ARCH_HAS_KMAP
mm/highmem.c

@@ -94,6 +94,18 @@ static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
 	do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
 #endif
 
+struct page *kmap_to_page(void *vaddr)
+{
+	unsigned long addr = (unsigned long)vaddr;
+
+	if (addr >= PKMAP_ADDR(0) && addr <= PKMAP_ADDR(LAST_PKMAP)) {
+		int i = (addr - PKMAP_ADDR(0)) >> PAGE_SHIFT;
+		return pte_page(pkmap_page_table[i]);
+	}
+
+	return virt_to_page(addr);
+}
+
 static void flush_all_zero_pkmaps(void)
 {
 	int i;
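The contract kmap_to_page() provides can be read as a round-trip with kmap(): an address inside the pkmap window is resolved through the pkmap page table, and anything else falls back to virt_to_page(). A minimal, hypothetical sketch (the example function is not part of the commit):

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/mm.h>

/*
 * Hypothetical illustration only: kmap_to_page() is expected to recover
 * the struct page behind any kmap()-style kernel virtual address, whether
 * the page sits in lowmem (linear map) or highmem (pkmap window).
 */
static void example_kmap_roundtrip(struct page *page)
{
	void *vaddr = kmap(page);

	WARN_ON(kmap_to_page(vaddr) != page);	/* expected to hold in both cases */

	kunmap(page);
}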
mm/page_io.c

@@ -205,7 +205,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
 		struct file *swap_file = sis->swap_file;
 		struct address_space *mapping = swap_file->f_mapping;
 		struct iovec iov = {
-			.iov_base = page_address(page),
+			.iov_base = kmap(page),
 			.iov_len  = PAGE_SIZE,
 		};
 
@@ -218,6 +218,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
 		ret = mapping->a_ops->direct_IO(KERNEL_WRITE,
 						&kiocb, &iov,
 						kiocb.ki_pos, 1);
+		kunmap(page);
 		if (ret == PAGE_SIZE) {
 			count_vm_event(PSWPOUT);
 			ret = 0;
mm/swap.c

@@ -258,8 +258,7 @@ int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
 		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
 			return seg;
 
-		/* virt_to_page sanity checks the PFN */
-		pages[seg] = virt_to_page(kiov[seg].iov_base);
+		pages[seg] = kmap_to_page(kiov[seg].iov_base);
 		page_cache_get(pages[seg]);
 	}
 
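With this change, a kvec whose iov_base came from kmap(), such as the one swap_writepage() now builds for a ->direct_IO() write, can be resolved back to its struct page. A hypothetical caller sketch (example function name and flow are illustrative, not from the commit):

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/pagemap.h>

/*
 * Hypothetical illustration only: build a single-segment kvec around a
 * kmap()ed page, then let get_kernel_pages() look the page back up via
 * kmap_to_page().  Before this commit the lookup used virt_to_page(),
 * which is wrong for addresses in the pkmap window.
 */
static int example_lookup(struct page *page)
{
	struct page *found;
	struct kvec kiov = {
		.iov_base = kmap(page),
		.iov_len  = PAGE_SIZE,
	};
	int nr = get_kernel_pages(&kiov, 1, 0, &found);

	kunmap(page);
	if (nr == 1)
		page_cache_release(found);	/* drop the reference get_kernel_pages() took */
	return nr;
}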