[PATCH] Direct Migration V9: Avoid writeback / page_migrate() method
Migrate a page with buffers without requiring writeback.

This introduces a new address space operation, migratepage(), that a filesystem may use to implement its own version of page migration. A version is provided that migrates buffers attached to pages. Some filesystems (ext2, ext3, xfs) are modified to make use of this feature.

The swapper address space operations are modified so that a regular migrate_page() occurs for anonymous pages without writeback (migrate_pages() forces every anonymous page to have a swap entry).

Signed-off-by: Mike Kravetz <kravetz@us.ibm.com>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
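To make the mechanism concrete before the diff: a filesystem opts in simply by pointing the new operation at a suitable migration function. The sketch below is illustrative only; "myfs" and its read/write methods are hypothetical, while the .migratepage field and buffer_migrate_page() are what this patch adds (the ext2/ext3/xfs hunks follow exactly this pattern).

/*
 * Illustrative sketch only: "myfs" is a hypothetical filesystem.
 * Pages with attached buffer_heads can be migrated by the generic
 * buffer_migrate_page() helper this patch adds to fs/buffer.c.
 */
static struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,	/* hypothetical */
	.writepage	= myfs_writepage,	/* hypothetical */
	.migratepage	= buffer_migrate_page,	/* new in this patch */
};

Mappings that must never be migrated can instead set .migratepage = fail_migrate_page (as the XFS buftarg mapping does below); filesystems that set nothing continue to be handled by the existing writeback-based path in migrate_pages().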
parent 7e2ab150d1
commit e965f9630c

10 changed files with 100 additions and 1 deletion
fs/buffer.c | 60
@@ -3049,6 +3049,66 @@ asmlinkage long sys_bdflush(int func, long data)
 	return 0;
 }
 
+/*
+ * Migration function for pages with buffers. This function can only be used
+ * if the underlying filesystem guarantees that no other references to "page"
+ * exist.
+ */
+#ifdef CONFIG_MIGRATION
+int buffer_migrate_page(struct page *newpage, struct page *page)
+{
+	struct address_space *mapping = page->mapping;
+	struct buffer_head *bh, *head;
+
+	if (!mapping)
+		return -EAGAIN;
+
+	if (!page_has_buffers(page))
+		return migrate_page(newpage, page);
+
+	head = page_buffers(page);
+
+	if (migrate_page_remove_references(newpage, page, 3))
+		return -EAGAIN;
+
+	bh = head;
+	do {
+		get_bh(bh);
+		lock_buffer(bh);
+		bh = bh->b_this_page;
+
+	} while (bh != head);
+
+	ClearPagePrivate(page);
+	set_page_private(newpage, page_private(page));
+	set_page_private(page, 0);
+	put_page(page);
+	get_page(newpage);
+
+	bh = head;
+	do {
+		set_bh_page(bh, newpage, bh_offset(bh));
+		bh = bh->b_this_page;
+
+	} while (bh != head);
+
+	SetPagePrivate(newpage);
+
+	migrate_page_copy(newpage, page);
+
+	bh = head;
+	do {
+		unlock_buffer(bh);
+		put_bh(bh);
+		bh = bh->b_this_page;
+
+	} while (bh != head);
+
+	return 0;
+}
+EXPORT_SYMBOL(buffer_migrate_page);
+#endif
+
 /*
  * Buffer-head allocation
  */
@@ -706,6 +706,7 @@ struct address_space_operations ext2_aops = {
 	.bmap = ext2_bmap,
 	.direct_IO = ext2_direct_IO,
 	.writepages = ext2_writepages,
+	.migratepage = buffer_migrate_page,
 };
 
 struct address_space_operations ext2_aops_xip = {
@@ -723,6 +724,7 @@ struct address_space_operations ext2_nobh_aops = {
 	.bmap = ext2_bmap,
 	.direct_IO = ext2_direct_IO,
 	.writepages = ext2_writepages,
+	.migratepage = buffer_migrate_page,
 };
 
 /*
@@ -1559,6 +1559,7 @@ static struct address_space_operations ext3_ordered_aops = {
 	.invalidatepage = ext3_invalidatepage,
 	.releasepage = ext3_releasepage,
 	.direct_IO = ext3_direct_IO,
+	.migratepage = buffer_migrate_page,
 };
 
 static struct address_space_operations ext3_writeback_aops = {
@@ -1572,6 +1573,7 @@ static struct address_space_operations ext3_writeback_aops = {
 	.invalidatepage = ext3_invalidatepage,
 	.releasepage = ext3_releasepage,
 	.direct_IO = ext3_direct_IO,
+	.migratepage = buffer_migrate_page,
 };
 
 static struct address_space_operations ext3_journalled_aops = {
@@ -1462,4 +1462,5 @@ struct address_space_operations linvfs_aops = {
 	.commit_write = generic_commit_write,
 	.bmap = linvfs_bmap,
 	.direct_IO = linvfs_direct_IO,
+	.migratepage = buffer_migrate_page,
 };
@@ -1521,6 +1521,7 @@ xfs_mapping_buftarg(
 	struct address_space	*mapping;
 	static struct address_space_operations mapping_aops = {
 		.sync_page = block_sync_page,
+		.migratepage = fail_migrate_page,
 	};
 
 	inode = new_inode(bdev->bd_inode->i_sb);
@@ -363,6 +363,8 @@ struct address_space_operations {
 			loff_t offset, unsigned long nr_segs);
 	struct page* (*get_xip_page)(struct address_space *, sector_t,
 			int);
+	/* migrate the contents of a page to the specified target */
+	int (*migratepage) (struct page *, struct page *);
 };
 
 struct backing_dev_info;
@@ -1719,6 +1721,12 @@ extern void simple_release_fs(struct vfsmount **mount, int *count);
 
 extern ssize_t simple_read_from_buffer(void __user *, size_t, loff_t *, const void *, size_t);
 
+#ifdef CONFIG_MIGRATION
+extern int buffer_migrate_page(struct page *, struct page *);
+#else
+#define buffer_migrate_page NULL
+#endif
+
 extern int inode_change_ok(struct inode *, struct iattr *);
 extern int __must_check inode_setattr(struct inode *, struct iattr *);
 
@@ -193,13 +193,18 @@ extern int isolate_lru_page(struct page *p);
 extern int putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct page *, struct page *);
 extern void migrate_page_copy(struct page *, struct page *);
+extern int migrate_page_remove_references(struct page *, struct page *, int);
 extern int migrate_pages(struct list_head *l, struct list_head *t,
 		struct list_head *moved, struct list_head *failed);
+extern int fail_migrate_page(struct page *, struct page *);
 #else
 static inline int isolate_lru_page(struct page *p) { return -ENOSYS; }
 static inline int putback_lru_pages(struct list_head *l) { return 0; }
 static inline int migrate_pages(struct list_head *l, struct list_head *t,
 	struct list_head *moved, struct list_head *failed) { return -ENOSYS; }
+/* Possible settings for the migrate_page() method in address_operations */
+#define migrate_page NULL
+#define fail_migrate_page NULL
 #endif
 
 #ifdef CONFIG_MMU
@@ -233,6 +233,7 @@ void remove_from_swap(struct page *page)
 
 	delete_from_swap_cache(page);
 }
+EXPORT_SYMBOL(remove_from_swap);
 #endif
 
 /*
@@ -27,6 +27,7 @@ static struct address_space_operations swap_aops = {
 	.writepage = swap_writepage,
 	.sync_page = block_sync_page,
 	.set_page_dirty = __set_page_dirty_nobuffers,
+	.migratepage = migrate_page,
 };
 
 static struct backing_dev_info swap_backing_dev_info = {
mm/vmscan.c | 20
@@ -614,6 +614,15 @@ int putback_lru_pages(struct list_head *l)
 	return count;
 }
 
+/*
+ * Non migratable page
+ */
+int fail_migrate_page(struct page *newpage, struct page *page)
+{
+	return -EIO;
+}
+EXPORT_SYMBOL(fail_migrate_page);
+
 /*
  * swapout a single page
  * page is locked upon entry, unlocked on exit
@@ -659,6 +668,7 @@ unlock_retry:
 retry:
 	return -EAGAIN;
 }
+EXPORT_SYMBOL(swap_page);
 
 /*
  * Page migration was first developed in the context of the memory hotplug
@@ -674,7 +684,7 @@ retry:
  * Remove references for a page and establish the new page with the correct
  * basic settings to be able to stop accesses to the page.
  */
-static int migrate_page_remove_references(struct page *newpage,
+int migrate_page_remove_references(struct page *newpage,
 				struct page *page, int nr_refs)
 {
 	struct address_space *mapping = page_mapping(page);
@@ -749,6 +759,7 @@ static int migrate_page_remove_references(struct page *newpage,
 
 	return 0;
 }
+EXPORT_SYMBOL(migrate_page_remove_references);
 
 /*
  * Copy the page to its new location
@@ -788,6 +799,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
 	if (PageWriteback(newpage))
 		end_page_writeback(newpage);
 }
+EXPORT_SYMBOL(migrate_page_copy);
 
 /*
  * Common logic to directly migrate a single page suitable for
@@ -815,6 +827,7 @@ int migrate_page(struct page *newpage, struct page *page)
 	remove_from_swap(newpage);
 	return 0;
 }
+EXPORT_SYMBOL(migrate_page);
 
 /*
  * migrate_pages
@@ -914,6 +927,11 @@ redo:
 		if (!mapping)
 			goto unlock_both;
 
+		if (mapping->a_ops->migratepage) {
+			rc = mapping->a_ops->migratepage(newpage, page);
+			goto unlock_both;
+		}
+
 		/*
 		 * Trigger writeout if page is dirty
 		 */