Mirror of https://github.com/followmsi/android_kernel_google_msm.git
mm: remove the second argument of k[un]map_atomic()
Signed-off-by: Cong Wang <amwang@redhat.com>
commit 9b04c5fec4
parent c3eede8e0a
7 changed files with 35 additions and 35 deletions
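
Context for the change: since the kmap_atomic() implementation was reworked to manage its mapping slots on a small per-CPU stack, the explicit KM_* slot argument (KM_USER0, KM_USER1, KM_BOUNCE_READ, ...) has been ignored, so every caller can simply drop it. A minimal before/after sketch in kernel C (illustrative only, not taken from this commit; the helper name is made up):

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Hypothetical helper showing the mechanical conversion. */
    static void fill_page(struct page *page, const void *src, size_t len)
    {
            void *kaddr;

            /* before: kaddr = kmap_atomic(page, KM_USER0); */
            kaddr = kmap_atomic(page);      /* slot is now chosen internally */
            memcpy(kaddr, src, len);
            /* before: kunmap_atomic(kaddr, KM_USER0); */
            kunmap_atomic(kaddr);           /* mappings are released in LIFO order */
    }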
mm/bounce.c
@@ -50,9 +50,9 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
         unsigned char *vto;
 
         local_irq_save(flags);
-        vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ);
+        vto = kmap_atomic(to->bv_page);
         memcpy(vto + to->bv_offset, vfrom, to->bv_len);
-        kunmap_atomic(vto, KM_BOUNCE_READ);
+        kunmap_atomic(vto);
         local_irq_restore(flags);
 }
 
mm/filemap.c
@@ -1318,10 +1318,10 @@ int file_read_actor(read_descriptor_t *desc, struct page *page,
          * taking the kmap.
          */
         if (!fault_in_pages_writeable(desc->arg.buf, size)) {
-                kaddr = kmap_atomic(page, KM_USER0);
+                kaddr = kmap_atomic(page);
                 left = __copy_to_user_inatomic(desc->arg.buf,
                                                 kaddr + offset, size);
-                kunmap_atomic(kaddr, KM_USER0);
+                kunmap_atomic(kaddr);
                 if (left == 0)
                         goto success;
         }
@@ -2045,7 +2045,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
         size_t copied;
 
         BUG_ON(!in_atomic());
-        kaddr = kmap_atomic(page, KM_USER0);
+        kaddr = kmap_atomic(page);
         if (likely(i->nr_segs == 1)) {
                 int left;
                 char __user *buf = i->iov->iov_base + i->iov_offset;
@@ -2055,7 +2055,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
                 copied = __iovec_copy_from_user_inatomic(kaddr + offset,
                                         i->iov, i->iov_offset, bytes);
         }
-        kunmap_atomic(kaddr, KM_USER0);
+        kunmap_atomic(kaddr);
 
         return copied;
 }
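
The file_read_actor() hunk above also shows why these mappings are atomic: no page fault may be taken while the mapping is held, so the user buffer is pre-faulted first and the copy uses the _inatomic variant, falling back to the sleeping kmap() path if the copy still comes up short. A simplified sketch of that pattern (hypothetical function name; actor bookkeeping and error handling elided):

    #include <linux/highmem.h>
    #include <linux/pagemap.h>
    #include <linux/uaccess.h>

    static size_t copy_page_to_user(char __user *buf, struct page *page,
                                    unsigned long offset, size_t size)
    {
            char *kaddr;
            size_t left = size;

            if (!fault_in_pages_writeable(buf, size)) {
                    kaddr = kmap_atomic(page);      /* no faults allowed from here on */
                    left = __copy_to_user_inatomic(buf, kaddr + offset, size);
                    kunmap_atomic(kaddr);
            }
            if (left) {
                    /* the atomic copy faulted anyway: retry via the sleeping kmap() */
                    kaddr = kmap(page);
                    left = __copy_to_user(buf, kaddr + offset, size);
                    kunmap(page);
            }
            return size - left;
    }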
mm/ksm.c
@@ -672,9 +672,9 @@ error:
 static u32 calc_checksum(struct page *page)
 {
         u32 checksum;
-        void *addr = kmap_atomic(page, KM_USER0);
+        void *addr = kmap_atomic(page);
         checksum = jhash2(addr, PAGE_SIZE / 4, 17);
-        kunmap_atomic(addr, KM_USER0);
+        kunmap_atomic(addr);
         return checksum;
 }
 
@@ -683,11 +683,11 @@ static int memcmp_pages(struct page *page1, struct page *page2)
         char *addr1, *addr2;
         int ret;
 
-        addr1 = kmap_atomic(page1, KM_USER0);
-        addr2 = kmap_atomic(page2, KM_USER1);
+        addr1 = kmap_atomic(page1);
+        addr2 = kmap_atomic(page2);
         ret = memcmp(addr1, addr2, PAGE_SIZE);
-        kunmap_atomic(addr2, KM_USER1);
-        kunmap_atomic(addr1, KM_USER0);
+        kunmap_atomic(addr2);
+        kunmap_atomic(addr1);
         return ret;
 }
 
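
Worth noting in the memcmp_pages() hunk above: the old code needed two distinct slots (KM_USER0 and KM_USER1) to hold two pages mapped at once; with the per-CPU slot stack the two calls just push two entries, and the unmaps must stay in reverse (LIFO) order, which the code already respects. A sketch of the same shape (hypothetical function, assuming at most two nested atomic kmaps):

    #include <linux/highmem.h>
    #include <linux/string.h>

    static int two_pages_equal(struct page *p1, struct page *p2)
    {
            char *a1 = kmap_atomic(p1);     /* pushes the first per-CPU slot */
            char *a2 = kmap_atomic(p2);     /* pushes the second */
            int same = !memcmp(a1, a2, PAGE_SIZE);

            kunmap_atomic(a2);              /* pop in reverse order of mapping */
            kunmap_atomic(a1);
            return same;
    }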
mm/memory.c
@@ -2447,7 +2447,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
          * fails, we just zero-fill it. Live with it.
          */
         if (unlikely(!src)) {
-                void *kaddr = kmap_atomic(dst, KM_USER0);
+                void *kaddr = kmap_atomic(dst);
                 void __user *uaddr = (void __user *)(va & PAGE_MASK);
 
                 /*
@@ -2458,7 +2458,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
                  */
                 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
                         clear_page(kaddr);
-                kunmap_atomic(kaddr, KM_USER0);
+                kunmap_atomic(kaddr);
                 flush_dcache_page(dst);
         } else
                 copy_user_highpage(dst, src, va, vma);
mm/shmem.c
@@ -1656,9 +1656,9 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
                 }
                 inode->i_mapping->a_ops = &shmem_aops;
                 inode->i_op = &shmem_symlink_inode_operations;
-                kaddr = kmap_atomic(page, KM_USER0);
+                kaddr = kmap_atomic(page);
                 memcpy(kaddr, symname, len);
-                kunmap_atomic(kaddr, KM_USER0);
+                kunmap_atomic(kaddr);
                 set_page_dirty(page);
                 unlock_page(page);
                 page_cache_release(page);
mm/swapfile.c
@@ -2427,9 +2427,9 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
         if (!(count & COUNT_CONTINUED))
                 goto out;
 
-        map = kmap_atomic(list_page, KM_USER0) + offset;
+        map = kmap_atomic(list_page) + offset;
         count = *map;
-        kunmap_atomic(map, KM_USER0);
+        kunmap_atomic(map);
 
         /*
          * If this continuation count now has some space in it,
@@ -2472,7 +2472,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
 
         offset &= ~PAGE_MASK;
         page = list_entry(head->lru.next, struct page, lru);
-        map = kmap_atomic(page, KM_USER0) + offset;
+        map = kmap_atomic(page) + offset;
 
         if (count == SWAP_MAP_MAX)      /* initial increment from swap_map */
                 goto init_map;          /* jump over SWAP_CONT_MAX checks */
@@ -2482,26 +2482,26 @@ static bool swap_count_continued(struct swap_info_struct *si,
                  * Think of how you add 1 to 999
                  */
                 while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
-                        kunmap_atomic(map, KM_USER0);
+                        kunmap_atomic(map);
                         page = list_entry(page->lru.next, struct page, lru);
                         BUG_ON(page == head);
-                        map = kmap_atomic(page, KM_USER0) + offset;
+                        map = kmap_atomic(page) + offset;
                 }
                 if (*map == SWAP_CONT_MAX) {
-                        kunmap_atomic(map, KM_USER0);
+                        kunmap_atomic(map);
                         page = list_entry(page->lru.next, struct page, lru);
                         if (page == head)
                                 return false;   /* add count continuation */
-                        map = kmap_atomic(page, KM_USER0) + offset;
+                        map = kmap_atomic(page) + offset;
 init_map:               *map = 0;               /* we didn't zero the page */
                 }
                 *map += 1;
-                kunmap_atomic(map, KM_USER0);
+                kunmap_atomic(map);
                 page = list_entry(page->lru.prev, struct page, lru);
                 while (page != head) {
-                        map = kmap_atomic(page, KM_USER0) + offset;
+                        map = kmap_atomic(page) + offset;
                         *map = COUNT_CONTINUED;
-                        kunmap_atomic(map, KM_USER0);
+                        kunmap_atomic(map);
                         page = list_entry(page->lru.prev, struct page, lru);
                 }
                 return true;                    /* incremented */
@@ -2512,22 +2512,22 @@ init_map:               *map = 0;               /* we didn't zero the page */
                  */
                 BUG_ON(count != COUNT_CONTINUED);
                 while (*map == COUNT_CONTINUED) {
-                        kunmap_atomic(map, KM_USER0);
+                        kunmap_atomic(map);
                         page = list_entry(page->lru.next, struct page, lru);
                         BUG_ON(page == head);
-                        map = kmap_atomic(page, KM_USER0) + offset;
+                        map = kmap_atomic(page) + offset;
                 }
                 BUG_ON(*map == 0);
                 *map -= 1;
                 if (*map == 0)
                         count = 0;
-                kunmap_atomic(map, KM_USER0);
+                kunmap_atomic(map);
                 page = list_entry(page->lru.prev, struct page, lru);
                 while (page != head) {
-                        map = kmap_atomic(page, KM_USER0) + offset;
+                        map = kmap_atomic(page) + offset;
                         *map = SWAP_CONT_MAX | count;
                         count = COUNT_CONTINUED;
-                        kunmap_atomic(map, KM_USER0);
+                        kunmap_atomic(map);
                         page = list_entry(page->lru.prev, struct page, lru);
                 }
                 return count == COUNT_CONTINUED;
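
One detail in the swapfile hunks: the code maps a page and immediately adds an offset (map = kmap_atomic(page) + offset), then later passes that interior pointer straight to kunmap_atomic(). The unmap path masks the address back down to its page boundary, so this is fine (an assumption about the arch implementations, but one the existing code already relies on). A hypothetical reader in the same shape:

    #include <linux/highmem.h>

    /* Hypothetical helper mirroring the swap-continuation access pattern. */
    static unsigned char read_cont_count(struct page *page, unsigned long offset)
    {
            unsigned char *map = kmap_atomic(page) + offset;
            unsigned char count = *map;

            kunmap_atomic(map);     /* assumption: any address within the page is accepted */
            return count;
    }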
mm/vmalloc.c
@@ -1906,9 +1906,9 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
                          * we can expect USER0 is not used (see vread/vwrite's
                          * function description)
                          */
-                        void *map = kmap_atomic(p, KM_USER0);
+                        void *map = kmap_atomic(p);
                         memcpy(buf, map + offset, length);
-                        kunmap_atomic(map, KM_USER0);
+                        kunmap_atomic(map);
                 } else
                         memset(buf, 0, length);
 
@@ -1945,9 +1945,9 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
                          * we can expect USER0 is not used (see vread/vwrite's
                          * function description)
                          */
-                        void *map = kmap_atomic(p, KM_USER0);
+                        void *map = kmap_atomic(p);
                         memcpy(map + offset, buf, length);
-                        kunmap_atomic(map, KM_USER0);
+                        kunmap_atomic(map);
                 }
                 addr += length;
                 buf += length;