Mirror of https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
f2fs: preallocate blocks for buffered aio writes
This patch preallocates data blocks for buffered aio writes. With this patch,
we can avoid redundant locking and unlocking of node pages given consecutive
aio requests.

[For 3.10]
- Add preallocation for generic_file_splice_write() (sendfile) for xfstests/249, 285

Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>

Conflicts:
	fs/f2fs/data.c
parent e1336354b2
commit 228f875452

4 changed files with 66 additions and 17 deletions
fs/f2fs/data.c

@@ -565,23 +565,31 @@ alloc:
 	return 0;
 }
 
-ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, size_t count)
+ssize_t f2fs_preallocate_blocks(struct inode *inode, loff_t pos, size_t count, bool dio)
 {
-	struct inode *inode = file_inode(iocb->ki_filp);
 	struct f2fs_map_blocks map;
 	ssize_t ret = 0;
 
-	map.m_lblk = F2FS_BYTES_TO_BLK(iocb->ki_pos);
-	map.m_len = F2FS_BYTES_TO_BLK(count);
+	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
+	map.m_len = F2FS_BLK_ALIGN(count);
 	map.m_next_pgofs = NULL;
 
-	if (iocb->ki_filp->f_flags & O_DIRECT &&
-			!(f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))) {
+	if (f2fs_encrypted_inode(inode))
+		return 0;
+
+	if (dio) {
 		ret = f2fs_convert_inline_inode(inode);
 		if (ret)
			return ret;
-		ret = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
+		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
 	}
+	if (pos + count > MAX_INLINE_DATA) {
+		ret = f2fs_convert_inline_inode(inode);
+		if (ret)
+			return ret;
+	}
+	if (!f2fs_has_inline_data(inode))
+		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
 	return ret;
 }
 
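Read straight through, the hunk above leaves f2fs_preallocate_blocks() looking like the
function below. This is the diff's own context and added lines reassembled for readability;
only the comments are orientation notes, and F2FS_BYTES_TO_BLK, MAX_INLINE_DATA and
struct f2fs_map_blocks are taken to come from the f2fs headers as usual.

ssize_t f2fs_preallocate_blocks(struct inode *inode, loff_t pos, size_t count, bool dio)
{
	struct f2fs_map_blocks map;
	ssize_t ret = 0;

	/* Cover the whole request: starting block of pos, length rounded up to blocks. */
	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
	map.m_len = F2FS_BLK_ALIGN(count);
	map.m_next_pgofs = NULL;

	/* Encrypted inodes skip preallocation and keep the per-page path. */
	if (f2fs_encrypted_inode(inode))
		return 0;

	if (dio) {
		/* Direct I/O: drop inline data, then allocate blocks up front. */
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
	}

	/* Buffered writes that outgrow inline data leave inline mode first. */
	if (pos + count > MAX_INLINE_DATA) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	/*
	 * Reserve blocks for the whole range in one pass, which is what avoids
	 * the redundant node-page locking mentioned in the commit message.
	 */
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	return ret;
}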
@@ -613,7 +621,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 	/* it only supports block size == page size */
 	pgofs = (pgoff_t)map->m_lblk;
 
-	if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
+	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
 		map->m_pblk = ei.blk + pgofs - ei.fofs;
 		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
 		map->m_flags = F2FS_MAP_MAPPED;
@@ -648,7 +656,12 @@ next_block:
 				err = -EIO;
 				goto sync_out;
 			}
-			err = __allocate_data_block(&dn);
+			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
+				if (blkaddr == NULL_ADDR)
+					err = reserve_new_block(&dn);
+			} else {
+				err = __allocate_data_block(&dn);
+			}
 			if (err)
 				goto sync_out;
 			allocated = true;
@@ -680,7 +693,8 @@ next_block:
 	} else if ((map->m_pblk != NEW_ADDR &&
 			blkaddr == (map->m_pblk + ofs)) ||
 			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
-			flag == F2FS_GET_BLOCK_PRE_DIO) {
+			flag == F2FS_GET_BLOCK_PRE_DIO ||
+			flag == F2FS_GET_BLOCK_PRE_AIO) {
 		ofs++;
 		map->m_len++;
 	} else {
@@ -1419,6 +1433,14 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
 	struct extent_info ei;
 	int err = 0;
 
+	/*
+	 * we already allocated all the blocks, so we don't need to get
+	 * the block addresses when there is no need to fill the page.
+	 */
+	if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
+					len == PAGE_CACHE_SIZE)
+		return 0;
+
 	if (f2fs_has_inline_data(inode) ||
 			(pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
 		f2fs_lock_op(sbi);
fs/f2fs/f2fs.h

@@ -422,6 +422,7 @@ struct f2fs_map_blocks {
 #define F2FS_GET_BLOCK_FIEMAP	2
 #define F2FS_GET_BLOCK_BMAP	3
 #define F2FS_GET_BLOCK_PRE_DIO	4
+#define F2FS_GET_BLOCK_PRE_AIO	5
 
 /*
  * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
@@ -1945,7 +1946,7 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *);
 void set_data_blkaddr(struct dnode_of_data *);
 int reserve_new_block(struct dnode_of_data *);
 int f2fs_get_block(struct dnode_of_data *, pgoff_t);
-ssize_t f2fs_preallocate_blocks(struct kiocb *, size_t);
+ssize_t f2fs_preallocate_blocks(struct inode *, loff_t, size_t, bool);
 int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
 struct page *get_read_data_page(struct inode *, pgoff_t, int, bool);
 struct page *find_data_page(struct inode *, pgoff_t);
fs/f2fs/file.c

@@ -1898,6 +1898,7 @@ static ssize_t f2fs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file_inode(file);
+	size_t count;
 	ssize_t ret;
 
 	if (f2fs_encrypted_inode(inode) &&
@@ -1905,11 +1906,19 @@ static ssize_t f2fs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 			f2fs_get_encryption_info(inode))
 		return -EACCES;
 
+	ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
+	if (ret)
+		return ret;
+
 	inode_lock(inode);
-	ret = f2fs_preallocate_blocks(iocb, iov_length(iov, nr_segs));
-	if (!ret)
-		ret = __generic_file_aio_write(iocb, iov, nr_segs,
-						&iocb->ki_pos);
+	ret = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
+	if (!ret) {
+		ret = f2fs_preallocate_blocks(inode, pos, count,
+					iocb->ki_filp->f_flags & O_DIRECT);
+		if (!ret)
+			ret = __generic_file_aio_write(iocb, iov, nr_segs,
+							&iocb->ki_pos);
+	}
 	inode_unlock(inode);
 
 	if (ret > 0 || ret == -EIOCBQUEUED) {
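Taken together, the two f2fs_file_aio_write() hunks make the locked section of the
buffered/aio write path read as follows. The code lines are the added lines from the
diff above; only the comments are orientation notes.

	inode_lock(inode);
	/* Clamp pos/count (O_APPEND, s_maxbytes, rlimits) before reserving blocks. */
	ret = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (!ret) {
		/* One preallocation pass for the whole request; O_DIRECT picks the DIO path. */
		ret = f2fs_preallocate_blocks(inode, pos, count,
					iocb->ki_filp->f_flags & O_DIRECT);
		if (!ret)
			ret = __generic_file_aio_write(iocb, iov, nr_segs,
							&iocb->ki_pos);
	}
	inode_unlock(inode);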
@@ -1955,6 +1964,23 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 }
 #endif
 
+static ssize_t f2fs_file_splice_write(struct pipe_inode_info *pipe,
+					struct file *out,
+					loff_t *ppos, size_t len, unsigned int flags)
+{
+	struct address_space *mapping = out->f_mapping;
+	struct inode *inode = mapping->host;
+	int ret;
+
+	ret = generic_write_checks(out, ppos, &len, S_ISBLK(inode->i_mode));
+	if (ret)
+		return ret;
+	ret = f2fs_preallocate_blocks(inode, *ppos, len, false);
+	if (ret)
+		return ret;
+	return generic_file_splice_write(pipe, out, ppos, len, flags);
+}
+
 const struct file_operations f2fs_file_operations = {
 	.llseek		= f2fs_llseek,
 	.read		= do_sync_read,
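The new splice path is reachable from userspace with sendfile(2): for a regular-file
destination, sendfile goes through the destination's ->splice_write, which on f2fs is now
f2fs_file_splice_write() above (the commit message ties this to xfstests/249, 285). A
minimal userspace illustration, with hypothetical paths and minimal error handling:

#include <fcntl.h>
#include <stdio.h>
#include <sys/sendfile.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	int in = open("/data/src.bin", O_RDONLY);			/* hypothetical source */
	int out = open("/data/dst.bin", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	struct stat st;
	off_t off = 0;

	if (in < 0 || out < 0 || fstat(in, &st) < 0) {
		perror("setup");
		return 1;
	}
	/* In-kernel copy; on f2fs the destination blocks are preallocated first. */
	if (sendfile(out, in, &off, st.st_size) < 0)
		perror("sendfile");
	close(in);
	close(out);
	return 0;
}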
@@ -1971,5 +1997,5 @@ const struct file_operations f2fs_file_operations = {
 	.compat_ioctl	= f2fs_compat_ioctl,
 #endif
 	.splice_read	= generic_file_splice_read,
-	.splice_write	= generic_file_splice_write,
+	.splice_write	= f2fs_file_splice_write,
 };
include/linux/f2fs_fs.h

@@ -21,7 +21,7 @@
 #define F2FS_BLKSIZE			4096	/* support only 4KB block */
 #define F2FS_BLKSIZE_BITS		12	/* bits for F2FS_BLKSIZE */
 #define F2FS_MAX_EXTENSION		64	/* # of extension entries */
-#define F2FS_BLK_ALIGN(x)	(((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE)
+#define F2FS_BLK_ALIGN(x)	(((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
 
 #define NULL_ADDR		((block_t)0)	/* used as block_t addresses */
 #define NEW_ADDR		((block_t)-1)	/* used as block_t addresses */
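The F2FS_BLK_ALIGN() definition matters for the new preallocation path, where
map.m_len = F2FS_BLK_ALIGN(count) and map.m_lblk = F2FS_BYTES_TO_BLK(pos). The
standalone sketch below walks through that arithmetic; F2FS_BLKSIZE, F2FS_BLKSIZE_BITS
and F2FS_BLK_ALIGN() are copied from the hunk, while F2FS_BYTES_TO_BLK() is not part of
this diff and is assumed here to be a plain right shift. The pos/count values are
hypothetical.

#include <stdio.h>

#define F2FS_BLKSIZE		4096
#define F2FS_BLKSIZE_BITS	12
#define F2FS_BLK_ALIGN(x)	(((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
#define F2FS_BYTES_TO_BLK(x)	((x) >> F2FS_BLKSIZE_BITS)	/* assumed definition */

int main(void)
{
	unsigned long long pos = 6000;		/* hypothetical write offset (bytes) */
	unsigned long long count = 10000;	/* hypothetical write length (bytes) */

	/* Start block is truncated down; length is rounded up to whole blocks. */
	printf("m_lblk = %llu\n", F2FS_BYTES_TO_BLK(pos));	/* prints 1 */
	printf("m_len  = %llu\n", F2FS_BLK_ALIGN(count));	/* prints 3 */

	/* The old divide form and the new shift form of F2FS_BLK_ALIGN agree. */
	printf("%llu == %llu\n", (count + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE,
				 F2FS_BLK_ALIGN(count));
	return 0;
}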