ext4: Use page_mkwrite vma_operations to get mmap write notification.

We want to be notified when a write is done through an mmap'ed region of a file.
This is needed for preallocated areas: in the page_mkwrite callback we split the
preallocated area into an initialized extent and an uninitialized extent, which
lets us handle ENOSPC at fault time. Otherwise we would only get ENOSPC in
writepage, and that would result in data loss. The change is also needed to handle
ENOSPC when writing through an mmap'ed region of a file with holes.

Acked-by: Jan Kara <jack@suse.cz>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Aneesh Kumar K.V, 2008-07-11 19:27:31 -04:00 (committed by Theodore Ts'o)
parent 93e3270c87
commit 2e9ee85035
3 changed files with 80 additions and 1 deletion
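
Illustration (not part of the commit): a minimal user-space sketch of the case the
commit message describes. It writes through a shared mapping into a hole of a sparse
file on ext4; with the .page_mkwrite handler installed, block allocation happens at
fault time, so an allocation failure such as ENOSPC reaches the writer as SIGBUS
instead of surfacing later in writepage after the data has already been dirtied.
The mount point, file name, and helper names below are hypothetical.

/*
 * Illustrative only: write through mmap into a hole of a sparse file.
 * With a page_mkwrite handler in place, the first store into the hole
 * faults, the filesystem allocates blocks, and an allocation failure is
 * delivered to this process as SIGBUS rather than lost at writeback time.
 */
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define FILE_SIZE (1UL << 20)	/* 1 MiB sparse file */

static void bus_handler(int sig)
{
	static const char msg[] = "SIGBUS: block allocation failed at fault time\n";
	(void)sig;
	write(STDERR_FILENO, msg, sizeof(msg) - 1);
	_exit(1);
}

int main(void)
{
	const char *path = "/mnt/ext4/mkwrite-demo";	/* hypothetical ext4 mount */
	int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Extend the file without allocating blocks: everything is a hole. */
	if (ftruncate(fd, FILE_SIZE) < 0) {
		perror("ftruncate");
		return 1;
	}
	signal(SIGBUS, bus_handler);

	char *map = mmap(NULL, FILE_SIZE, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* First store into the hole: this is what triggers ->page_mkwrite. */
	memset(map + 4096, 0xab, 4096);

	if (msync(map, FILE_SIZE, MS_SYNC) < 0)
		perror("msync");
	munmap(map, FILE_SIZE);
	close(fd);
	return 0;
}

Build with "gcc -o mkwrite-demo mkwrite-demo.c" and run it against an ext4 mount;
on a nearly full filesystem the memset() should be where the failure shows up.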

fs/ext4/ext4.h

@@ -1068,6 +1068,7 @@ extern void ext4_set_aops(struct inode *inode);
 extern int ext4_writepage_trans_blocks(struct inode *);
 extern int ext4_block_truncate_page(handle_t *handle, struct page *page,
 		struct address_space *mapping, loff_t from);
+extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page);
 
 /* ioctl.c */
 extern long ext4_ioctl(struct file *, unsigned int, unsigned long);

fs/ext4/file.c

@@ -123,6 +123,23 @@ force_commit:
 	return ret;
 }
 
+static struct vm_operations_struct ext4_file_vm_ops = {
+	.fault		= filemap_fault,
+	.page_mkwrite	= ext4_page_mkwrite,
+};
+
+static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct address_space *mapping = file->f_mapping;
+
+	if (!mapping->a_ops->readpage)
+		return -ENOEXEC;
+	file_accessed(file);
+	vma->vm_ops = &ext4_file_vm_ops;
+	vma->vm_flags |= VM_CAN_NONLINEAR;
+	return 0;
+}
+
 const struct file_operations ext4_file_operations = {
 	.llseek		= generic_file_llseek,
 	.read		= do_sync_read,
@@ -133,7 +150,7 @@ const struct file_operations ext4_file_operations = {
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= ext4_compat_ioctl,
 #endif
-	.mmap		= generic_file_mmap,
+	.mmap		= ext4_file_mmap,
 	.open		= generic_file_open,
 	.release	= ext4_release_file,
 	.fsync		= ext4_sync_file,
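
Aside (not part of the commit): ext4_file_mmap() above is, as far as I can tell, a
near copy of the era's generic_file_mmap() — the same ->readpage check, file_accessed()
call, and VM_CAN_NONLINEAR flag. The only reason to open-code it is to install a
vm_operations_struct whose .page_mkwrite points at ext4_page_mkwrite; generic_file_vm_ops
carries only .fault = filemap_fault, so with generic_file_mmap() the filesystem would
never be notified when a mapped page is first written to.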

fs/ext4/inode.c

@@ -3564,3 +3564,64 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
 
 	return err;
 }
+
+static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
+{
+	return !buffer_mapped(bh);
+}
+
+int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+{
+	loff_t size;
+	unsigned long len;
+	int ret = -EINVAL;
+	struct file *file = vma->vm_file;
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct address_space *mapping = inode->i_mapping;
+
+	/*
+	 * Get i_alloc_sem to stop truncates messing with the inode. We cannot
+	 * get i_mutex because we are already holding mmap_sem.
+	 */
+	down_read(&inode->i_alloc_sem);
+	size = i_size_read(inode);
+	if (page->mapping != mapping || size <= page_offset(page)
+	    || !PageUptodate(page)) {
+		/* page got truncated from under us? */
+		goto out_unlock;
+	}
+	ret = 0;
+	if (PageMappedToDisk(page))
+		goto out_unlock;
+
+	if (page->index == size >> PAGE_CACHE_SHIFT)
+		len = size & ~PAGE_CACHE_MASK;
+	else
+		len = PAGE_CACHE_SIZE;
+
+	if (page_has_buffers(page)) {
+		/* return if we have all the buffers mapped */
+		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
+				       ext4_bh_unmapped))
+			goto out_unlock;
+	}
+	/*
+	 * OK, we need to fill the hole... Do write_begin/write_end
+	 * to do the block allocation/reservation. We are not holding
+	 * inode->i_mutex here; that allows parallel write_begin and
+	 * write_end calls. lock_page prevents this from happening
+	 * on the same page, though.
+	 */
+	ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
+			len, AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
+	if (ret < 0)
+		goto out_unlock;
+	ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
+			len, len, page, NULL);
+	if (ret < 0)
+		goto out_unlock;
+	ret = 0;
+out_unlock:
+	up_read(&inode->i_alloc_sem);
+	return ret;
+}
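
A quick worked example (illustrative, not part of the commit) of the len clamp in
ext4_page_mkwrite() above, assuming the then-usual 4 KiB PAGE_CACHE_SIZE: for the page
that contains EOF, only the bytes up to i_size are handed to write_begin/write_end, so
no blocks are allocated past end-of-file. mkwrite_len() below is just a hypothetical
stand-in for the in-kernel computation.

/* Illustrative stand-in for the len computation in ext4_page_mkwrite(). */
#include <stdio.h>

#define PAGE_CACHE_SHIFT 12
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)
#define PAGE_CACHE_MASK  (~(PAGE_CACHE_SIZE - 1))

static unsigned long mkwrite_len(unsigned long long size, unsigned long index)
{
	if (index == size >> PAGE_CACHE_SHIFT)
		return size & ~PAGE_CACHE_MASK;	/* partial page at EOF */
	return PAGE_CACHE_SIZE;			/* page fully inside i_size */
}

int main(void)
{
	/* 10000-byte file: page 1 is fully inside i_size, page 2 holds EOF. */
	printf("page 1: len = %lu\n", mkwrite_len(10000, 1));	/* 4096 */
	printf("page 2: len = %lu\n", mkwrite_len(10000, 2));	/* 1808 = 10000 & 4095 */
	return 0;
}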