Revert "tmpfs: support fallocate preallocation"

This reverts commit 0aa35ef98818fade8566568866ddc0a3db802e0f.
RomanDesigner 2020-01-04 17:47:12 +03:00 committed by Francescodario Cuzzocrea
parent 9d2445d8d5
commit 3c14ba2074
1 changed file with 0 additions and 84 deletions
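
For context, the reverted commit wired tmpfs into the fallocate(2) syscall so userspace could preallocate backing pages up front. The sketch below is not part of the patch: it shows the user-visible behaviour being removed, assuming a tmpfs mount at /dev/shm and a hypothetical file name. Once shmem_fallocate() is gone, the call is expected to fail with EOPNOTSUPP.

/* Minimal userspace sketch (not from this patch): the file name and
 * the /dev/shm tmpfs mount point are assumptions. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/shm/prealloc-test", O_RDWR | O_CREAT, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Ask tmpfs to reserve 1 MiB of pages at offset 0; without
	 * FALLOC_FL_KEEP_SIZE this also extends i_size. */
	if (fallocate(fd, 0, 0, 1 << 20) < 0)
		perror("fallocate");	/* EOPNOTSUPP after the revert */
	else
		printf("preallocated 1 MiB on tmpfs\n");

	close(fd);
	unlink("/dev/shm/prealloc-test");
	return 0;
}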


@@ -1605,90 +1605,6 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
 	return error;
 }
 
-static long shmem_fallocate(struct file *file, int mode, loff_t offset,
-							 loff_t len)
-{
-	struct inode *inode = file->f_path.dentry->d_inode;
-	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
-	pgoff_t start, index, end;
-	int error;
-
-	mutex_lock(&inode->i_mutex);
-
-	if (mode & FALLOC_FL_PUNCH_HOLE) {
-		struct address_space *mapping = file->f_mapping;
-		loff_t unmap_start = round_up(offset, PAGE_SIZE);
-		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
-
-		if ((u64)unmap_end > (u64)unmap_start)
-			unmap_mapping_range(mapping, unmap_start,
-					    1 + unmap_end - unmap_start, 0);
-		shmem_truncate_range(inode, offset, offset + len - 1);
-		/* No need to unmap again: hole-punching leaves COWed pages */
-		error = 0;
-		goto out;
-	}
-
-	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
-	error = inode_newsize_ok(inode, offset + len);
-	if (error)
-		goto out;
-
-	start = offset >> PAGE_CACHE_SHIFT;
-	end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	/* Try to avoid a swapstorm if len is impossible to satisfy */
-	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
-		error = -ENOSPC;
-		goto out;
-	}
-
-	for (index = start; index < end; index++) {
-		struct page *page;
-
-		/*
-		 * Good, the fallocate(2) manpage permits EINTR: we may have
-		 * been interrupted because we are using up too much memory.
-		 */
-		if (signal_pending(current))
-			error = -EINTR;
-		else
-			error = shmem_getpage(inode, index, &page, SGP_WRITE,
-					      NULL);
-		if (error) {
-			/*
-			 * We really ought to free what we allocated so far,
-			 * but it would be wrong to free pages allocated
-			 * earlier, or already now in use: i_mutex does not
-			 * exclude all cases. We do not know what to free.
-			 */
-			goto ctime;
-		}
-
-		if (!PageUptodate(page)) {
-			clear_highpage(page);
-			flush_dcache_page(page);
-			SetPageUptodate(page);
-		}
-		/*
-		 * set_page_dirty so that memory pressure will swap rather
-		 * than free the pages we are allocating (and SGP_CACHE pages
-		 * might still be clean: we now need to mark those dirty too).
-		 */
-		set_page_dirty(page);
-		unlock_page(page);
-		page_cache_release(page);
-		cond_resched();
-	}
-
-	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
-		i_size_write(inode, offset + len);
-ctime:
-	inode->i_ctime = CURRENT_TIME;
-out:
-	mutex_unlock(&inode->i_mutex);
-	return error;
-}
-
 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
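
The removed handler also served FALLOC_FL_PUNCH_HOLE, so the revert drops hole punching on tmpfs as well. Another hedged sketch under the same assumptions (tmpfs at /dev/shm, hypothetical file name): it punches out four pages and reads back st_blocks, which shrinks while the handler is present; once the handler is gone, the call is expected to fail with EOPNOTSUPP.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>	/* FALLOC_FL_PUNCH_HOLE, FALLOC_FL_KEEP_SIZE */
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct stat st;
	int fd = open("/dev/shm/punch-test", O_RDWR | O_CREAT, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Populate 16 pages so there is something to punch out. */
	memset(buf, 0xaa, sizeof(buf));
	for (int i = 0; i < 16; i++)
		if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
			perror("write");

	/* Punch pages 4..7; the VFS requires KEEP_SIZE with PUNCH_HOLE. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4 * 4096, 4 * 4096) < 0)
		perror("fallocate");	/* EOPNOTSUPP after the revert */

	if (fstat(fd, &st) == 0)
		printf("st_blocks now %lld\n", (long long)st.st_blocks);

	close(fd);
	unlink("/dev/shm/punch-test");
	return 0;
}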