Merge branch 'splice' of git://brick.kernel.dk/data/git/linux-2.6-block

* 'splice' of git://brick.kernel.dk/data/git/linux-2.6-block:
  [PATCH] vfs: add splice_write and splice_read to documentation
  [PATCH] Remove sys_ prefix of new syscalls from __NR_sys_*
  [PATCH] splice: warning fix
  [PATCH] another round of fs/pipe.c cleanups
  [PATCH] splice: comment styles
  [PATCH] splice: add Ingo as addition copyright holder
  [PATCH] splice: unlikely() optimizations
  [PATCH] splice: speedups and optimizations
  [PATCH] pipe.c/fifo.c code cleanups
  [PATCH] get rid of the PIPE_*() macros
  [PATCH] splice: speedup __generic_file_splice_read
  [PATCH] splice: add direct fd <-> fd splicing support
  [PATCH] splice: add optional input and output offsets
  [PATCH] introduce a "kernel-internal pipe object" abstraction
  [PATCH] splice: be smarter about calling do_page_cache_readahead()
  [PATCH] splice: optimize the splice buffer mapping
  [PATCH] splice: cleanup __generic_file_splice_read()
  [PATCH] splice: only call wake_up_interruptible() when we really have to
  [PATCH] splice: potential !page dereference
  [PATCH] splice: mark the io page as accessed
Linus Torvalds 2006-04-11 06:34:02 -07:00
commit 88dd9c16ce
16 changed files with 608 additions and 367 deletions

Documentation/filesystems/vfs.txt

@ -694,7 +694,7 @@ struct file_operations
----------------------
This describes how the VFS can manipulate an open file. As of kernel
2.6.13, the following members are defined:
2.6.17, the following members are defined:
struct file_operations {
loff_t (*llseek) (struct file *, loff_t, int);
@ -723,6 +723,10 @@ struct file_operations {
int (*check_flags)(int);
int (*dir_notify)(struct file *filp, unsigned long arg);
int (*flock) (struct file *, int, struct file_lock *);
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, size_t, unsigned int);
ssize_t (*splice_read)(struct file *, struct pipe_inode_info *, size_t, unsigned int);
};
Again, all methods are called without any locks being held, unless
@ -790,6 +794,12 @@ otherwise noted.
flock: called by the flock(2) system call
splice_write: called by the VFS to splice data from a pipe to a file. This
method is used by the splice(2) system call
splice_read: called by the VFS to splice data from file to a pipe. This
method is used by the splice(2) system call
Note that the file operations are implemented by the specific
filesystem in which the inode resides. When opening a device node
(character or block special) most filesystems will call special
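
The new file_operations members map straight onto the six-argument splice(2) call added by this series. Below is a minimal, hypothetical userspace sketch (not part of this commit) that copies one regular file to another through a pipe; it assumes an i386 kernel with this merge applied, uses the __NR_splice value (313) shown later in this commit, and invokes syscall() directly since the C library of this era has no splice() wrapper.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>

#ifndef __NR_splice
#define __NR_splice 313			/* i386 value from this commit */
#endif

/* thin wrapper -- glibc has no splice() entry point yet */
static long xsplice(int fd_in, loff_t *off_in, int fd_out, loff_t *off_out,
		    size_t len, unsigned int flags)
{
	return syscall(__NR_splice, fd_in, off_in, fd_out, off_out, len, flags);
}

int main(int argc, char *argv[])
{
	int in, out, pfd[2];
	long moved;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <infile> <outfile>\n", argv[0]);
		return 1;
	}
	in = open(argv[1], O_RDONLY);
	out = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (in < 0 || out < 0 || pipe(pfd) < 0) {
		perror("setup");
		return 1;
	}

	/* file -> pipe exercises ->splice_read, pipe -> file ->splice_write */
	while ((moved = xsplice(in, NULL, pfd[1], NULL, 65536, 0)) > 0) {
		while (moved > 0) {
			long n = xsplice(pfd[0], NULL, out, NULL, moved, 0);
			if (n <= 0) {
				perror("splice");
				return 1;
			}
			moved -= n;
		}
	}
	return moved < 0 ? 1 : 0;
}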

fs/fifo.c

@ -15,30 +15,35 @@
#include <linux/fs.h>
#include <linux/pipe_fs_i.h>
static void wait_for_partner(struct inode* inode, unsigned int* cnt)
static void wait_for_partner(struct inode* inode, unsigned int *cnt)
{
int cur = *cnt;
while(cur == *cnt) {
pipe_wait(inode);
if(signal_pending(current))
while (cur == *cnt) {
pipe_wait(inode->i_pipe);
if (signal_pending(current))
break;
}
}
static void wake_up_partner(struct inode* inode)
{
wake_up_interruptible(PIPE_WAIT(*inode));
wake_up_interruptible(&inode->i_pipe->wait);
}
static int fifo_open(struct inode *inode, struct file *filp)
{
struct pipe_inode_info *pipe;
int ret;
mutex_lock(PIPE_MUTEX(*inode));
if (!inode->i_pipe) {
mutex_lock(&inode->i_mutex);
pipe = inode->i_pipe;
if (!pipe) {
ret = -ENOMEM;
if(!pipe_new(inode))
pipe = alloc_pipe_info(inode);
if (!pipe)
goto err_nocleanup;
inode->i_pipe = pipe;
}
filp->f_version = 0;
@ -53,18 +58,18 @@ static int fifo_open(struct inode *inode, struct file *filp)
* opened, even when there is no process writing the FIFO.
*/
filp->f_op = &read_fifo_fops;
PIPE_RCOUNTER(*inode)++;
if (PIPE_READERS(*inode)++ == 0)
pipe->r_counter++;
if (pipe->readers++ == 0)
wake_up_partner(inode);
if (!PIPE_WRITERS(*inode)) {
if (!pipe->writers) {
if ((filp->f_flags & O_NONBLOCK)) {
/* suppress POLLHUP until we have
* seen a writer */
filp->f_version = PIPE_WCOUNTER(*inode);
filp->f_version = pipe->w_counter;
} else
{
wait_for_partner(inode, &PIPE_WCOUNTER(*inode));
wait_for_partner(inode, &pipe->w_counter);
if(signal_pending(current))
goto err_rd;
}
@ -78,16 +83,16 @@ static int fifo_open(struct inode *inode, struct file *filp)
* errno=ENXIO when there is no process reading the FIFO.
*/
ret = -ENXIO;
if ((filp->f_flags & O_NONBLOCK) && !PIPE_READERS(*inode))
if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
goto err;
filp->f_op = &write_fifo_fops;
PIPE_WCOUNTER(*inode)++;
if (!PIPE_WRITERS(*inode)++)
pipe->w_counter++;
if (!pipe->writers++)
wake_up_partner(inode);
if (!PIPE_READERS(*inode)) {
wait_for_partner(inode, &PIPE_RCOUNTER(*inode));
if (!pipe->readers) {
wait_for_partner(inode, &pipe->r_counter);
if (signal_pending(current))
goto err_wr;
}
@ -102,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
*/
filp->f_op = &rdwr_fifo_fops;
PIPE_READERS(*inode)++;
PIPE_WRITERS(*inode)++;
PIPE_RCOUNTER(*inode)++;
PIPE_WCOUNTER(*inode)++;
if (PIPE_READERS(*inode) == 1 || PIPE_WRITERS(*inode) == 1)
pipe->readers++;
pipe->writers++;
pipe->r_counter++;
pipe->w_counter++;
if (pipe->readers == 1 || pipe->writers == 1)
wake_up_partner(inode);
break;
@ -116,27 +121,27 @@ static int fifo_open(struct inode *inode, struct file *filp)
}
/* Ok! */
mutex_unlock(PIPE_MUTEX(*inode));
mutex_unlock(&inode->i_mutex);
return 0;
err_rd:
if (!--PIPE_READERS(*inode))
wake_up_interruptible(PIPE_WAIT(*inode));
if (!--pipe->readers)
wake_up_interruptible(&pipe->wait);
ret = -ERESTARTSYS;
goto err;
err_wr:
if (!--PIPE_WRITERS(*inode))
wake_up_interruptible(PIPE_WAIT(*inode));
if (!--pipe->writers)
wake_up_interruptible(&pipe->wait);
ret = -ERESTARTSYS;
goto err;
err:
if (!PIPE_READERS(*inode) && !PIPE_WRITERS(*inode))
if (!pipe->readers && !pipe->writers)
free_pipe_info(inode);
err_nocleanup:
mutex_unlock(PIPE_MUTEX(*inode));
mutex_unlock(&inode->i_mutex);
return ret;
}

fs/pipe.c

@ -36,7 +36,7 @@
*/
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct inode * inode)
void pipe_wait(struct pipe_inode_info *pipe)
{
DEFINE_WAIT(wait);
@ -44,11 +44,14 @@ void pipe_wait(struct inode * inode)
* Pipes are system-local resources, so sleeping on them
* is considered a noninteractive wait:
*/
prepare_to_wait(PIPE_WAIT(*inode), &wait, TASK_INTERRUPTIBLE|TASK_NONINTERACTIVE);
mutex_unlock(PIPE_MUTEX(*inode));
prepare_to_wait(&pipe->wait, &wait,
TASK_INTERRUPTIBLE | TASK_NONINTERACTIVE);
if (pipe->inode)
mutex_unlock(&pipe->inode->i_mutex);
schedule();
finish_wait(PIPE_WAIT(*inode), &wait);
mutex_lock(PIPE_MUTEX(*inode));
finish_wait(&pipe->wait, &wait);
if (pipe->inode)
mutex_lock(&pipe->inode->i_mutex);
}
static int
@ -91,7 +94,8 @@ pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len)
return 0;
}
static void anon_pipe_buf_release(struct pipe_inode_info *info, struct pipe_buffer *buf)
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct page *page = buf->page;
@ -100,30 +104,27 @@ static void anon_pipe_buf_release(struct pipe_inode_info *info, struct pipe_buff
/*
* If nobody else uses this page, and we don't already have a
* temporary page, let's keep track of it as a one-deep
* allocation cache
* allocation cache. (Otherwise just release our reference to it)
*/
if (page_count(page) == 1 && !info->tmp_page) {
info->tmp_page = page;
return;
}
/*
* Otherwise just release our reference to it
*/
page_cache_release(page);
if (page_count(page) == 1 && !pipe->tmp_page)
pipe->tmp_page = page;
else
page_cache_release(page);
}
static void *anon_pipe_buf_map(struct file *file, struct pipe_inode_info *info, struct pipe_buffer *buf)
static void * anon_pipe_buf_map(struct file *file, struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
return kmap(buf->page);
}
static void anon_pipe_buf_unmap(struct pipe_inode_info *info, struct pipe_buffer *buf)
static void anon_pipe_buf_unmap(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
kunmap(buf->page);
}
static int anon_pipe_buf_steal(struct pipe_inode_info *info,
static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
buf->flags |= PIPE_BUF_FLAG_STOLEN;
@ -143,7 +144,7 @@ pipe_readv(struct file *filp, const struct iovec *_iov,
unsigned long nr_segs, loff_t *ppos)
{
struct inode *inode = filp->f_dentry->d_inode;
struct pipe_inode_info *info;
struct pipe_inode_info *pipe;
int do_wakeup;
ssize_t ret;
struct iovec *iov = (struct iovec *)_iov;
@ -156,13 +157,13 @@ pipe_readv(struct file *filp, const struct iovec *_iov,
do_wakeup = 0;
ret = 0;
mutex_lock(PIPE_MUTEX(*inode));
info = inode->i_pipe;
mutex_lock(&inode->i_mutex);
pipe = inode->i_pipe;
for (;;) {
int bufs = info->nrbufs;
int bufs = pipe->nrbufs;
if (bufs) {
int curbuf = info->curbuf;
struct pipe_buffer *buf = info->bufs + curbuf;
int curbuf = pipe->curbuf;
struct pipe_buffer *buf = pipe->bufs + curbuf;
struct pipe_buf_operations *ops = buf->ops;
void *addr;
size_t chars = buf->len;
@ -171,16 +172,17 @@ pipe_readv(struct file *filp, const struct iovec *_iov,
if (chars > total_len)
chars = total_len;
addr = ops->map(filp, info, buf);
addr = ops->map(filp, pipe, buf);
if (IS_ERR(addr)) {
if (!ret)
ret = PTR_ERR(addr);
break;
}
error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars);
ops->unmap(info, buf);
ops->unmap(pipe, buf);
if (unlikely(error)) {
if (!ret) ret = -EFAULT;
if (!ret)
ret = -EFAULT;
break;
}
ret += chars;
@ -188,10 +190,10 @@ pipe_readv(struct file *filp, const struct iovec *_iov,
buf->len -= chars;
if (!buf->len) {
buf->ops = NULL;
ops->release(info, buf);
ops->release(pipe, buf);
curbuf = (curbuf + 1) & (PIPE_BUFFERS-1);
info->curbuf = curbuf;
info->nrbufs = --bufs;
pipe->curbuf = curbuf;
pipe->nrbufs = --bufs;
do_wakeup = 1;
}
total_len -= chars;
@ -200,9 +202,9 @@ pipe_readv(struct file *filp, const struct iovec *_iov,
}
if (bufs) /* More to do? */
continue;
if (!PIPE_WRITERS(*inode))
if (!pipe->writers)
break;
if (!PIPE_WAITING_WRITERS(*inode)) {
if (!pipe->waiting_writers) {
/* syscall merging: Usually we must not sleep
* if O_NONBLOCK is set, or if we got some data.
* But if a writer sleeps in kernel space, then
@ -216,20 +218,22 @@ pipe_readv(struct file *filp, const struct iovec *_iov,
}
}
if (signal_pending(current)) {
if (!ret) ret = -ERESTARTSYS;
if (!ret)
ret = -ERESTARTSYS;
break;
}
if (do_wakeup) {
wake_up_interruptible_sync(PIPE_WAIT(*inode));
kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
wake_up_interruptible_sync(&pipe->wait);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
pipe_wait(inode);
pipe_wait(pipe);
}
mutex_unlock(PIPE_MUTEX(*inode));
/* Signal writers asynchronously that there is more room. */
mutex_unlock(&inode->i_mutex);
/* Signal writers asynchronously that there is more room. */
if (do_wakeup) {
wake_up_interruptible(PIPE_WAIT(*inode));
kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
wake_up_interruptible(&pipe->wait);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
if (ret > 0)
file_accessed(filp);
@ -240,6 +244,7 @@ static ssize_t
pipe_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
struct iovec iov = { .iov_base = buf, .iov_len = count };
return pipe_readv(filp, &iov, 1, ppos);
}
@ -248,7 +253,7 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
unsigned long nr_segs, loff_t *ppos)
{
struct inode *inode = filp->f_dentry->d_inode;
struct pipe_inode_info *info;
struct pipe_inode_info *pipe;
ssize_t ret;
int do_wakeup;
struct iovec *iov = (struct iovec *)_iov;
@ -262,10 +267,10 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
do_wakeup = 0;
ret = 0;
mutex_lock(PIPE_MUTEX(*inode));
info = inode->i_pipe;
mutex_lock(&inode->i_mutex);
pipe = inode->i_pipe;
if (!PIPE_READERS(*inode)) {
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
ret = -EPIPE;
goto out;
@ -273,23 +278,25 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
/* We try to merge small writes */
chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
if (info->nrbufs && chars != 0) {
int lastbuf = (info->curbuf + info->nrbufs - 1) & (PIPE_BUFFERS-1);
struct pipe_buffer *buf = info->bufs + lastbuf;
if (pipe->nrbufs && chars != 0) {
int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
(PIPE_BUFFERS-1);
struct pipe_buffer *buf = pipe->bufs + lastbuf;
struct pipe_buf_operations *ops = buf->ops;
int offset = buf->offset + buf->len;
if (ops->can_merge && offset + chars <= PAGE_SIZE) {
void *addr;
int error;
addr = ops->map(filp, info, buf);
addr = ops->map(filp, pipe, buf);
if (IS_ERR(addr)) {
error = PTR_ERR(addr);
goto out;
}
error = pipe_iov_copy_from_user(offset + addr, iov,
chars);
ops->unmap(info, buf);
ops->unmap(pipe, buf);
ret = error;
do_wakeup = 1;
if (error)
@ -304,16 +311,18 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
for (;;) {
int bufs;
if (!PIPE_READERS(*inode)) {
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
if (!ret) ret = -EPIPE;
if (!ret)
ret = -EPIPE;
break;
}
bufs = info->nrbufs;
bufs = pipe->nrbufs;
if (bufs < PIPE_BUFFERS) {
int newbuf = (info->curbuf + bufs) & (PIPE_BUFFERS-1);
struct pipe_buffer *buf = info->bufs + newbuf;
struct page *page = info->tmp_page;
int newbuf = (pipe->curbuf + bufs) & (PIPE_BUFFERS-1);
struct pipe_buffer *buf = pipe->bufs + newbuf;
struct page *page = pipe->tmp_page;
int error;
if (!page) {
@ -322,9 +331,9 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
ret = ret ? : -ENOMEM;
break;
}
info->tmp_page = page;
pipe->tmp_page = page;
}
/* Always wakeup, even if the copy fails. Otherwise
/* Always wake up, even if the copy fails. Otherwise
* we lock up (O_NONBLOCK-)readers that sleep due to
* syscall merging.
* FIXME! Is this really true?
@ -337,7 +346,8 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
error = pipe_iov_copy_from_user(kmap(page), iov, chars);
kunmap(page);
if (unlikely(error)) {
if (!ret) ret = -EFAULT;
if (!ret)
ret = -EFAULT;
break;
}
ret += chars;
@ -347,8 +357,8 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
buf->ops = &anon_pipe_buf_ops;
buf->offset = 0;
buf->len = chars;
info->nrbufs = ++bufs;
info->tmp_page = NULL;
pipe->nrbufs = ++bufs;
pipe->tmp_page = NULL;
total_len -= chars;
if (!total_len)
@ -357,27 +367,29 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
if (bufs < PIPE_BUFFERS)
continue;
if (filp->f_flags & O_NONBLOCK) {
if (!ret) ret = -EAGAIN;
if (!ret)
ret = -EAGAIN;
break;
}
if (signal_pending(current)) {
if (!ret) ret = -ERESTARTSYS;
if (!ret)
ret = -ERESTARTSYS;
break;
}
if (do_wakeup) {
wake_up_interruptible_sync(PIPE_WAIT(*inode));
kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
wake_up_interruptible_sync(&pipe->wait);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
do_wakeup = 0;
}
PIPE_WAITING_WRITERS(*inode)++;
pipe_wait(inode);
PIPE_WAITING_WRITERS(*inode)--;
pipe->waiting_writers++;
pipe_wait(pipe);
pipe->waiting_writers--;
}
out:
mutex_unlock(PIPE_MUTEX(*inode));
mutex_unlock(&inode->i_mutex);
if (do_wakeup) {
wake_up_interruptible(PIPE_WAIT(*inode));
kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
wake_up_interruptible(&pipe->wait);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
if (ret > 0)
file_update_time(filp);
@ -389,6 +401,7 @@ pipe_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
return pipe_writev(filp, &iov, 1, ppos);
}
@ -399,7 +412,8 @@ bad_pipe_r(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
}
static ssize_t
bad_pipe_w(struct file *filp, const char __user *buf, size_t count, loff_t *ppos)
bad_pipe_w(struct file *filp, const char __user *buf, size_t count,
loff_t *ppos)
{
return -EBADF;
}
@ -409,21 +423,22 @@ pipe_ioctl(struct inode *pino, struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct inode *inode = filp->f_dentry->d_inode;
struct pipe_inode_info *info;
struct pipe_inode_info *pipe;
int count, buf, nrbufs;
switch (cmd) {
case FIONREAD:
mutex_lock(PIPE_MUTEX(*inode));
info = inode->i_pipe;
mutex_lock(&inode->i_mutex);
pipe = inode->i_pipe;
count = 0;
buf = info->curbuf;
nrbufs = info->nrbufs;
buf = pipe->curbuf;
nrbufs = pipe->nrbufs;
while (--nrbufs >= 0) {
count += info->bufs[buf].len;
count += pipe->bufs[buf].len;
buf = (buf+1) & (PIPE_BUFFERS-1);
}
mutex_unlock(PIPE_MUTEX(*inode));
mutex_unlock(&inode->i_mutex);
return put_user(count, (int __user *)arg);
default:
return -EINVAL;
@ -436,17 +451,17 @@ pipe_poll(struct file *filp, poll_table *wait)
{
unsigned int mask;
struct inode *inode = filp->f_dentry->d_inode;
struct pipe_inode_info *info = inode->i_pipe;
struct pipe_inode_info *pipe = inode->i_pipe;
int nrbufs;
poll_wait(filp, PIPE_WAIT(*inode), wait);
poll_wait(filp, &pipe->wait, wait);
/* Reading only -- no need for acquiring the semaphore. */
nrbufs = info->nrbufs;
nrbufs = pipe->nrbufs;
mask = 0;
if (filp->f_mode & FMODE_READ) {
mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
if (!PIPE_WRITERS(*inode) && filp->f_version != PIPE_WCOUNTER(*inode))
if (!pipe->writers && filp->f_version != pipe->w_counter)
mask |= POLLHUP;
}
@ -456,7 +471,7 @@ pipe_poll(struct file *filp, poll_table *wait)
* Most Unices do not set POLLERR for FIFOs but on Linux they
* behave exactly like pipes for poll().
*/
if (!PIPE_READERS(*inode))
if (!pipe->readers)
mask |= POLLERR;
}
@ -466,17 +481,21 @@ pipe_poll(struct file *filp, poll_table *wait)
static int
pipe_release(struct inode *inode, int decr, int decw)
{
mutex_lock(PIPE_MUTEX(*inode));
PIPE_READERS(*inode) -= decr;
PIPE_WRITERS(*inode) -= decw;
if (!PIPE_READERS(*inode) && !PIPE_WRITERS(*inode)) {
struct pipe_inode_info *pipe;
mutex_lock(&inode->i_mutex);
pipe = inode->i_pipe;
pipe->readers -= decr;
pipe->writers -= decw;
if (!pipe->readers && !pipe->writers) {
free_pipe_info(inode);
} else {
wake_up_interruptible(PIPE_WAIT(*inode));
kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
wake_up_interruptible(&pipe->wait);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
mutex_unlock(PIPE_MUTEX(*inode));
mutex_unlock(&inode->i_mutex);
return 0;
}
@ -487,9 +506,9 @@ pipe_read_fasync(int fd, struct file *filp, int on)
struct inode *inode = filp->f_dentry->d_inode;
int retval;
mutex_lock(PIPE_MUTEX(*inode));
retval = fasync_helper(fd, filp, on, PIPE_FASYNC_READERS(*inode));
mutex_unlock(PIPE_MUTEX(*inode));
mutex_lock(&inode->i_mutex);
retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_readers);
mutex_unlock(&inode->i_mutex);
if (retval < 0)
return retval;
@ -504,9 +523,9 @@ pipe_write_fasync(int fd, struct file *filp, int on)
struct inode *inode = filp->f_dentry->d_inode;
int retval;
mutex_lock(PIPE_MUTEX(*inode));
retval = fasync_helper(fd, filp, on, PIPE_FASYNC_WRITERS(*inode));
mutex_unlock(PIPE_MUTEX(*inode));
mutex_lock(&inode->i_mutex);
retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_writers);
mutex_unlock(&inode->i_mutex);
if (retval < 0)
return retval;
@ -519,16 +538,17 @@ static int
pipe_rdwr_fasync(int fd, struct file *filp, int on)
{
struct inode *inode = filp->f_dentry->d_inode;
struct pipe_inode_info *pipe = inode->i_pipe;
int retval;
mutex_lock(PIPE_MUTEX(*inode));
mutex_lock(&inode->i_mutex);
retval = fasync_helper(fd, filp, on, PIPE_FASYNC_READERS(*inode));
retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
if (retval >= 0)
retval = fasync_helper(fd, filp, on, PIPE_FASYNC_WRITERS(*inode));
retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
mutex_unlock(PIPE_MUTEX(*inode));
mutex_unlock(&inode->i_mutex);
if (retval < 0)
return retval;
@ -567,9 +587,9 @@ pipe_read_open(struct inode *inode, struct file *filp)
{
/* We could have perhaps used atomic_t, but this and friends
below are the only places. So it doesn't seem worthwhile. */
mutex_lock(PIPE_MUTEX(*inode));
PIPE_READERS(*inode)++;
mutex_unlock(PIPE_MUTEX(*inode));
mutex_lock(&inode->i_mutex);
inode->i_pipe->readers++;
mutex_unlock(&inode->i_mutex);
return 0;
}
@ -577,9 +597,9 @@ pipe_read_open(struct inode *inode, struct file *filp)
static int
pipe_write_open(struct inode *inode, struct file *filp)
{
mutex_lock(PIPE_MUTEX(*inode));
PIPE_WRITERS(*inode)++;
mutex_unlock(PIPE_MUTEX(*inode));
mutex_lock(&inode->i_mutex);
inode->i_pipe->writers++;
mutex_unlock(&inode->i_mutex);
return 0;
}
@ -587,12 +607,12 @@ pipe_write_open(struct inode *inode, struct file *filp)
static int
pipe_rdwr_open(struct inode *inode, struct file *filp)
{
mutex_lock(PIPE_MUTEX(*inode));
mutex_lock(&inode->i_mutex);
if (filp->f_mode & FMODE_READ)
PIPE_READERS(*inode)++;
inode->i_pipe->readers++;
if (filp->f_mode & FMODE_WRITE)
PIPE_WRITERS(*inode)++;
mutex_unlock(PIPE_MUTEX(*inode));
inode->i_pipe->writers++;
mutex_unlock(&inode->i_mutex);
return 0;
}
@ -675,37 +695,38 @@ static struct file_operations rdwr_pipe_fops = {
.fasync = pipe_rdwr_fasync,
};
void free_pipe_info(struct inode *inode)
struct pipe_inode_info * alloc_pipe_info(struct inode *inode)
{
int i;
struct pipe_inode_info *info = inode->i_pipe;
struct pipe_inode_info *pipe;
inode->i_pipe = NULL;
for (i = 0; i < PIPE_BUFFERS; i++) {
struct pipe_buffer *buf = info->bufs + i;
if (buf->ops)
buf->ops->release(info, buf);
pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
if (pipe) {
init_waitqueue_head(&pipe->wait);
pipe->r_counter = pipe->w_counter = 1;
pipe->inode = inode;
}
if (info->tmp_page)
__free_page(info->tmp_page);
kfree(info);
return pipe;
}
struct inode* pipe_new(struct inode* inode)
void __free_pipe_info(struct pipe_inode_info *pipe)
{
struct pipe_inode_info *info;
int i;
info = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
if (!info)
goto fail_page;
inode->i_pipe = info;
for (i = 0; i < PIPE_BUFFERS; i++) {
struct pipe_buffer *buf = pipe->bufs + i;
if (buf->ops)
buf->ops->release(pipe, buf);
}
if (pipe->tmp_page)
__free_page(pipe->tmp_page);
kfree(pipe);
}
init_waitqueue_head(PIPE_WAIT(*inode));
PIPE_RCOUNTER(*inode) = PIPE_WCOUNTER(*inode) = 1;
return inode;
fail_page:
return NULL;
void free_pipe_info(struct inode *inode)
{
__free_pipe_info(inode->i_pipe);
inode->i_pipe = NULL;
}
static struct vfsmount *pipe_mnt __read_mostly;
@ -713,6 +734,7 @@ static int pipefs_delete_dentry(struct dentry *dentry)
{
return 1;
}
static struct dentry_operations pipefs_dentry_operations = {
.d_delete = pipefs_delete_dentry,
};
@ -720,13 +742,17 @@ static struct dentry_operations pipefs_dentry_operations = {
static struct inode * get_pipe_inode(void)
{
struct inode *inode = new_inode(pipe_mnt->mnt_sb);
struct pipe_inode_info *pipe;
if (!inode)
goto fail_inode;
if(!pipe_new(inode))
pipe = alloc_pipe_info(inode);
if (!pipe)
goto fail_iput;
PIPE_READERS(*inode) = PIPE_WRITERS(*inode) = 1;
inode->i_pipe = pipe;
pipe->readers = pipe->writers = 1;
inode->i_fop = &rdwr_pipe_fops;
/*
@ -741,10 +767,12 @@ static struct inode * get_pipe_inode(void)
inode->i_gid = current->fsgid;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inode->i_blksize = PAGE_SIZE;
return inode;
fail_iput:
iput(inode);
fail_inode:
return NULL;
}
@ -757,7 +785,7 @@ int do_pipe(int *fd)
struct inode * inode;
struct file *f1, *f2;
int error;
int i,j;
int i, j;
error = -ENFILE;
f1 = get_empty_filp();
@ -790,6 +818,7 @@ int do_pipe(int *fd)
dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &this);
if (!dentry)
goto close_f12_inode_i_j;
dentry->d_op = &pipefs_dentry_operations;
d_add(dentry, inode);
f1->f_vfsmnt = f2->f_vfsmnt = mntget(mntget(pipe_mnt));
@ -813,6 +842,7 @@ int do_pipe(int *fd)
fd_install(j, f2);
fd[0] = i;
fd[1] = j;
return 0;
close_f12_inode_i_j:
@ -837,8 +867,9 @@ no_files:
* d_name - pipe: will go nicely and kill the special-casing in procfs.
*/
static struct super_block *pipefs_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
static struct super_block *
pipefs_get_sb(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data)
{
return get_sb_pseudo(fs_type, "pipe:", NULL, PIPEFS_MAGIC);
}
@ -852,6 +883,7 @@ static struct file_system_type pipe_fs_type = {
static int __init init_pipe_fs(void)
{
int err = register_filesystem(&pipe_fs_type);
if (!err) {
pipe_mnt = kern_mount(&pipe_fs_type);
if (IS_ERR(pipe_mnt)) {

fs/read_write.c

@ -202,7 +202,7 @@ int rw_verify_area(int read_write, struct file *file, loff_t *ppos, size_t count
goto Einval;
inode = file->f_dentry->d_inode;
if (inode->i_flock && MANDATORY_LOCK(inode)) {
if (unlikely(inode->i_flock && MANDATORY_LOCK(inode))) {
int retval = locks_mandatory_area(
read_write == READ ? FLOCK_VERIFY_READ : FLOCK_VERIFY_WRITE,
inode, file, pos, count);

fs/splice.c

@ -9,11 +9,12 @@
* that transfers data buffers to or from a pipe buffer.
*
* Named by Larry McVoy, original implementation from Linus, extended by
* Jens to support splicing to files and fixing the initial implementation
* bugs.
* Jens to support splicing to files, network, direct splicing, etc and
* fixing lots of bugs.
*
* Copyright (C) 2005 Jens Axboe <axboe@suse.de>
* Copyright (C) 2005 Linus Torvalds <torvalds@osdl.org>
* Copyright (C) 2005-2006 Jens Axboe <axboe@suse.de>
* Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
* Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
*
*/
#include <linux/fs.h>
@ -84,26 +85,43 @@ static void *page_cache_pipe_buf_map(struct file *file,
struct pipe_buffer *buf)
{
struct page *page = buf->page;
lock_page(page);
int err;
if (!PageUptodate(page)) {
lock_page(page);
/*
* Page got truncated/unhashed. This will cause a 0-byte
* splice, if this is the first page.
*/
if (!page->mapping) {
err = -ENODATA;
goto error;
}
/*
* Uh oh, read-error from disk.
*/
if (!PageUptodate(page)) {
err = -EIO;
goto error;
}
/*
* Page is ok afterall, fall through to mapping.
*/
unlock_page(page);
return ERR_PTR(-EIO);
}
if (!page->mapping) {
unlock_page(page);
return ERR_PTR(-ENODATA);
}
return kmap(buf->page);
return kmap(page);
error:
unlock_page(page);
return ERR_PTR(err);
}
static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
struct pipe_buffer *buf)
{
unlock_page(buf->page);
kunmap(buf->page);
}
@ -119,34 +137,30 @@ static struct pipe_buf_operations page_cache_pipe_buf_ops = {
* Pipe output worker. This sets up our pipe format with the page cache
* pipe buffer operations. Otherwise very similar to the regular pipe_writev().
*/
static ssize_t move_to_pipe(struct inode *inode, struct page **pages,
static ssize_t move_to_pipe(struct pipe_inode_info *pipe, struct page **pages,
int nr_pages, unsigned long offset,
unsigned long len, unsigned int flags)
{
struct pipe_inode_info *info;
int ret, do_wakeup, i;
ret = 0;
do_wakeup = 0;
i = 0;
mutex_lock(PIPE_MUTEX(*inode));
if (pipe->inode)
mutex_lock(&pipe->inode->i_mutex);
info = inode->i_pipe;
for (;;) {
int bufs;
if (!PIPE_READERS(*inode)) {
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
if (!ret)
ret = -EPIPE;
break;
}
bufs = info->nrbufs;
if (bufs < PIPE_BUFFERS) {
int newbuf = (info->curbuf + bufs) & (PIPE_BUFFERS - 1);
struct pipe_buffer *buf = info->bufs + newbuf;
if (pipe->nrbufs < PIPE_BUFFERS) {
int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
struct pipe_buffer *buf = pipe->bufs + newbuf;
struct page *page = pages[i++];
unsigned long this_len;
@ -158,8 +172,9 @@ static ssize_t move_to_pipe(struct inode *inode, struct page **pages,
buf->offset = offset;
buf->len = this_len;
buf->ops = &page_cache_pipe_buf_ops;
info->nrbufs = ++bufs;
do_wakeup = 1;
pipe->nrbufs++;
if (pipe->inode)
do_wakeup = 1;
ret += this_len;
len -= this_len;
@ -168,7 +183,7 @@ static ssize_t move_to_pipe(struct inode *inode, struct page **pages,
break;
if (!len)
break;
if (bufs < PIPE_BUFFERS)
if (pipe->nrbufs < PIPE_BUFFERS)
continue;
break;
@ -187,22 +202,26 @@ static ssize_t move_to_pipe(struct inode *inode, struct page **pages,
}
if (do_wakeup) {
wake_up_interruptible_sync(PIPE_WAIT(*inode));
kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO,
POLL_IN);
smp_mb();
if (waitqueue_active(&pipe->wait))
wake_up_interruptible_sync(&pipe->wait);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
do_wakeup = 0;
}
PIPE_WAITING_WRITERS(*inode)++;
pipe_wait(inode);
PIPE_WAITING_WRITERS(*inode)--;
pipe->waiting_writers++;
pipe_wait(pipe);
pipe->waiting_writers--;
}
mutex_unlock(PIPE_MUTEX(*inode));
if (pipe->inode)
mutex_unlock(&pipe->inode->i_mutex);
if (do_wakeup) {
wake_up_interruptible(PIPE_WAIT(*inode));
kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
smp_mb();
if (waitqueue_active(&pipe->wait))
wake_up_interruptible(&pipe->wait);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
while (i < nr_pages)
@ -211,15 +230,16 @@ static ssize_t move_to_pipe(struct inode *inode, struct page **pages,
return ret;
}
static int __generic_file_splice_read(struct file *in, struct inode *pipe,
size_t len, unsigned int flags)
static int
__generic_file_splice_read(struct file *in, struct pipe_inode_info *pipe,
size_t len, unsigned int flags)
{
struct address_space *mapping = in->f_mapping;
unsigned int offset, nr_pages;
struct page *pages[PIPE_BUFFERS], *shadow[PIPE_BUFFERS];
struct page *pages[PIPE_BUFFERS];
struct page *page;
pgoff_t index, pidx;
int i, j;
pgoff_t index;
int i, error;
index = in->f_pos >> PAGE_CACHE_SHIFT;
offset = in->f_pos & ~PAGE_CACHE_MASK;
@ -229,78 +249,94 @@ static int __generic_file_splice_read(struct file *in, struct inode *pipe,
nr_pages = PIPE_BUFFERS;
/*
* initiate read-ahead on this page range
* Initiate read-ahead on this page range. however, don't call into
* read-ahead if this is a non-zero offset (we are likely doing small
* chunk splice and the page is already there) for a single page.
*/
do_page_cache_readahead(mapping, in, index, nr_pages);
if (!offset || nr_pages > 1)
do_page_cache_readahead(mapping, in, index, nr_pages);
/*
* Get as many pages from the page cache as possible..
* Start IO on the page cache entries we create (we
* can assume that any pre-existing ones we find have
* already had IO started on them).
* Now fill in the holes:
*/
i = find_get_pages(mapping, index, nr_pages, pages);
/*
* common case - we found all pages and they are contiguous,
* kick them off
*/
if (i && (pages[i - 1]->index == index + i - 1))
goto splice_them;
/*
* fill shadow[] with pages at the right locations, so we only
* have to fill holes
*/
memset(shadow, 0, nr_pages * sizeof(struct page *));
for (j = 0; j < i; j++)
shadow[pages[j]->index - index] = pages[j];
/*
* now fill in the holes
*/
for (i = 0, pidx = index; i < nr_pages; pidx++, i++) {
int error;
if (shadow[i])
continue;
error = 0;
for (i = 0; i < nr_pages; i++, index++) {
find_page:
/*
* no page there, look one up / create it
* lookup the page for this index
*/
page = find_or_create_page(mapping, pidx,
mapping_gfp_mask(mapping));
if (!page)
break;
page = find_get_page(mapping, index);
if (!page) {
/*
* If in nonblock mode then dont block on
* readpage (we've kicked readahead so there
* will be asynchronous progress):
*/
if (flags & SPLICE_F_NONBLOCK)
break;
if (PageUptodate(page))
unlock_page(page);
else {
error = mapping->a_ops->readpage(in, page);
/*
* page didn't exist, allocate one
*/
page = page_cache_alloc_cold(mapping);
if (!page)
break;
error = add_to_page_cache_lru(page, mapping, index,
mapping_gfp_mask(mapping));
if (unlikely(error)) {
page_cache_release(page);
break;
}
goto readpage;
}
shadow[i] = page;
/*
* If the page isn't uptodate, we may need to start io on it
*/
if (!PageUptodate(page)) {
lock_page(page);
/*
* page was truncated, stop here. if this isn't the
* first page, we'll just complete what we already
* added
*/
if (!page->mapping) {
unlock_page(page);
page_cache_release(page);
break;
}
/*
* page was already under io and is now done, great
*/
if (PageUptodate(page)) {
unlock_page(page);
goto fill_it;
}
readpage:
/*
* need to read in the page
*/
error = mapping->a_ops->readpage(in, page);
if (unlikely(error)) {
page_cache_release(page);
if (error == AOP_TRUNCATED_PAGE)
goto find_page;
break;
}
}
fill_it:
pages[i] = page;
}
if (!i) {
for (i = 0; i < nr_pages; i++) {
if (shadow[i])
page_cache_release(shadow[i]);
}
return 0;
}
if (i)
return move_to_pipe(pipe, pages, i, offset, len, flags);
memcpy(pages, shadow, i * sizeof(struct page *));
/*
* Now we splice them into the pipe..
*/
splice_them:
return move_to_pipe(pipe, pages, i, offset, len, flags);
return error;
}
/**
@ -311,9 +347,8 @@ splice_them:
* @flags: splice modifier flags
*
* Will read pages from given file and fill them into a pipe.
*
*/
ssize_t generic_file_splice_read(struct file *in, struct inode *pipe,
ssize_t generic_file_splice_read(struct file *in, struct pipe_inode_info *pipe,
size_t len, unsigned int flags)
{
ssize_t spliced;
@ -321,6 +356,7 @@ ssize_t generic_file_splice_read(struct file *in, struct inode *pipe,
ret = 0;
spliced = 0;
while (len) {
ret = __generic_file_splice_read(in, pipe, len, flags);
@ -360,10 +396,10 @@ static int pipe_to_sendpage(struct pipe_inode_info *info,
int more;
/*
* sub-optimal, but we are limited by the pipe ->map. we don't
* Sub-optimal, but we are limited by the pipe ->map. We don't
* need a kmap'ed buffer here, we just want to make sure we
* have the page pinned if the pipe page originates from the
* page cache
* page cache.
*/
ptr = buf->ops->map(file, info, buf);
if (IS_ERR(ptr))
@ -414,7 +450,7 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
int ret;
/*
* after this, page will be locked and unmapped
* make sure the data in this buffer is uptodate
*/
src = buf->ops->map(file, info, buf);
if (IS_ERR(src))
@ -424,7 +460,7 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
offset = sd->pos & ~PAGE_CACHE_MASK;
/*
* reuse buf page, if SPLICE_F_MOVE is set
* Reuse buf page, if SPLICE_F_MOVE is set.
*/
if (sd->flags & SPLICE_F_MOVE) {
/*
@ -434,6 +470,9 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
if (buf->ops->steal(info, buf))
goto find_page;
/*
* this will also set the page locked
*/
page = buf->page;
if (add_to_page_cache(page, mapping, index, gfp_mask))
goto find_page;
@ -445,7 +484,7 @@ find_page:
ret = -ENOMEM;
page = find_or_create_page(mapping, index, gfp_mask);
if (!page)
goto out;
goto out_nomem;
/*
* If the page is uptodate, it is also locked. If it isn't
@ -462,7 +501,7 @@ find_page:
if (!PageUptodate(page)) {
/*
* page got invalidated, repeat
* Page got invalidated, repeat.
*/
if (!page->mapping) {
unlock_page(page);
@ -501,12 +540,14 @@ find_page:
} else if (ret)
goto out;
mark_page_accessed(page);
balance_dirty_pages_ratelimited(mapping);
out:
if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
page_cache_release(page);
unlock_page(page);
}
out_nomem:
buf->ops->unmap(info, buf);
return ret;
}
@ -519,11 +560,10 @@ typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
* key here is the 'actor' worker passed in that actually moves the data
* to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
*/
static ssize_t move_from_pipe(struct inode *inode, struct file *out,
static ssize_t move_from_pipe(struct pipe_inode_info *pipe, struct file *out,
size_t len, unsigned int flags,
splice_actor *actor)
{
struct pipe_inode_info *info;
int ret, do_wakeup, err;
struct splice_desc sd;
@ -535,22 +575,19 @@ static ssize_t move_from_pipe(struct inode *inode, struct file *out,
sd.file = out;
sd.pos = out->f_pos;
mutex_lock(PIPE_MUTEX(*inode));
if (pipe->inode)
mutex_lock(&pipe->inode->i_mutex);
info = inode->i_pipe;
for (;;) {
int bufs = info->nrbufs;
if (bufs) {
int curbuf = info->curbuf;
struct pipe_buffer *buf = info->bufs + curbuf;
if (pipe->nrbufs) {
struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
struct pipe_buf_operations *ops = buf->ops;
sd.len = buf->len;
if (sd.len > sd.total_len)
sd.len = sd.total_len;
err = actor(info, buf, &sd);
err = actor(pipe, buf, &sd);
if (err) {
if (!ret && err != -ENODATA)
ret = err;
@ -561,13 +598,14 @@ static ssize_t move_from_pipe(struct inode *inode, struct file *out,
ret += sd.len;
buf->offset += sd.len;
buf->len -= sd.len;
if (!buf->len) {
buf->ops = NULL;
ops->release(info, buf);
curbuf = (curbuf + 1) & (PIPE_BUFFERS - 1);
info->curbuf = curbuf;
info->nrbufs = --bufs;
do_wakeup = 1;
ops->release(pipe, buf);
pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
pipe->nrbufs--;
if (pipe->inode)
do_wakeup = 1;
}
sd.pos += sd.len;
@ -576,11 +614,11 @@ static ssize_t move_from_pipe(struct inode *inode, struct file *out,
break;
}
if (bufs)
if (pipe->nrbufs)
continue;
if (!PIPE_WRITERS(*inode))
if (!pipe->writers)
break;
if (!PIPE_WAITING_WRITERS(*inode)) {
if (!pipe->waiting_writers) {
if (ret)
break;
}
@ -598,31 +636,34 @@ static ssize_t move_from_pipe(struct inode *inode, struct file *out,
}
if (do_wakeup) {
wake_up_interruptible_sync(PIPE_WAIT(*inode));
kill_fasync(PIPE_FASYNC_WRITERS(*inode),SIGIO,POLL_OUT);
smp_mb();
if (waitqueue_active(&pipe->wait))
wake_up_interruptible_sync(&pipe->wait);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
do_wakeup = 0;
}
pipe_wait(inode);
pipe_wait(pipe);
}
mutex_unlock(PIPE_MUTEX(*inode));
if (pipe->inode)
mutex_unlock(&pipe->inode->i_mutex);
if (do_wakeup) {
wake_up_interruptible(PIPE_WAIT(*inode));
kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
smp_mb();
if (waitqueue_active(&pipe->wait))
wake_up_interruptible(&pipe->wait);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
mutex_lock(&out->f_mapping->host->i_mutex);
out->f_pos = sd.pos;
mutex_unlock(&out->f_mapping->host->i_mutex);
return ret;
}
/**
* generic_file_splice_write - splice data from a pipe to a file
* @inode: pipe inode
* @pipe: pipe info
* @out: file to write to
* @len: number of bytes to splice
* @flags: splice modifier flags
@ -631,14 +672,17 @@ static ssize_t move_from_pipe(struct inode *inode, struct file *out,
* the given pipe inode to the given file.
*
*/
ssize_t generic_file_splice_write(struct inode *inode, struct file *out,
size_t len, unsigned int flags)
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
size_t len, unsigned int flags)
{
struct address_space *mapping = out->f_mapping;
ssize_t ret = move_from_pipe(inode, out, len, flags, pipe_to_file);
ssize_t ret;
ret = move_from_pipe(pipe, out, len, flags, pipe_to_file);
/*
* if file or inode is SYNC and we actually wrote some data, sync it
* If file or inode is SYNC and we actually wrote some data, sync it.
*/
if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
&& ret > 0) {
@ -647,7 +691,7 @@ ssize_t generic_file_splice_write(struct inode *inode, struct file *out,
mutex_lock(&inode->i_mutex);
err = generic_osync_inode(mapping->host, mapping,
OSYNC_METADATA|OSYNC_DATA);
OSYNC_METADATA|OSYNC_DATA);
mutex_unlock(&inode->i_mutex);
if (err)
@ -670,10 +714,10 @@ EXPORT_SYMBOL(generic_file_splice_write);
* is involved.
*
*/
ssize_t generic_splice_sendpage(struct inode *inode, struct file *out,
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
size_t len, unsigned int flags)
{
return move_from_pipe(inode, out, len, flags, pipe_to_sendpage);
return move_from_pipe(pipe, out, len, flags, pipe_to_sendpage);
}
EXPORT_SYMBOL(generic_splice_sendpage);
@ -681,19 +725,20 @@ EXPORT_SYMBOL(generic_splice_sendpage);
/*
* Attempt to initiate a splice from pipe to file.
*/
static long do_splice_from(struct inode *pipe, struct file *out, size_t len,
unsigned int flags)
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
size_t len, unsigned int flags)
{
loff_t pos;
int ret;
if (!out->f_op || !out->f_op->splice_write)
if (unlikely(!out->f_op || !out->f_op->splice_write))
return -EINVAL;
if (!(out->f_mode & FMODE_WRITE))
if (unlikely(!(out->f_mode & FMODE_WRITE)))
return -EBADF;
pos = out->f_pos;
ret = rw_verify_area(WRITE, out, &pos, len);
if (unlikely(ret < 0))
return ret;
@ -704,19 +749,20 @@ static long do_splice_from(struct inode *pipe, struct file *out, size_t len,
/*
* Attempt to initiate a splice from a file to a pipe.
*/
static long do_splice_to(struct file *in, struct inode *pipe, size_t len,
unsigned int flags)
static long do_splice_to(struct file *in, struct pipe_inode_info *pipe,
size_t len, unsigned int flags)
{
loff_t pos, isize, left;
int ret;
if (!in->f_op || !in->f_op->splice_read)
if (unlikely(!in->f_op || !in->f_op->splice_read))
return -EINVAL;
if (!(in->f_mode & FMODE_READ))
if (unlikely(!(in->f_mode & FMODE_READ)))
return -EBADF;
pos = in->f_pos;
ret = rw_verify_area(READ, in, &pos, len);
if (unlikely(ret < 0))
return ret;
@ -726,32 +772,168 @@ static long do_splice_to(struct file *in, struct inode *pipe, size_t len,
return 0;
left = isize - in->f_pos;
if (left < len)
if (unlikely(left < len))
len = left;
return in->f_op->splice_read(in, pipe, len, flags);
}
long do_splice_direct(struct file *in, struct file *out, size_t len,
unsigned int flags)
{
struct pipe_inode_info *pipe;
long ret, bytes;
umode_t i_mode;
int i;
/*
* We require the input being a regular file, as we don't want to
* randomly drop data for eg socket -> socket splicing. Use the
* piped splicing for that!
*/
i_mode = in->f_dentry->d_inode->i_mode;
if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
return -EINVAL;
/*
* neither in nor out is a pipe, setup an internal pipe attached to
* 'out' and transfer the wanted data from 'in' to 'out' through that
*/
pipe = current->splice_pipe;
if (unlikely(!pipe)) {
pipe = alloc_pipe_info(NULL);
if (!pipe)
return -ENOMEM;
/*
* We don't have an immediate reader, but we'll read the stuff
* out of the pipe right after the move_to_pipe(). So set
* PIPE_READERS appropriately.
*/
pipe->readers = 1;
current->splice_pipe = pipe;
}
/*
* Do the splice.
*/
ret = 0;
bytes = 0;
while (len) {
size_t read_len, max_read_len;
/*
* Do at most PIPE_BUFFERS pages worth of transfer:
*/
max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));
ret = do_splice_to(in, pipe, max_read_len, flags);
if (unlikely(ret < 0))
goto out_release;
read_len = ret;
/*
* NOTE: nonblocking mode only applies to the input. We
* must not do the output in nonblocking mode as then we
* could get stuck data in the internal pipe:
*/
ret = do_splice_from(pipe, out, read_len,
flags & ~SPLICE_F_NONBLOCK);
if (unlikely(ret < 0))
goto out_release;
bytes += ret;
len -= ret;
/*
* In nonblocking mode, if we got back a short read then
* that was due to either an IO error or due to the
* pagecache entry not being there. In the IO error case
* the _next_ splice attempt will produce a clean IO error
* return value (not a short read), so in both cases it's
* correct to break out of the loop here:
*/
if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
break;
}
pipe->nrbufs = pipe->curbuf = 0;
return bytes;
out_release:
/*
* If we did an incomplete transfer we must release
* the pipe buffers in question:
*/
for (i = 0; i < PIPE_BUFFERS; i++) {
struct pipe_buffer *buf = pipe->bufs + i;
if (buf->ops) {
buf->ops->release(pipe, buf);
buf->ops = NULL;
}
}
pipe->nrbufs = pipe->curbuf = 0;
/*
* If we transferred some data, return the number of bytes:
*/
if (bytes > 0)
return bytes;
return ret;
}
EXPORT_SYMBOL(do_splice_direct);
/*
* Determine where to splice to/from.
*/
static long do_splice(struct file *in, struct file *out, size_t len,
unsigned int flags)
static long do_splice(struct file *in, loff_t __user *off_in,
struct file *out, loff_t __user *off_out,
size_t len, unsigned int flags)
{
struct inode *pipe;
struct pipe_inode_info *pipe;
pipe = in->f_dentry->d_inode->i_pipe;
if (pipe) {
if (off_in)
return -ESPIPE;
if (off_out) {
if (out->f_op->llseek == no_llseek)
return -EINVAL;
if (copy_from_user(&out->f_pos, off_out,
sizeof(loff_t)))
return -EFAULT;
}
pipe = in->f_dentry->d_inode;
if (pipe->i_pipe)
return do_splice_from(pipe, out, len, flags);
}
pipe = out->f_dentry->d_inode->i_pipe;
if (pipe) {
if (off_out)
return -ESPIPE;
if (off_in) {
if (in->f_op->llseek == no_llseek)
return -EINVAL;
if (copy_from_user(&in->f_pos, off_in, sizeof(loff_t)))
return -EFAULT;
}
pipe = out->f_dentry->d_inode;
if (pipe->i_pipe)
return do_splice_to(in, pipe, len, flags);
}
return -EINVAL;
}
asmlinkage long sys_splice(int fdin, int fdout, size_t len, unsigned int flags)
asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
int fd_out, loff_t __user *off_out,
size_t len, unsigned int flags)
{
long error;
struct file *in, *out;
@ -761,13 +943,15 @@ asmlinkage long sys_splice(int fdin, int fdout, size_t len, unsigned int flags)
return 0;
error = -EBADF;
in = fget_light(fdin, &fput_in);
in = fget_light(fd_in, &fput_in);
if (in) {
if (in->f_mode & FMODE_READ) {
out = fget_light(fdout, &fput_out);
out = fget_light(fd_out, &fput_out);
if (out) {
if (out->f_mode & FMODE_WRITE)
error = do_splice(in, out, len, flags);
error = do_splice(in, off_in,
out, off_out,
len, flags);
fput_light(out, fput_out);
}
}

fs/xfs/linux-2.6/xfs_file.c

@ -252,7 +252,7 @@ xfs_file_sendfile_invis(
STATIC ssize_t
xfs_file_splice_read(
struct file *infilp,
struct inode *pipe,
struct pipe_inode_info *pipe,
size_t len,
unsigned int flags)
{
@ -266,7 +266,7 @@ xfs_file_splice_read(
STATIC ssize_t
xfs_file_splice_read_invis(
struct file *infilp,
struct inode *pipe,
struct pipe_inode_info *pipe,
size_t len,
unsigned int flags)
{
@ -279,7 +279,7 @@ xfs_file_splice_read_invis(
STATIC ssize_t
xfs_file_splice_write(
struct inode *pipe,
struct pipe_inode_info *pipe,
struct file *outfilp,
size_t len,
unsigned int flags)
@ -293,7 +293,7 @@ xfs_file_splice_write(
STATIC ssize_t
xfs_file_splice_write_invis(
struct inode *pipe,
struct pipe_inode_info *pipe,
struct file *outfilp,
size_t len,
unsigned int flags)

fs/xfs/linux-2.6/xfs_lrw.c

@ -338,7 +338,7 @@ ssize_t
xfs_splice_read(
bhv_desc_t *bdp,
struct file *infilp,
struct inode *pipe,
struct pipe_inode_info *pipe,
size_t count,
int flags,
int ioflags,
@ -380,7 +380,7 @@ xfs_splice_read(
ssize_t
xfs_splice_write(
bhv_desc_t *bdp,
struct inode *pipe,
struct pipe_inode_info *pipe,
struct file *outfilp,
size_t count,
int flags,

fs/xfs/linux-2.6/xfs_lrw.h

@ -94,9 +94,9 @@ extern ssize_t xfs_sendfile(struct bhv_desc *, struct file *,
loff_t *, int, size_t, read_actor_t,
void *, struct cred *);
extern ssize_t xfs_splice_read(struct bhv_desc *, struct file *,
struct inode *, size_t, int, int,
struct pipe_inode_info *, size_t, int, int,
struct cred *);
extern ssize_t xfs_splice_write(struct bhv_desc *, struct inode *,
extern ssize_t xfs_splice_write(struct bhv_desc *, struct pipe_inode_info *,
struct file *, size_t, int, int,
struct cred *);

fs/xfs/linux-2.6/xfs_vnode.h

@ -174,9 +174,9 @@ typedef ssize_t (*vop_sendfile_t)(bhv_desc_t *, struct file *,
loff_t *, int, size_t, read_actor_t,
void *, struct cred *);
typedef ssize_t (*vop_splice_read_t)(bhv_desc_t *, struct file *,
struct inode *, size_t, int, int,
struct pipe_inode_info *, size_t, int, int,
struct cred *);
typedef ssize_t (*vop_splice_write_t)(bhv_desc_t *, struct inode *,
typedef ssize_t (*vop_splice_write_t)(bhv_desc_t *, struct pipe_inode_info *,
struct file *, size_t, int, int,
struct cred *);
typedef int (*vop_ioctl_t)(bhv_desc_t *, struct inode *, struct file *,

include/asm-i386/unistd.h

@ -318,8 +318,8 @@
#define __NR_unshare 310
#define __NR_set_robust_list 311
#define __NR_get_robust_list 312
#define __NR_sys_splice 313
#define __NR_sys_sync_file_range 314
#define __NR_splice 313
#define __NR_sync_file_range 314
#define NR_syscalls 315

include/linux/fs.h

@ -1039,8 +1039,8 @@ struct file_operations {
int (*check_flags)(int);
int (*dir_notify)(struct file *filp, unsigned long arg);
int (*flock) (struct file *, int, struct file_lock *);
ssize_t (*splice_write)(struct inode *, struct file *, size_t, unsigned int);
ssize_t (*splice_read)(struct file *, struct inode *, size_t, unsigned int);
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, size_t, unsigned int);
ssize_t (*splice_read)(struct file *, struct pipe_inode_info *, size_t, unsigned int);
};
struct inode_operations {
@ -1611,8 +1611,17 @@ extern ssize_t generic_file_sendfile(struct file *, loff_t *, size_t, read_actor
extern void do_generic_mapping_read(struct address_space *mapping,
struct file_ra_state *, struct file *,
loff_t *, read_descriptor_t *, read_actor_t);
extern ssize_t generic_file_splice_read(struct file *, struct inode *, size_t, unsigned int);
extern ssize_t generic_file_splice_write(struct inode *, struct file *, size_t, unsigned int);
/* fs/splice.c */
extern ssize_t generic_file_splice_read(struct file *,
struct pipe_inode_info *, size_t, unsigned int);
extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
struct file *, size_t, unsigned int);
extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
struct file *out, size_t len, unsigned int flags);
extern long do_splice_direct(struct file *in, struct file *out,
size_t len, unsigned int flags);
extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
extern ssize_t generic_file_readv(struct file *filp, const struct iovec *iov,
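
For filesystems that already go through the generic page-cache paths, hooking into splice is just a matter of pointing the new file_operations members at the helpers declared above. A hypothetical in-kernel sketch (the structure name is illustrative, not taken from this commit):

#include <linux/fs.h>

/* any page-cache backed filesystem can reuse the generic splice helpers */
static struct file_operations example_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_file_read,
	.write		= generic_file_write,
	.mmap		= generic_file_mmap,
	.splice_read	= generic_file_splice_read,	/* file -> pipe */
	.splice_write	= generic_file_splice_write,	/* pipe -> file */
};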

include/linux/pipe_fs_i.h

@ -36,27 +36,19 @@ struct pipe_inode_info {
unsigned int w_counter;
struct fasync_struct *fasync_readers;
struct fasync_struct *fasync_writers;
struct inode *inode;
};
/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
memory allocation, whereas PIPE_BUF makes atomicity guarantees. */
#define PIPE_SIZE PAGE_SIZE
#define PIPE_MUTEX(inode) (&(inode).i_mutex)
#define PIPE_WAIT(inode) (&(inode).i_pipe->wait)
#define PIPE_READERS(inode) ((inode).i_pipe->readers)
#define PIPE_WRITERS(inode) ((inode).i_pipe->writers)
#define PIPE_WAITING_WRITERS(inode) ((inode).i_pipe->waiting_writers)
#define PIPE_RCOUNTER(inode) ((inode).i_pipe->r_counter)
#define PIPE_WCOUNTER(inode) ((inode).i_pipe->w_counter)
#define PIPE_FASYNC_READERS(inode) (&((inode).i_pipe->fasync_readers))
#define PIPE_FASYNC_WRITERS(inode) (&((inode).i_pipe->fasync_writers))
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct inode * inode);
void pipe_wait(struct pipe_inode_info *pipe);
struct inode* pipe_new(struct inode* inode);
void free_pipe_info(struct inode* inode);
struct pipe_inode_info * alloc_pipe_info(struct inode * inode);
void free_pipe_info(struct inode * inode);
void __free_pipe_info(struct pipe_inode_info *);
/*
* splice is tied to pipes as a transport (at least for now), so we'll just

include/linux/sched.h

@ -684,6 +684,7 @@ static inline void prefetch_stack(struct task_struct *t) { }
struct audit_context; /* See audit.c */
struct mempolicy;
struct pipe_inode_info;
enum sleep_type {
SLEEP_NORMAL,
@ -882,6 +883,11 @@ struct task_struct {
atomic_t fs_excl; /* holding fs exclusive resources */
struct rcu_head rcu;
/*
* cache last used pipe for splice
*/
struct pipe_inode_info *splice_pipe;
};
static inline pid_t process_group(struct task_struct *tsk)

include/linux/syscalls.h

@ -569,8 +569,11 @@ asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename,
asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,
int flags, int mode);
asmlinkage long sys_unshare(unsigned long unshare_flags);
asmlinkage long sys_splice(int fdin, int fdout, size_t len,
unsigned int flags);
asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
int fd_out, loff_t __user *off_out,
size_t len, unsigned int flags);
asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
unsigned int flags);

kernel/exit.c

@ -34,6 +34,7 @@
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/pipe_fs_i.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@ -941,6 +942,9 @@ fastcall NORET_TYPE void do_exit(long code)
if (tsk->io_context)
exit_io_context();
if (tsk->splice_pipe)
__free_pipe_info(tsk->splice_pipe);
/* PF_DEAD causes final put_task_struct after we schedule. */
preempt_disable();
BUG_ON(tsk->flags & PF_DEAD);

net/socket.c

@ -119,10 +119,6 @@ static ssize_t sock_writev(struct file *file, const struct iovec *vector,
static ssize_t sock_sendpage(struct file *file, struct page *page,
int offset, size_t size, loff_t *ppos, int more);
extern ssize_t generic_splice_sendpage(struct inode *inode, struct file *out,
size_t len, unsigned int flags);
/*
* Socket files have a set of 'special' operations as well as the generic file ones. These don't appear
* in the operation structures but are done directly via the socketcall() multiplexor.