get rid of s_files and files_lock

commit eee5cc2702929fd41cce28058dc6d6717f723f87 upstream.

The only thing we need it for is alt-sysrq-r (emergency remount r/o)
and these days we can do just as well without going through the
list of files.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
[wangkai: backport to 3.10: adjust context]
Signed-off-by: Wang Kai <morgan.wang@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Al Viro 2013-10-04 11:06:42 -04:00 committed by Greg Kroah-Hartman
parent 2a7abb5a0b
commit 68c8a7ae30
5 changed files with 2 additions and 162 deletions
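
As the commit message above notes, the per-superblock list of open files (s_files, guarded by files_lglock) existed only so that emergency remount r/o could walk every open file and strip its write mode; the replacement is a per-superblock flag that would-be writers check, set in the do_remount_sb hunk further down. Below is a minimal, self-contained userspace sketch of that flag-instead-of-list idea; it is not kernel code and not part of the patch, and the names sketch_sb, sb_force_remount_ro and sb_start_write_sketch are invented for illustration.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy "superblock": a single flag replaces the old per-sb list of open files. */
struct sketch_sb {
	atomic_bool readonly_remount;
};

/* Forced remount r/o: publish the flag instead of walking every open file. */
static void sb_force_remount_ro(struct sketch_sb *sb)
{
	/* Release ordering stands in for the kernel's smp_wmb(). */
	atomic_store_explicit(&sb->readonly_remount, true, memory_order_release);
}

/* Each would-be writer checks the flag instead of being tracked on a list. */
static bool sb_start_write_sketch(struct sketch_sb *sb)
{
	return !atomic_load_explicit(&sb->readonly_remount, memory_order_acquire);
}

int main(void)
{
	struct sketch_sb sb = { .readonly_remount = false };

	printf("write allowed before: %d\n", sb_start_write_sketch(&sb));
	sb_force_remount_ro(&sb);		/* alt-sysrq-r path */
	printf("write allowed after:  %d\n", sb_start_write_sketch(&sb));
	return 0;
}

In the kernel the checking side is not new code: the existing mnt_want_write()/sb_writers machinery already refuses new writers once the superblock is flagged read-only, which is what makes the per-file list and its lock removable.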

fs/file_table.c

@@ -36,8 +36,6 @@ struct files_stat_struct files_stat = {
.max_files = NR_FILE
};
DEFINE_STATIC_LGLOCK(files_lglock);
/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;
@@ -134,7 +132,6 @@ struct file *get_empty_filp(void)
return ERR_PTR(error);
}
INIT_LIST_HEAD(&f->f_u.fu_list);
atomic_long_set(&f->f_count, 1);
rwlock_init(&f->f_owner.lock);
spin_lock_init(&f->f_lock);
@@ -304,7 +301,6 @@ void fput(struct file *file)
if (atomic_long_dec_and_test(&file->f_count)) {
struct task_struct *task = current;
file_sb_list_del(file);
if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
init_task_work(&file->f_u.fu_rcuhead, ____fput);
if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
@@ -328,7 +324,6 @@ void __fput_sync(struct file *file)
{
if (atomic_long_dec_and_test(&file->f_count)) {
struct task_struct *task = current;
file_sb_list_del(file);
BUG_ON(!(task->flags & PF_KTHREAD));
__fput(file);
}
@@ -340,127 +335,10 @@ void put_filp(struct file *file)
{
if (atomic_long_dec_and_test(&file->f_count)) {
security_file_free(file);
file_sb_list_del(file);
file_free(file);
}
}
static inline int file_list_cpu(struct file *file)
{
#ifdef CONFIG_SMP
return file->f_sb_list_cpu;
#else
return smp_processor_id();
#endif
}
/* helper for file_sb_list_add to reduce ifdefs */
static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
{
struct list_head *list;
#ifdef CONFIG_SMP
int cpu;
cpu = smp_processor_id();
file->f_sb_list_cpu = cpu;
list = per_cpu_ptr(sb->s_files, cpu);
#else
list = &sb->s_files;
#endif
list_add(&file->f_u.fu_list, list);
}
/**
* file_sb_list_add - add a file to the sb's file list
* @file: file to add
* @sb: sb to add it to
*
* Use this function to associate a file with the superblock of the inode it
* refers to.
*/
void file_sb_list_add(struct file *file, struct super_block *sb)
{
lg_local_lock(&files_lglock);
__file_sb_list_add(file, sb);
lg_local_unlock(&files_lglock);
}
/**
* file_sb_list_del - remove a file from the sb's file list
* @file: file to remove
* @sb: sb to remove it from
*
* Use this function to remove a file from its superblock.
*/
void file_sb_list_del(struct file *file)
{
if (!list_empty(&file->f_u.fu_list)) {
lg_local_lock_cpu(&files_lglock, file_list_cpu(file));
list_del_init(&file->f_u.fu_list);
lg_local_unlock_cpu(&files_lglock, file_list_cpu(file));
}
}
#ifdef CONFIG_SMP
/*
* These macros iterate all files on all CPUs for a given superblock.
* files_lglock must be held globally.
*/
#define do_file_list_for_each_entry(__sb, __file) \
{ \
int i; \
for_each_possible_cpu(i) { \
struct list_head *list; \
list = per_cpu_ptr((__sb)->s_files, i); \
list_for_each_entry((__file), list, f_u.fu_list)
#define while_file_list_for_each_entry \
} \
}
#else
#define do_file_list_for_each_entry(__sb, __file) \
{ \
struct list_head *list; \
list = &(sb)->s_files; \
list_for_each_entry((__file), list, f_u.fu_list)
#define while_file_list_for_each_entry \
}
#endif
/**
* mark_files_ro - mark all files read-only
* @sb: superblock in question
*
* All files are marked read-only. We don't care about pending
* delete files so this should be used in 'force' mode only.
*/
void mark_files_ro(struct super_block *sb)
{
struct file *f;
lg_global_lock(&files_lglock);
do_file_list_for_each_entry(sb, f) {
if (!S_ISREG(file_inode(f)->i_mode))
continue;
if (!file_count(f))
continue;
if (!(f->f_mode & FMODE_WRITE))
continue;
spin_lock(&f->f_lock);
f->f_mode &= ~FMODE_WRITE;
spin_unlock(&f->f_lock);
if (file_check_writeable(f) != 0)
continue;
__mnt_drop_write(f->f_path.mnt);
file_release_write(f);
} while_file_list_for_each_entry;
lg_global_unlock(&files_lglock);
}
void __init files_init(unsigned long mempages)
{
unsigned long n;
@@ -476,6 +354,5 @@ void __init files_init(unsigned long mempages)
n = (mempages * (PAGE_SIZE / 1024)) / 10;
files_stat.max_files = max_t(unsigned long, n, NR_FILE);
files_defer_init();
lg_lock_init(&files_lglock, "files_lglock");
percpu_counter_init(&nr_files, 0);
}

fs/internal.h

@@ -74,9 +74,6 @@ extern void chroot_fs_refs(const struct path *, const struct path *);
/*
* file_table.c
*/
extern void file_sb_list_add(struct file *f, struct super_block *sb);
extern void file_sb_list_del(struct file *f);
extern void mark_files_ro(struct super_block *);
extern struct file *get_empty_filp(void);
/*

fs/open.c

@@ -674,7 +674,6 @@ static int do_dentry_open(struct file *f,
}
f->f_mapping = inode->i_mapping;
file_sb_list_add(f, inode->i_sb);
if (unlikely(f->f_mode & FMODE_PATH)) {
f->f_op = &empty_fops;
@@ -709,7 +708,6 @@ static int do_dentry_open(struct file *f,
cleanup_all:
fops_put(f->f_op);
file_sb_list_del(f);
if (f->f_mode & FMODE_WRITE) {
if (!special_file(inode->i_mode)) {
/*

fs/super.c

@@ -163,19 +163,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
s = NULL;
goto out;
}
#ifdef CONFIG_SMP
s->s_files = alloc_percpu(struct list_head);
if (!s->s_files)
goto err_out;
else {
int i;
for_each_possible_cpu(i)
INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i));
}
#else
INIT_LIST_HEAD(&s->s_files);
#endif
if (init_sb_writers(s, type))
goto err_out;
s->s_flags = flags;
@@ -225,10 +212,6 @@ out:
return s;
err_out:
security_sb_free(s);
#ifdef CONFIG_SMP
if (s->s_files)
free_percpu(s->s_files);
#endif
destroy_sb_writers(s);
kfree(s);
s = NULL;
@@ -243,9 +226,6 @@ err_out:
*/
static inline void destroy_super(struct super_block *s)
{
#ifdef CONFIG_SMP
free_percpu(s->s_files);
#endif
destroy_sb_writers(s);
security_sb_free(s);
WARN_ON(!list_empty(&s->s_mounts));
@@ -727,7 +707,8 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
make sure there are no rw files opened */
if (remount_ro) {
if (force) {
mark_files_ro(sb);
sb->s_readonly_remount = 1;
smp_wmb();
} else {
retval = sb_prepare_remount_readonly(sb);
if (retval)
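
In the hunk above, mark_files_ro(sb) is the single deleted line; the two lines that follow it, sb->s_readonly_remount = 1; and smp_wmb();, are the commit's only additions. The write barrier orders the flag store ahead of do_remount_sb's later update of the superblock flags and pairs with a read barrier on the path that checks those fields, so a checker that sees the later state also sees the flag. A rough, self-contained C11 analogue of that publish/observe shape follows; it is not kernel code, the names are invented, and it makes no claim to the kernel's exact invariants.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool readonly_remount;	/* stands in for sb->s_readonly_remount */
static atomic_int  sb_flags;		/* stands in for the later s_flags update */

/* Publisher: set the flag, write barrier, then update the visible state. */
static void remount_force_ro(void)
{
	atomic_store_explicit(&readonly_remount, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	atomic_store_explicit(&sb_flags, 1, memory_order_relaxed);
}

/* Checker: read in the opposite order with a read barrier in between. */
static bool saw_flag_if_saw_state(void)
{
	int state = atomic_load_explicit(&sb_flags, memory_order_relaxed);
	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
	bool flag = atomic_load_explicit(&readonly_remount, memory_order_relaxed);

	/* If state shows the post-remount value, flag is guaranteed true. */
	return state ? flag : true;
}

int main(void)
{
	/* Single-threaded demo; the interesting case is a concurrent checker. */
	remount_force_ro();
	printf("invariant holds: %d\n", saw_flag_if_saw_state());
	return 0;
}

Here atomic_thread_fence() plays the role of smp_wmb()/smp_rmb(); in the kernel the reader side lives in the mount write-access checks rather than in this file.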

include/linux/fs.h

@@ -762,12 +762,7 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
#define FILE_MNT_WRITE_RELEASED 2
struct file {
/*
* fu_list becomes invalid after file_free is called and queued via
* fu_rcuhead for RCU freeing
*/
union {
struct list_head fu_list;
struct llist_node fu_llist;
struct rcu_head fu_rcuhead;
} f_u;
@@ -781,9 +776,6 @@ struct file {
* Must not be taken from IRQ context.
*/
spinlock_t f_lock;
#ifdef CONFIG_SMP
int f_sb_list_cpu;
#endif
atomic_long_t f_count;
unsigned int f_flags;
fmode_t f_mode;
@@ -1259,11 +1251,6 @@ struct super_block {
struct list_head s_inodes; /* all inodes */
struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
#ifdef CONFIG_SMP
struct list_head __percpu *s_files;
#else
struct list_head s_files;
#endif
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
/* s_dentry_lru, s_nr_dentry_unused protected by dcache.c lru locks */
struct list_head s_dentry_lru; /* unused dentry lru */