Revert "vmstat: on-demand vmstat workers V8"

This reverts commit d75907c9fa.

This patch is one of the six patches that were initially picked
to fix an issue where tasks were getting blocked in the reclaim
path, but these patches were found to cause CPU wakeups.

Change-Id: I4c76dff00e5728f16a8fb0bda2529fd2bfd837d7
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Author: Vinayak Menon <vinmenon@codeaurora.org>
Date:   2015-03-26 15:37:18 +05:30
Parent: 698e00109f
Commit: 7bdbf805b6

--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -7,7 +7,6 @@
  *  zoned VM statistics
  *  Copyright (C) 2006 Silicon Graphics, Inc.,
  *		Christoph Lameter <christoph@lameter.com>
- *  Copyright (C) 2008-2014 Christoph Lameter
  */
 #include <linux/fs.h>
 #include <linux/mm.h>
@@ -15,7 +14,6 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/cpu.h>
-#include <linux/cpumask.h>
 #include <linux/vmstat.h>
 #include <linux/sched.h>
 #include <linux/math64.h>
@@ -419,22 +417,13 @@ void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 EXPORT_SYMBOL(dec_zone_page_state);
 #endif
 
 /*
  * Fold a differential into the global counters.
- * Returns the number of counters updated.
  */
-static int fold_diff(int *diff)
+static inline void fold_diff(int *diff)
 {
 	int i;
-	int changes = 0;
 
 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
-		if (diff[i]) {
+		if (diff[i])
 			atomic_long_add(diff[i], &vm_stat[i]);
-			changes++;
-	}
-	return changes;
 }
 
 /*
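A note on the hunk above: the reverted fold_diff() returned a count of folded counters precisely so the caller could tell whether this CPU had any vmstat activity; the restored version folds silently. Below is a minimal userspace sketch of the counting variant, using C11 atomics in place of the kernel's atomic_long_t; NR_ITEMS and global_stat are illustrative stand-ins, not kernel symbols.

    #include <stdatomic.h>

    #define NR_ITEMS 4                        /* stand-in for NR_VM_ZONE_STAT_ITEMS */
    static atomic_long global_stat[NR_ITEMS]; /* stand-in for vm_stat[] */

    /* Counting form (removed by this revert): fold each nonzero
     * differential into the global counters and report how many
     * counters changed; zero means this CPU produced no updates. */
    static int fold_diff_counting(int *diff)
    {
            int i, changes = 0;

            for (i = 0; i < NR_ITEMS; i++)
                    if (diff[i]) {
                            atomic_fetch_add(&global_stat[i], diff[i]);
                            changes++;
                    }
            return changes;
    }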
@@ -450,15 +439,12 @@ static int fold_diff(int *diff)
  * statistics in the remote zone struct as well as the global cachelines
  * with the global counters. These could cause remote node cache line
  * bouncing and will have to be only done when necessary.
- *
- * The function returns the number of global counters updated.
  */
-static int refresh_cpu_vm_stats(void)
+static void refresh_cpu_vm_stats(void)
 {
 	struct zone *zone;
 	int i;
 	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
-	int changes = 0;
 
 	for_each_populated_zone(zone) {
 		struct per_cpu_pageset __percpu *p = zone->pageset;
@@ -498,17 +484,15 @@ static int refresh_cpu_vm_stats(void)
 			continue;
 		}
 
 		if (__this_cpu_dec_return(p->expire))
 			continue;
 
-		if (__this_cpu_read(p->pcp.count)) {
-			drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
-			changes++;
-		}
+		if (__this_cpu_read(p->pcp.count))
+			drain_zone_pages(zone, __this_cpu_ptr(&p->pcp));
 #endif
 	}
 
-	changes += fold_diff(global_diff);
-	return changes;
+	fold_diff(global_diff);
 }
 
 /*
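For context on refresh_cpu_vm_stats(): each CPU accumulates small signed per-counter differentials and periodically folds them into the global array, and the kernel reads and zeroes each diff in one step (this_cpu_xchg()) so no concurrent increment is lost. A rough userspace analogue under that assumption; the _sim names and array sizes are hypothetical, and the per-zone structure is collapsed to one diff array per CPU.

    #include <stdatomic.h>

    #define NR_CPUS_SIM 4
    #define NR_ITEMS    4   /* stand-in for NR_VM_ZONE_STAT_ITEMS */

    static _Atomic signed char vm_stat_diff[NR_CPUS_SIM][NR_ITEMS];
    static atomic_long vm_stat[NR_ITEMS];

    /* Fold one CPU's differentials into the global counters and report
     * whether anything changed, mirroring the reverted return value. */
    static int refresh_cpu_vm_stats_sim(int cpu)
    {
            int i, changes = 0;

            for (i = 0; i < NR_ITEMS; i++) {
                    /* atomic_exchange mirrors this_cpu_xchg(): read the
                     * diff and zero it in a single atomic step. */
                    signed char v = atomic_exchange(&vm_stat_diff[cpu][i], 0);

                    if (v) {
                            atomic_fetch_add(&vm_stat[i], v);
                            changes++;
                    }
            }
            return changes;
    }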
@@ -1318,108 +1302,20 @@ static const struct file_operations proc_vmstat_file_operations = {
 #ifdef CONFIG_SMP
 static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
 int sysctl_stat_interval __read_mostly = HZ;
-static cpumask_var_t cpu_stat_off;
 
 static void vmstat_update(struct work_struct *w)
 {
-	if (refresh_cpu_vm_stats())
-		/*
-		 * Counters were updated so we expect more updates
-		 * to occur in the future. Keep on running the
-		 * update worker thread.
-		 */
-		schedule_delayed_work(this_cpu_ptr(&vmstat_work),
-			round_jiffies_relative(sysctl_stat_interval));
-	else {
-		/*
-		 * We did not update any counters so the app may be in
-		 * a mode where it does not cause counter updates.
-		 * We may be uselessly running vmstat_update.
-		 * Defer the checking for differentials to the
-		 * shepherd thread on a different processor.
-		 */
-		int r;
-		/*
-		 * Shepherd work thread does not race since it never
-		 * changes the bit if its zero but the cpu
-		 * online / off line code may race if
-		 * worker threads are still allowed during
-		 * shutdown / startup.
-		 */
-		r = cpumask_test_and_set_cpu(smp_processor_id(),
-			cpu_stat_off);
-		VM_BUG_ON(r);
-	}
-}
-
-/*
- * Check if the diffs for a certain cpu indicate that
- * an update is needed.
- */
-static bool need_update(int cpu)
-{
-	struct zone *zone;
-
-	for_each_populated_zone(zone) {
-		struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
-
-		BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
-		/*
-		 * The fast way of checking if there are any vmstat diffs.
-		 * This works because the diffs are byte sized items.
-		 */
-		if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS))
-			return true;
-	}
-	return false;
-}
-
-/*
- * Shepherd worker thread that checks the
- * differentials of processors that have their worker
- * threads for vm statistics updates disabled because of
- * inactivity.
- */
-static void vmstat_shepherd(struct work_struct *w);
-
-static DECLARE_DELAYED_WORK(shepherd, vmstat_shepherd);
-
-static void vmstat_shepherd(struct work_struct *w)
-{
-	int cpu;
-
-	get_online_cpus();
-	/* Check processors whose vmstat worker threads have been disabled */
-	for_each_cpu(cpu, cpu_stat_off)
-		if (need_update(cpu) &&
-			cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
-			schedule_delayed_work_on(cpu, &per_cpu(vmstat_work, cpu),
-				__round_jiffies_relative(sysctl_stat_interval, cpu));
-
-	put_online_cpus();
-
-	schedule_delayed_work(&shepherd,
+	refresh_cpu_vm_stats();
+	schedule_delayed_work(&__get_cpu_var(vmstat_work),
 		round_jiffies_relative(sysctl_stat_interval));
 }
 
-static void __init start_shepherd_timer(void)
+static void __cpuinit start_cpu_timer(int cpu)
 {
-	int cpu;
+	struct delayed_work *work = &per_cpu(vmstat_work, cpu);
 
-	for_each_possible_cpu(cpu)
-		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
-			vmstat_update);
-
-	if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL))
-		BUG();
-	cpumask_copy(cpu_stat_off, cpu_online_mask);
-
-	schedule_delayed_work(&shepherd,
-		round_jiffies_relative(sysctl_stat_interval));
+	INIT_DEFERRABLE_WORK(work, vmstat_update);
+	schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
 }
 
 /*
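The removed need_update()/vmstat_shepherd() pair is the on-demand machinery itself: an idle CPU parks its worker in cpu_stat_off, and the shepherd periodically scans the parked CPUs, restarting a worker only when that CPU's diffs show activity. The scan is a single memchr_inv() call, which works only because each vm_stat_diff entry is one byte wide (hence the BUILD_BUG_ON). memchr_inv() is kernel-internal, so this userspace sketch carries its own stand-in; the _sim names are illustrative.

    #include <stddef.h>
    #include <stdbool.h>

    /* Stand-in for the kernel's memchr_inv(): return a pointer to the
     * first byte that differs from c, or NULL if all n bytes match. */
    static const void *memchr_inv_sim(const void *start, int c, size_t n)
    {
            const unsigned char *p = start;

            while (n--) {
                    if (*p != (unsigned char)c)
                            return p;
                    p++;
            }
            return NULL;
    }

    /* need_update() analogue: one linear byte scan covers the whole
     * differential array, because each counter diff fits in an s8. */
    static bool need_update_sim(const signed char *vm_stat_diff, size_t nitems)
    {
            return memchr_inv_sim(vm_stat_diff, 0, nitems) != NULL;
    }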
@@ -1436,17 +1332,17 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		refresh_zone_stat_thresholds();
+		start_cpu_timer(cpu);
 		node_set_state(cpu_to_node(cpu), N_CPU);
-		cpumask_set_cpu(cpu, cpu_stat_off);
 		break;
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
-		cpumask_clear_cpu(cpu, cpu_stat_off);
+		per_cpu(vmstat_work, cpu).work.func = NULL;
 		break;
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
-		cpumask_set_cpu(cpu, cpu_stat_off);
+		start_cpu_timer(cpu);
 		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
@@ -1465,10 +1361,13 @@ static struct notifier_block __cpuinitdata vmstat_notifier =
 static int __init setup_vmstat(void)
 {
 #ifdef CONFIG_SMP
+	int cpu;
+
 	cpu_notifier_register_begin();
 	__register_cpu_notifier(&vmstat_notifier);
 
-	start_shepherd_timer();
+	for_each_online_cpu(cpu)
+		start_cpu_timer(cpu);
 	cpu_notifier_register_done();
 #endif
 #ifdef CONFIG_PROC_FS
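What the revert restores, end to end: setup_vmstat() arms one deferrable delayed work per online CPU, and each vmstat_update() unconditionally re-arms itself every sysctl_stat_interval, keeping stats fresh at the cost of a periodic per-CPU wakeup. A toy userspace analogue of that self-rearming loop, with pthreads and sleep() standing in for the kernel's delayed workqueue; all names here are illustrative, not kernel APIs.

    #include <pthread.h>
    #include <unistd.h>

    static int stat_interval_secs = 1;  /* stands in for sysctl_stat_interval */

    static void refresh_cpu_vm_stats_stub(void) { /* fold diffs here */ }

    static void *vmstat_worker(void *arg)
    {
            (void)arg;
            for (;;) {
                    refresh_cpu_vm_stats_stub();
                    /* Unconditional re-arm: this is the periodic wakeup
                     * the on-demand scheme tried to eliminate. */
                    sleep(stat_interval_secs);
            }
            return NULL;
    }

    /* start_cpu_timer() analogue: one worker per simulated CPU. */
    static pthread_t start_cpu_timer_sim(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, vmstat_worker, NULL);
            return t;
    }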