Mirror of https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
flow: structurize flow cache
Group all per-cpu data to one structure instead of having many globals. Also prepare the internals so that we can have multiple instances of the flow cache if needed. Only the kmem_cache is left as a global as all flow caches share the same element size, and benefit from using a common cache.

Signed-off-by: Timo Teras <timo.teras@iki.fi>
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
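The core of the change is easiest to see outside the diff: every piece of per-CPU state that used to live in its own global (hash table pointer, entry count, hash seed, flush tasklet) moves into one struct flow_cache_percpu, and the tunables plus the per-CPU array move into a struct flow_cache instance. Below is a minimal user-space sketch of that pattern; the names mirror the patch, but per-CPU storage is modeled with a plain array and nothing here is the kernel implementation.

/*
 * Sketch only: scattered per-CPU globals become one struct per CPU,
 * owned by a single cache instance that can be created more than once.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct flow_cache_percpu {
	void		**hash_table;	/* per-CPU hash buckets */
	int		hash_count;	/* entries cached on this CPU */
	unsigned int	hash_rnd;	/* per-CPU hash seed */
	int		hash_rnd_recalc;
};

struct flow_cache {
	unsigned int			hash_shift;
	int				low_watermark;
	int				high_watermark;
	struct flow_cache_percpu	*percpu;	/* one slot per CPU */
};

#define flow_cache_hash_size(fc) (1u << (fc)->hash_shift)

static int flow_cache_init(struct flow_cache *fc)
{
	int i;

	fc->hash_shift = 10;
	fc->low_watermark = 2 * flow_cache_hash_size(fc);
	fc->high_watermark = 4 * flow_cache_hash_size(fc);
	fc->percpu = calloc(NR_CPUS, sizeof(*fc->percpu));
	if (!fc->percpu)
		return -1;

	for (i = 0; i < NR_CPUS; i++) {
		struct flow_cache_percpu *fcp = &fc->percpu[i];

		fcp->hash_table = calloc(flow_cache_hash_size(fc),
					 sizeof(*fcp->hash_table));
		fcp->hash_rnd_recalc = 1;	/* pick a seed on first use */
	}
	return 0;
}

int main(void)
{
	struct flow_cache fc;

	/* Several independent instances are now possible; the kernel patch
	 * keeps a single flow_cache_global for the existing users. */
	if (flow_cache_init(&fc) == 0)
		printf("hash size per cpu: %u buckets\n",
		       flow_cache_hash_size(&fc));
	return 0;
}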
parent ea2dea9dac
commit d7997fe1f4
1 changed file with 118 additions and 103 deletions
net/core/flow.c | 221
@@ -35,104 +35,105 @@ struct flow_cache_entry {
 	atomic_t		*object_ref;
 };
 
-atomic_t flow_cache_genid = ATOMIC_INIT(0);
-
-static u32 flow_hash_shift;
-#define flow_hash_size	(1 << flow_hash_shift)
-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
-
-#define flow_table(cpu) (per_cpu(flow_tables, cpu))
-
-static struct kmem_cache *flow_cachep __read_mostly;
-
-static int flow_lwm, flow_hwm;
-
-struct flow_percpu_info {
-	int hash_rnd_recalc;
-	u32 hash_rnd;
-	int count;
+struct flow_cache_percpu {
+	struct flow_cache_entry **	hash_table;
+	int				hash_count;
+	u32				hash_rnd;
+	int				hash_rnd_recalc;
+	struct tasklet_struct		flush_tasklet;
 };
-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
-
-#define flow_hash_rnd_recalc(cpu) \
-	(per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
-#define flow_hash_rnd(cpu) \
-	(per_cpu(flow_hash_info, cpu).hash_rnd)
-#define flow_count(cpu) \
-	(per_cpu(flow_hash_info, cpu).count)
-
-static struct timer_list flow_hash_rnd_timer;
-
-#define FLOW_HASH_RND_PERIOD	(10 * 60 * HZ)
-
 struct flow_flush_info {
-	atomic_t cpuleft;
-	struct completion completion;
+	struct flow_cache *		cache;
+	atomic_t			cpuleft;
+	struct completion		completion;
 };
-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
-
-#define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
+
+struct flow_cache {
+	u32				hash_shift;
+	unsigned long			order;
+	struct flow_cache_percpu *	percpu;
+	struct notifier_block		hotcpu_notifier;
+	int				low_watermark;
+	int				high_watermark;
+	struct timer_list		rnd_timer;
+};
+
+atomic_t flow_cache_genid = ATOMIC_INIT(0);
+static struct flow_cache flow_cache_global;
+static struct kmem_cache *flow_cachep;
+
+#define flow_cache_hash_size(cache)	(1 << (cache)->hash_shift)
+#define FLOW_HASH_RND_PERIOD		(10 * 60 * HZ)
 
 static void flow_cache_new_hashrnd(unsigned long arg)
 {
+	struct flow_cache *fc = (void *) arg;
 	int i;
 
 	for_each_possible_cpu(i)
-		flow_hash_rnd_recalc(i) = 1;
+		per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;
 
-	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
-	add_timer(&flow_hash_rnd_timer);
+	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
+	add_timer(&fc->rnd_timer);
 }
 
-static void flow_entry_kill(int cpu, struct flow_cache_entry *fle)
+static void flow_entry_kill(struct flow_cache *fc,
+			    struct flow_cache_percpu *fcp,
+			    struct flow_cache_entry *fle)
 {
 	if (fle->object)
 		atomic_dec(fle->object_ref);
 	kmem_cache_free(flow_cachep, fle);
-	flow_count(cpu)--;
+	fcp->hash_count--;
 }
 
-static void __flow_cache_shrink(int cpu, int shrink_to)
+static void __flow_cache_shrink(struct flow_cache *fc,
+				struct flow_cache_percpu *fcp,
+				int shrink_to)
 {
 	struct flow_cache_entry *fle, **flp;
 	int i;
 
-	for (i = 0; i < flow_hash_size; i++) {
+	for (i = 0; i < flow_cache_hash_size(fc); i++) {
 		int k = 0;
 
-		flp = &flow_table(cpu)[i];
+		flp = &fcp->hash_table[i];
 		while ((fle = *flp) != NULL && k < shrink_to) {
 			k++;
 			flp = &fle->next;
 		}
 		while ((fle = *flp) != NULL) {
 			*flp = fle->next;
-			flow_entry_kill(cpu, fle);
+			flow_entry_kill(fc, fcp, fle);
 		}
 	}
 }
 
-static void flow_cache_shrink(int cpu)
+static void flow_cache_shrink(struct flow_cache *fc,
+			      struct flow_cache_percpu *fcp)
 {
-	int shrink_to = flow_lwm / flow_hash_size;
+	int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);
 
-	__flow_cache_shrink(cpu, shrink_to);
+	__flow_cache_shrink(fc, fcp, shrink_to);
 }
 
-static void flow_new_hash_rnd(int cpu)
+static void flow_new_hash_rnd(struct flow_cache *fc,
+			      struct flow_cache_percpu *fcp)
 {
-	get_random_bytes(&flow_hash_rnd(cpu), sizeof(u32));
-	flow_hash_rnd_recalc(cpu) = 0;
-
-	__flow_cache_shrink(cpu, 0);
+	get_random_bytes(&fcp->hash_rnd, sizeof(u32));
+	fcp->hash_rnd_recalc = 0;
+	__flow_cache_shrink(fc, fcp, 0);
 }
 
-static u32 flow_hash_code(struct flowi *key, int cpu)
+static u32 flow_hash_code(struct flow_cache *fc,
+			  struct flow_cache_percpu *fcp,
+			  struct flowi *key)
 {
 	u32 *k = (u32 *) key;
 
-	return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) &
-		(flow_hash_size - 1));
+	return (jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
+		& (flow_cache_hash_size(fc) - 1));
 }
 
 #if (BITS_PER_LONG == 64)
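One detail worth noting from the hunk above: flow_cache_hash_size() is always a power of two (1 << hash_shift), so flow_hash_code() can select a bucket with a mask instead of a modulo. A small stand-alone illustration of that masking, with an arbitrary hash value standing in for the jhash2() result used by the patch:

#include <stdio.h>

/* Bucket selection as in flow_hash_code(): because the table size is
 * 1 << hash_shift, "hash & (size - 1)" equals "hash % size".  The hash
 * value here is arbitrary; the patch mixes the flowi key with the
 * per-CPU hash_rnd seed via jhash2(). */
int main(void)
{
	unsigned int hash_shift = 10;
	unsigned int hash_size = 1u << hash_shift;	/* flow_cache_hash_size() */
	unsigned int hash = 0xdeadbeef;			/* stand-in for jhash2() */

	printf("bucket = %u (of %u)\n", hash & (hash_size - 1), hash_size);
	printf("same via modulo: %u\n", hash % hash_size);
	return 0;
}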
@@ -168,24 +169,25 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2)
 void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
 			flow_resolve_t resolver)
 {
+	struct flow_cache *fc = &flow_cache_global;
+	struct flow_cache_percpu *fcp;
 	struct flow_cache_entry *fle, **head;
 	unsigned int hash;
-	int cpu;
 
 	local_bh_disable();
-	cpu = smp_processor_id();
+	fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
 
 	fle = NULL;
 	/* Packet really early in init?  Making flow_cache_init a
 	 * pre-smp initcall would solve this.  --RR */
-	if (!flow_table(cpu))
+	if (!fcp->hash_table)
 		goto nocache;
 
-	if (flow_hash_rnd_recalc(cpu))
-		flow_new_hash_rnd(cpu);
-	hash = flow_hash_code(key, cpu);
+	if (fcp->hash_rnd_recalc)
+		flow_new_hash_rnd(fc, fcp);
+	hash = flow_hash_code(fc, fcp, key);
 
-	head = &flow_table(cpu)[hash];
+	head = &fcp->hash_table[hash];
 	for (fle = *head; fle; fle = fle->next) {
 		if (fle->family == family &&
 		    fle->dir == dir &&
@@ -204,8 +206,8 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
 	}
 
 	if (!fle) {
-		if (flow_count(cpu) > flow_hwm)
-			flow_cache_shrink(cpu);
+		if (fcp->hash_count > fc->high_watermark)
+			flow_cache_shrink(fc, fcp);
 
 		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
 		if (fle) {
@@ -215,7 +217,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
 			fle->dir = dir;
 			memcpy(&fle->key, key, sizeof(*key));
 			fle->object = NULL;
-			flow_count(cpu)++;
+			fcp->hash_count++;
 		}
 	}
 
@@ -249,14 +251,15 @@ nocache:
 static void flow_cache_flush_tasklet(unsigned long data)
 {
 	struct flow_flush_info *info = (void *)data;
+	struct flow_cache *fc = info->cache;
+	struct flow_cache_percpu *fcp;
 	int i;
-	int cpu;
 
-	cpu = smp_processor_id();
-	for (i = 0; i < flow_hash_size; i++) {
+	fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
+	for (i = 0; i < flow_cache_hash_size(fc); i++) {
 		struct flow_cache_entry *fle;
 
-		fle = flow_table(cpu)[i];
+		fle = fcp->hash_table[i];
 		for (; fle; fle = fle->next) {
 			unsigned genid = atomic_read(&flow_cache_genid);
 
@@ -272,7 +275,6 @@ static void flow_cache_flush_tasklet(unsigned long data)
 	complete(&info->completion);
 }
 
-static void flow_cache_flush_per_cpu(void *) __attribute__((__unused__));
 static void flow_cache_flush_per_cpu(void *data)
 {
 	struct flow_flush_info *info = data;
@@ -280,8 +282,7 @@ static void flow_cache_flush_per_cpu(void *data)
 	struct tasklet_struct *tasklet;
 
 	cpu = smp_processor_id();
-
-	tasklet = flow_flush_tasklet(cpu);
+	tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet;
 	tasklet->data = (unsigned long)info;
 	tasklet_schedule(tasklet);
 }
@@ -294,6 +295,7 @@ void flow_cache_flush(void)
 	/* Don't want cpus going down or up during this. */
 	get_online_cpus();
 	mutex_lock(&flow_flush_sem);
+	info.cache = &flow_cache_global;
 	atomic_set(&info.cpuleft, num_online_cpus());
 	init_completion(&info.completion);
 
@@ -307,62 +309,75 @@ void flow_cache_flush(void)
 	put_online_cpus();
 }
 
-static void __init flow_cache_cpu_prepare(int cpu)
+static void __init flow_cache_cpu_prepare(struct flow_cache *fc,
+					  struct flow_cache_percpu *fcp)
 {
-	struct tasklet_struct *tasklet;
-	unsigned long order;
-
-	for (order = 0;
-	     (PAGE_SIZE << order) <
-		     (sizeof(struct flow_cache_entry *)*flow_hash_size);
-	     order++)
-		/* NOTHING */;
-
-	flow_table(cpu) = (struct flow_cache_entry **)
-		__get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
-	if (!flow_table(cpu))
-		panic("NET: failed to allocate flow cache order %lu\n", order);
-
-	flow_hash_rnd_recalc(cpu) = 1;
-	flow_count(cpu) = 0;
-
-	tasklet = flow_flush_tasklet(cpu);
-	tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
+	fcp->hash_table = (struct flow_cache_entry **)
+		__get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order);
+	if (!fcp->hash_table)
+		panic("NET: failed to allocate flow cache order %lu\n", fc->order);
+
+	fcp->hash_rnd_recalc = 1;
+	fcp->hash_count = 0;
+	tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
 }
 
 static int flow_cache_cpu(struct notifier_block *nfb,
 			  unsigned long action,
 			  void *hcpu)
 {
+	struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
+	int cpu = (unsigned long) hcpu;
+	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
+
 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
-		__flow_cache_shrink((unsigned long)hcpu, 0);
+		__flow_cache_shrink(fc, fcp, 0);
 	return NOTIFY_OK;
 }
 
-static int __init flow_cache_init(void)
+static int flow_cache_init(struct flow_cache *fc)
 {
+	unsigned long order;
 	int i;
 
-	flow_cachep = kmem_cache_create("flow_cache",
-					sizeof(struct flow_cache_entry),
-					0, SLAB_PANIC,
-					NULL);
-	flow_hash_shift = 10;
-	flow_lwm = 2 * flow_hash_size;
-	flow_hwm = 4 * flow_hash_size;
+	fc->hash_shift = 10;
+	fc->low_watermark = 2 * flow_cache_hash_size(fc);
+	fc->high_watermark = 4 * flow_cache_hash_size(fc);
 
-	setup_timer(&flow_hash_rnd_timer, flow_cache_new_hashrnd, 0);
-	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
-	add_timer(&flow_hash_rnd_timer);
+	for (order = 0;
+	     (PAGE_SIZE << order) <
+		     (sizeof(struct flow_cache_entry *)*flow_cache_hash_size(fc));
+	     order++)
+		/* NOTHING */;
+	fc->order = order;
+	fc->percpu = alloc_percpu(struct flow_cache_percpu);
+
+	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
+		    (unsigned long) fc);
+	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
+	add_timer(&fc->rnd_timer);
 
 	for_each_possible_cpu(i)
-		flow_cache_cpu_prepare(i);
+		flow_cache_cpu_prepare(fc, per_cpu_ptr(fc->percpu, i));
+
+	fc->hotcpu_notifier = (struct notifier_block){
+		.notifier_call = flow_cache_cpu,
+	};
+	register_hotcpu_notifier(&fc->hotcpu_notifier);
 
-	hotcpu_notifier(flow_cache_cpu, 0);
 	return 0;
 }
 
-module_init(flow_cache_init);
+static int __init flow_cache_init_global(void)
+{
+	flow_cachep = kmem_cache_create("flow_cache",
+					sizeof(struct flow_cache_entry),
+					0, SLAB_PANIC, NULL);
+
+	return flow_cache_init(&flow_cache_global);
+}
+
+module_init(flow_cache_init_global);
 
 EXPORT_SYMBOL(flow_cache_genid);
 EXPORT_SYMBOL(flow_cache_lookup);
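The random-seed timer also becomes per instance: setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd, (unsigned long) fc) hands the cache pointer to the callback as an unsigned long, and the callback casts it back. A user-space sketch of that callback-argument pattern follows, with a simple function call standing in for the timer firing; it mirrors the patch's convention but is not kernel code.

#include <stdio.h>

struct flow_cache {
	unsigned int hash_shift;
};

/* Same shape as the patch's callback: the instance arrives as an
 * unsigned long and is cast back to the flow_cache pointer. */
static void flow_cache_new_hashrnd(unsigned long arg)
{
	struct flow_cache *fc = (void *) arg;	/* recover the instance */

	printf("recalc hash seeds for cache with %u buckets\n",
	       1u << fc->hash_shift);
}

static void run_timer(void (*fn)(unsigned long), unsigned long data)
{
	fn(data);	/* stand-in for the kernel timer firing */
}

int main(void)
{
	struct flow_cache fc = { .hash_shift = 10 };

	run_timer(flow_cache_new_hashrnd, (unsigned long) &fc);
	return 0;
}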