mirror of
https://github.com/followmsi/android_kernel_google_msm.git
synced 2024-11-06 23:17:41 +00:00
netfilter: xtables: stackptr should be percpu
commit f3c5c1bfd4
(netfilter: xtables: make ip_tables reentrant)
introduced a performance regression, because the stackptr array is shared by
all CPUs, adding cache-line ping-pongs. (16 CPUs share a 64-byte cache
line)
Fix this using alloc_percpu()
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Jan Engelhardt <jengelh@medozas.de>
Signed-off-by: Patrick McHardy <kaber@trash.net>
This commit is contained in:
parent
c936e8bd1d
commit
7489aec8ee
4 changed files with 6 additions and 13 deletions
|
@ -397,7 +397,7 @@ struct xt_table_info {
|
||||||
* @stacksize jumps (number of user chains) can possibly be made.
|
* @stacksize jumps (number of user chains) can possibly be made.
|
||||||
*/
|
*/
|
||||||
unsigned int stacksize;
|
unsigned int stacksize;
|
||||||
unsigned int *stackptr;
|
unsigned int __percpu *stackptr;
|
||||||
void ***jumpstack;
|
void ***jumpstack;
|
||||||
/* ipt_entry tables: one per CPU */
|
/* ipt_entry tables: one per CPU */
|
||||||
/* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
|
/* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
|
||||||
|
|
|
@ -336,7 +336,7 @@ ipt_do_table(struct sk_buff *skb,
|
||||||
cpu = smp_processor_id();
|
cpu = smp_processor_id();
|
||||||
table_base = private->entries[cpu];
|
table_base = private->entries[cpu];
|
||||||
jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
|
jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
|
||||||
stackptr = &private->stackptr[cpu];
|
stackptr = per_cpu_ptr(private->stackptr, cpu);
|
||||||
origptr = *stackptr;
|
origptr = *stackptr;
|
||||||
|
|
||||||
e = get_entry(table_base, private->hook_entry[hook]);
|
e = get_entry(table_base, private->hook_entry[hook]);
|
||||||
|
|
|
@ -363,7 +363,7 @@ ip6t_do_table(struct sk_buff *skb,
|
||||||
cpu = smp_processor_id();
|
cpu = smp_processor_id();
|
||||||
table_base = private->entries[cpu];
|
table_base = private->entries[cpu];
|
||||||
jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
|
jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
|
||||||
stackptr = &private->stackptr[cpu];
|
stackptr = per_cpu_ptr(private->stackptr, cpu);
|
||||||
origptr = *stackptr;
|
origptr = *stackptr;
|
||||||
|
|
||||||
e = get_entry(table_base, private->hook_entry[hook]);
|
e = get_entry(table_base, private->hook_entry[hook]);
|
||||||
|
|
|
@ -699,10 +699,8 @@ void xt_free_table_info(struct xt_table_info *info)
|
||||||
vfree(info->jumpstack);
|
vfree(info->jumpstack);
|
||||||
else
|
else
|
||||||
kfree(info->jumpstack);
|
kfree(info->jumpstack);
|
||||||
if (sizeof(unsigned int) * nr_cpu_ids > PAGE_SIZE)
|
|
||||||
vfree(info->stackptr);
|
free_percpu(info->stackptr);
|
||||||
else
|
|
||||||
kfree(info->stackptr);
|
|
||||||
|
|
||||||
kfree(info);
|
kfree(info);
|
||||||
}
|
}
|
||||||
|
@ -753,14 +751,9 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
|
||||||
unsigned int size;
|
unsigned int size;
|
||||||
int cpu;
|
int cpu;
|
||||||
|
|
||||||
size = sizeof(unsigned int) * nr_cpu_ids;
|
i->stackptr = alloc_percpu(unsigned int);
|
||||||
if (size > PAGE_SIZE)
|
|
||||||
i->stackptr = vmalloc(size);
|
|
||||||
else
|
|
||||||
i->stackptr = kmalloc(size, GFP_KERNEL);
|
|
||||||
if (i->stackptr == NULL)
|
if (i->stackptr == NULL)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
memset(i->stackptr, 0, size);
|
|
||||||
|
|
||||||
size = sizeof(void **) * nr_cpu_ids;
|
size = sizeof(void **) * nr_cpu_ids;
|
||||||
if (size > PAGE_SIZE)
|
if (size > PAGE_SIZE)
|
||||||
|
|
Loading…
Reference in a new issue