[PATCH] x86-64: Some cleanup and optimization to the processor data area.
- Remove unused irqrsp field
- Remove pda->me
- Optimize set_softirq_pending slightly

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 3f74478b5f
parent 459192c92c

6 changed files with 18 additions and 14 deletions
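Background note (not part of the patch): the pda.h hunks below show that read_pda()/write_pda() address PDA fields directly through the %gs segment base. That is why pda->me is redundant: fetching the self-pointer and then dereferencing it costs one more memory access than reading the wanted field straight away. The user-space mock below is only a sketch of that argument, with hypothetical names and plain per-thread memory standing in for %gs.

#include <stdio.h>

struct mock_pda {
        struct mock_pda *me;            /* the self-pointer this patch removes */
        unsigned int __softirq_pending; /* the field softirq code actually wants */
};

static __thread struct mock_pda pda_area;   /* stands in for the %gs-based PDA */

#define read_pda(field)        (pda_area.field)
#define write_pda(field, val)  (pda_area.field = (val))

int main(void)
{
        pda_area.me = &pda_area;
        write_pda(__softirq_pending, 0x80);

        /* Old route: load the self-pointer, then dereference it for the field. */
        unsigned int via_me = read_pda(me)->__softirq_pending;

        /* New route: read the field directly; the self-pointer buys nothing. */
        unsigned int direct = read_pda(__softirq_pending);

        printf("%#x %#x\n", via_me, direct);
        return 0;
}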
@@ -39,7 +39,6 @@ int main(void)
 	ENTRY(kernelstack);
 	ENTRY(oldrsp);
 	ENTRY(pcurrent);
-	ENTRY(irqrsp);
 	ENTRY(irqcount);
 	ENTRY(cpunumber);
 	ENTRY(irqstackptr);
@@ -119,7 +119,6 @@ void pda_init(int cpu)
 	asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
 	wrmsrl(MSR_GS_BASE, cpu_pda + cpu);
 
-	pda->me = pda;
 	pda->cpunumber = cpu;
 	pda->irqcount = -1;
 	pda->kernelstack =
@@ -9,11 +9,12 @@
 
 #define __ARCH_IRQ_STAT 1
 
-/* Generate a lvalue for a pda member. Should fix softirq.c instead to use
-   special access macros. This would generate better code. */
-#define __IRQ_STAT(cpu,member) (read_pda(me)->member)
+#define local_softirq_pending() read_pda(__softirq_pending)
 
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
+#define __ARCH_SET_SOFTIRQ_PENDING 1
+
+#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
+#define or_softirq_pending(x) or_pda(__softirq_pending, (x))
 
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
@@ -10,10 +10,8 @@
 struct x8664_pda {
 	struct task_struct *pcurrent;	/* Current process */
 	unsigned long data_offset;	/* Per cpu data offset from linker address */
-	struct x8664_pda *me;		/* Pointer to itself */
 	unsigned long kernelstack;	/* top of kernel stack for current */
 	unsigned long oldrsp;		/* user rsp for system call */
-	unsigned long irqrsp;		/* Old rsp for interrupts. */
 	int irqcount;			/* Irq nesting counter. Starts with -1 */
 	int cpunumber;			/* Logical CPU number */
 	char *irqstackptr;		/* top of irqstack */
@@ -42,13 +40,14 @@ extern void __bad_pda_field(void);
 #define pda_offset(field) offsetof(struct x8664_pda, field)
 
 #define pda_to_op(op,field,val) do { \
+	typedef typeof_field(struct x8664_pda, field) T__; \
 	switch (sizeof_field(struct x8664_pda, field)) { \
 case 2: \
-asm volatile(op "w %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
+asm volatile(op "w %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
 case 4: \
-asm volatile(op "l %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
+asm volatile(op "l %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
 case 8: \
-asm volatile(op "q %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
+asm volatile(op "q %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
 default: __bad_pda_field(); \
 	} \
 	} while (0)
@@ -58,7 +57,7 @@ asm volatile(op "q %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); bre
  * Unfortunately removing them causes all hell to break lose currently.
  */
 #define pda_from_op(op,field) ({ \
-	typeof_field(struct x8664_pda, field) ret__; \
+	typedef typeof_field(struct x8664_pda, field) T__; T__ ret__; \
 	switch (sizeof_field(struct x8664_pda, field)) { \
 case 2: \
 asm volatile(op "w %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
@@ -75,6 +74,7 @@ asm volatile(op "q %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); b
 #define write_pda(field,val) pda_to_op("mov",field,val)
 #define add_pda(field,val) pda_to_op("add",field,val)
 #define sub_pda(field,val) pda_to_op("sub",field,val)
+#define or_pda(field,val) pda_to_op("or",field,val)
 
 #endif
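Note on the pda.h hunks (illustration, not part of the patch): pda_to_op() now declares T__ as the field's own type and loosens the operand constraint from "r" to "ri", so constant operands can be encoded as immediates instead of being staged in a register, while the (T__) cast keeps the operand width consistent with the chosen w/l/q suffix; pda_from_op() gains the same typedef, and the new or_pda() is what backs or_softirq_pending() above. A minimal stand-alone sketch of the constraint difference, assuming GNU C on x86-64 and using an ordinary memory operand rather than a %gs-relative one:

#include <stdio.h>

struct mock_pda {
        long kernelstack;               /* stands in for any 8-byte PDA field */
};

static struct mock_pda pda_area;

/* Old shape: "r" only - even small constants must go through a register. */
#define mock_or_pda_r(val)                                              \
        asm volatile("orq %1,%0"                                        \
                     : "+m" (pda_area.kernelstack)                      \
                     : "r" ((long)(val)))

/* New shape: "ri" plus a cast to the field's type, as in the patch. */
#define mock_or_pda_ri(val)                                             \
do {                                                                    \
        typedef typeof(pda_area.kernelstack) T__;                       \
        asm volatile("orq %1,%0"                                        \
                     : "+m" (pda_area.kernelstack)                      \
                     : "ri" ((T__)(val)));                              \
} while (0)

int main(void)
{
        mock_or_pda_r(0x10);    /* constant is loaded into a register first */
        mock_or_pda_ri(0x20);   /* constant may be encoded as an immediate  */
        printf("kernelstack bits: %#lx\n", pda_area.kernelstack);
        return 0;
}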
@@ -57,6 +57,11 @@ extern void disable_irq(unsigned int irq);
 extern void enable_irq(unsigned int irq);
 #endif
 
+#ifndef __ARCH_SET_SOFTIRQ_PENDING
+#define set_softirq_pending(x) (local_softirq_pending() = (x))
+#define or_softirq_pending(x) (local_softirq_pending() |= (x))
+#endif
+
 /*
  * Temporary defines for UP kernels, until all code gets fixed.
 */
@@ -123,7 +128,7 @@ struct softirq_action
 asmlinkage void do_softirq(void);
 extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
 extern void softirq_init(void);
-#define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0)
+#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
 extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
 extern void FASTCALL(raise_softirq(unsigned int nr));
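Note on the interrupt.h hunks (illustration, not part of the patch): set_softirq_pending() and or_softirq_pending() become overridable hooks; generic code gets the lvalue-based fallback under #ifndef __ARCH_SET_SOFTIRQ_PENDING, while x86-64 routes them through write_pda()/or_pda(). A minimal stand-alone sketch of that override pattern, with the per-CPU word mocked as a plain variable and the hypothetical arch override left commented out:

#include <stdio.h>

static unsigned int pending_word;
#define local_softirq_pending() (pending_word)

/* Uncomment to play the role of an arch header such as the x86-64 one
 * (arch_store_pending()/arch_or_pending() are hypothetical names): */
/* #define __ARCH_SET_SOFTIRQ_PENDING 1 */
/* #define set_softirq_pending(x) arch_store_pending(x) */
/* #define or_softirq_pending(x)  arch_or_pending(x) */

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)

int main(void)
{
        set_softirq_pending(0);
        __raise_softirq_irqoff(3);      /* raise one softirq-style bit */
        printf("pending = %#x\n", local_softirq_pending());
        return 0;
}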
@@ -84,7 +84,7 @@ asmlinkage void __do_softirq(void)
 	cpu = smp_processor_id();
 restart:
 	/* Reset the pending bitmask before enabling irqs */
-	local_softirq_pending() = 0;
+	set_softirq_pending(0);
 
 	local_irq_enable();
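Note on the softirq.c hunk (illustration, not part of the patch): clearing the pending mask through set_softirq_pending(0) keeps the existing restart semantics (bits raised while handlers run are seen on the next pass) while letting the architecture pick the cheapest store. A simplified user-space sketch of that loop shape, with locking, accounting and the real handler table omitted:

#include <stdio.h>

static unsigned int pending_word;
#define local_softirq_pending()  (pending_word)
#define set_softirq_pending(x)   (pending_word = (x))

static void run_handler(int nr)
{
        printf("softirq %d\n", nr);
}

static void do_softirq_sketch(void)
{
        unsigned int pending = local_softirq_pending();

        while (pending) {
                set_softirq_pending(0);            /* reset before re-enabling irqs */
                for (int nr = 0; pending; pending >>= 1, nr++)
                        if (pending & 1)
                                run_handler(nr);
                pending = local_softirq_pending(); /* anything raised meanwhile? */
        }
}

int main(void)
{
        pending_word = (1u << 1) | (1u << 3);      /* pretend two softirqs raised */
        do_softirq_sketch();
        return 0;
}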