From 115a3dc50cc1e4b3431b472697be41e46a558cf9 Mon Sep 17 00:00:00 2001
From: Matt Wagantall
Date: Thu, 21 Aug 2014 22:07:01 -0700
Subject: [PATCH] Revert "arm64: defer reloading a task's FPSIMD state to
 userland resume"

This reverts commit 41f7d22a9e70284706f2f9c1320b0e3d8446be96.

This change results in occasional userspace crashes, presumably
because incorrect FPSIMD state is saved/restored. Revert it until
the problems are understood and fixed, resolving several merge
conflicts along the way.

Change-Id: I030c413950640aa6b83d55a8af5f75ab54ab9b7e
Signed-off-by: Matt Wagantall
---
 arch/arm64/include/asm/fpsimd.h      |   5 -
 arch/arm64/include/asm/thread_info.h |   4 +-
 arch/arm64/kernel/entry.S            |   2 +-
 arch/arm64/kernel/fpsimd.c           | 134 +++------------------------
 arch/arm64/kernel/ptrace.c           |   2 -
 arch/arm64/kernel/signal.c           |   4 -
 6 files changed, 16 insertions(+), 135 deletions(-)

diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 50f559f574fe..2857b9412955 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -37,8 +37,6 @@ struct fpsimd_state {
 			u32 fpcr;
 		};
 	};
-	/* the id of the last cpu to have restored this state */
-	unsigned int cpu;
 };
 
 /*
@@ -72,11 +70,8 @@ extern void fpsimd_thread_switch(struct task_struct *next);
 extern void fpsimd_flush_thread(void);
 
 extern void fpsimd_preserve_current_state(void);
-extern void fpsimd_restore_current_state(void);
 extern void fpsimd_update_current_state(struct fpsimd_state *state);
 
-extern void fpsimd_flush_task_state(struct task_struct *target);
-
 extern void fpsimd_save_partial_state(struct fpsimd_partial_state *state,
 				      u32 num_regs);
 extern void fpsimd_load_partial_state(struct fpsimd_partial_state *state);
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 9a11566e43d6..f4091da4d972 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -108,7 +108,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SIGPENDING		0
 #define TIF_NEED_RESCHED	1
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
-#define TIF_FOREIGN_FPSTATE	3	/* CPU's FP state is not current's */
 #define TIF_SYSCALL_TRACE	8
 #define TIF_SYSCALL_AUDIT	9
 #define TIF_SYSCALL_TRACEPOINT	10
@@ -124,7 +123,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
-#define _TIF_FOREIGN_FPSTATE	(1 << TIF_FOREIGN_FPSTATE)
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
@@ -132,7 +130,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_32BIT		(1 << TIF_32BIT)
 
 #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
-				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
+				 _TIF_NOTIFY_RESUME)
 
 #define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
 				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 1d3393532b01..573a34aa6f20 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -566,7 +566,7 @@ fast_work_pending:
 	str	x0, [sp, #S_X0]			// returned x0
 work_pending:
 	tbnz	x1, #TIF_NEED_RESCHED, work_resched
-	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
+	/* TIF_SIGPENDING or TIF_NOTIFY_RESUME case */
 	ldr	x2, [sp, #S_PSTATE]
 	mov	x0, sp				// 'regs'
 	tst	x2, #PSR_MODE_MASK		// user mode regs?
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index ad8aebb1cdef..b907a6d0ac12 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -34,59 +34,6 @@
 #define FPEXC_IXF	(1 << 4)
 #define FPEXC_IDF	(1 << 7)
 
-/*
- * In order to reduce the number of times the FPSIMD state is needlessly saved
- * and restored, we need to keep track of two things:
- * (a) for each task, we need to remember which CPU was the last one to have
- *     the task's FPSIMD state loaded into its FPSIMD registers;
- * (b) for each CPU, we need to remember which task's userland FPSIMD state has
- *     been loaded into its FPSIMD registers most recently, or whether it has
- *     been used to perform kernel mode NEON in the meantime.
- *
- * For (a), we add a 'cpu' field to struct fpsimd_state, which gets updated to
- * the id of the current CPU everytime the state is loaded onto a CPU. For (b),
- * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
- * address of the userland FPSIMD state of the task that was loaded onto the CPU
- * the most recently, or NULL if kernel mode NEON has been performed after that.
- *
- * With this in place, we no longer have to restore the next FPSIMD state right
- * when switching between tasks. Instead, we can defer this check to userland
- * resume, at which time we verify whether the CPU's fpsimd_last_state and the
- * task's fpsimd_state.cpu are still mutually in sync. If this is the case, we
- * can omit the FPSIMD restore.
- *
- * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
- * indicate whether or not the userland FPSIMD state of the current task is
- * present in the registers. The flag is set unless the FPSIMD registers of this
- * CPU currently contain the most recent userland FPSIMD state of the current
- * task.
- *
- * For a certain task, the sequence may look something like this:
- * - the task gets scheduled in; if both the task's fpsimd_state.cpu field
- *   contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu
- *   variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
- *   cleared, otherwise it is set;
- *
- * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
- *   userland FPSIMD state is copied from memory to the registers, the task's
- *   fpsimd_state.cpu field is set to the id of the current CPU, the current
- *   CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
- *   TIF_FOREIGN_FPSTATE flag is cleared;
- *
- * - the task executes an ordinary syscall; upon return to userland, the
- *   TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
- *   restored;
- *
- * - the task executes a syscall which executes some NEON instructions; this is
- *   preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
- *   register contents to memory, clears the fpsimd_last_state per-cpu variable
- *   and sets the TIF_FOREIGN_FPSTATE flag;
- *
- * - the task gets preempted after kernel_neon_end() is called; as we have not
- *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
- *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
- */
-static DEFINE_PER_CPU(struct fpsimd_state *, fpsimd_last_state);
 
 /*
  * Trapped FP/ASIMD access.
@@ -126,38 +73,19 @@ void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
 
 void fpsimd_thread_switch(struct task_struct *next)
 {
-	/*
-	 * Save the current FPSIMD state to memory, but only if whatever is in
-	 * the registers is in fact the most recent userland FPSIMD state of
-	 * 'current'.
-	 */
-	if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
+	if (current->mm)
 		fpsimd_save_state(&current->thread.fpsimd_state);
 
-	if (next->mm) {
-		/*
-		 * If we are switching to a task whose most recent userland
-		 * FPSIMD state is already in the registers of *this* cpu,
-		 * we can skip loading the state from memory. Otherwise, set
-		 * the TIF_FOREIGN_FPSTATE flag so the state will be loaded
-		 * upon the next return to userland.
-		 */
-		struct fpsimd_state *st = &next->thread.fpsimd_state;
-
-		if (__this_cpu_read(fpsimd_last_state) == st
-		    && st->cpu == smp_processor_id())
-			clear_ti_thread_flag(task_thread_info(next),
-					     TIF_FOREIGN_FPSTATE);
-		else
-			set_ti_thread_flag(task_thread_info(next),
-					   TIF_FOREIGN_FPSTATE);
-	}
+	if (next->mm)
+		fpsimd_load_state(&next->thread.fpsimd_state);
 }
 
 void fpsimd_flush_thread(void)
 {
+	preempt_disable();
 	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
-	set_thread_flag(TIF_FOREIGN_FPSTATE);
+	fpsimd_load_state(&current->thread.fpsimd_state);
+	preempt_enable();
 }
 
 /*
@@ -167,55 +95,20 @@ void fpsimd_flush_thread(void)
 void fpsimd_preserve_current_state(void)
 {
 	preempt_disable();
-	if (!test_thread_flag(TIF_FOREIGN_FPSTATE))
-		fpsimd_save_state(&current->thread.fpsimd_state);
+	fpsimd_save_state(&current->thread.fpsimd_state);
 	preempt_enable();
 }
 
 /*
- * Load the userland FPSIMD state of 'current' from memory, but only if the
- * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
- * state of 'current'
- */
-void fpsimd_restore_current_state(void)
-{
-	preempt_disable();
-	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
-		struct fpsimd_state *st = &current->thread.fpsimd_state;
-
-		fpsimd_load_state(st);
-		this_cpu_write(fpsimd_last_state, st);
-		st->cpu = smp_processor_id();
-	}
-	preempt_enable();
-}
-
-/*
- * Load an updated userland FPSIMD state for 'current' from memory and set the
- * flag that indicates that the FPSIMD register contents are the most recent
- * FPSIMD state of 'current'
+ * Load an updated userland FPSIMD state for 'current' from memory
  */
 void fpsimd_update_current_state(struct fpsimd_state *state)
 {
 	preempt_disable();
 	fpsimd_load_state(state);
-	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
-		struct fpsimd_state *st = &current->thread.fpsimd_state;
-
-		this_cpu_write(fpsimd_last_state, st);
-		st->cpu = smp_processor_id();
-	}
 	preempt_enable();
 }
 
-/*
- * Invalidate live CPU copies of task t's FPSIMD state
- */
-void fpsimd_flush_task_state(struct task_struct *t)
-{
-	t->thread.fpsimd_state.cpu = NR_CPUS;
-}
-
 #ifdef CONFIG_KERNEL_MODE_NEON
 
 static DEFINE_PER_CPU(struct fpsimd_partial_state, hardirq_fpsimdstate);
@@ -240,10 +133,8 @@ void kernel_neon_begin_partial(u32 num_regs)
 		 * registers.
 		 */
 		preempt_disable();
-		if (current->mm &&
-		    !test_and_set_thread_flag(TIF_FOREIGN_FPSTATE))
+		if (current->mm)
 			fpsimd_save_state(&current->thread.fpsimd_state);
-		this_cpu_write(fpsimd_last_state, NULL);
 	}
 }
 EXPORT_SYMBOL(kernel_neon_begin_partial);
@@ -255,6 +146,9 @@ void kernel_neon_end(void)
 			in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
 		fpsimd_load_partial_state(s);
 	} else {
+		if (current->mm)
+			fpsimd_load_state(&current->thread.fpsimd_state);
+
 		preempt_enable();
 	}
 }
@@ -268,12 +162,12 @@ static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
 {
 	switch (cmd) {
 	case CPU_PM_ENTER:
-		if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
+		if (current->mm)
 			fpsimd_save_state(&current->thread.fpsimd_state);
 		break;
 	case CPU_PM_EXIT:
 		if (current->mm)
-			set_thread_flag(TIF_FOREIGN_FPSTATE);
+			fpsimd_load_state(&current->thread.fpsimd_state);
 		break;
 	case CPU_PM_ENTER_FAILED:
 	default:
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 9fde010c945f..7c8e809bdbde 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -521,7 +521,6 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
 		return ret;
 
 	target->thread.fpsimd_state.user_fpsimd = newstate;
-	fpsimd_flush_task_state(target);
 	return ret;
 }
 
@@ -779,7 +778,6 @@ static int compat_vfp_set(struct task_struct *target,
 		uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
 	}
 
-	fpsimd_flush_task_state(target);
 	return ret;
 }
 
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 6357b9c6c90e..7432987816e0 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -430,8 +430,4 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
 	}
-
-	if (thread_flags & _TIF_FOREIGN_FPSTATE)
-		fpsimd_restore_current_state();
-
 }