arm64: move sp_el0 and tpidr_el1 into cpu_suspend_ctx.

When returning from idle, we rely on the fact that thread_info lives at
the end of the kernel stack, and restore this by masking the saved stack
pointer. Subsequent patches will sever the relationship between the
stack and thread_info, and to cater for this we must save/restore sp_el0
explicitly, storing it in cpu_suspend_ctx.

As cpu_suspend_ctx must be doubleword aligned, this leaves us with an
extra slot in cpu_suspend_ctx. We can use this to save/restore tpidr_el1
in the same way, which simplifies the code, avoiding pointer chasing on
the restore path (as we no longer need to load thread_info::cpu followed
by the relevant slot in __per_cpu_offset based on this).

This patch stashes both registers in cpu_suspend_ctx.

Change-Id: Icd9395e4783c252d7e7f9ee5e991e38777014ccc
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Laura Abbott <labbott@redhat.com>
Cc: James Morse <james.morse@arm.com>
Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Git-commit: 623b476fc815464a0241ea7483da7b3580b7d8ac
Git-repo: https://source.codeaurora.org/quic/la/kernel/msm-3.10.git
[schikk@codeaurora.org: Resolved merge conflicts.
Ignored the sp_el0 changes as changes to support sp_el0
are not there in this baseline ]
Signed-off-by: Swetha Chikkaboraiah <schikk@codeaurora.org>
Signed-off-by: Rajshekar Eashwarappa <reashw@codeaurora.org>
This commit is contained in:
Swetha Chikkaboraiah 2018-03-26 10:36:40 +05:30 committed by syphyr
parent 9d0f39c3a1
commit 1fdc33cb78
3 changed files with 5 additions and 9 deletions

View File

@@ -1,7 +1,7 @@
#ifndef __ASM_SUSPEND_H
#define __ASM_SUSPEND_H
-#define NR_CTX_REGS 11
+#define NR_CTX_REGS 12
/*
* struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on

View File

@@ -121,12 +121,6 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
flush_tlb_all();
-/*
- * Restore per-cpu offset before any kernel
- * subsystem relying on it has a chance to run.
- */
-set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
/*
* Restore HW breakpoint registers to sane values
* before debug exceptions are possibly reenabled

View File

@@ -105,12 +105,13 @@ ENTRY(cpu_do_suspend)
mrs x10, mdscr_el1
mrs x11, oslsr_el1
mrs x12, sctlr_el1
+mrs x13, tpidr_el1
stp x2, x3, [x0]
stp x4, x5, [x0, #16]
stp x6, x7, [x0, #32]
stp x8, x9, [x0, #48]
stp x10, x11, [x0, #64]
-str x12, [x0, #80]
+stp x12, x13, [x0, #80]
ret
ENDPROC(cpu_do_suspend)
@@ -133,7 +134,7 @@ ENTRY(cpu_do_resume)
ldp x6, x7, [x0, #32]
ldp x8, x9, [x0, #48]
ldp x10, x11, [x0, #64]
-ldr x12, [x0, #80]
+ldp x12, x13, [x0, #80]
msr tpidr_el0, x2
msr tpidrro_el0, x3
msr contextidr_el1, x4
@@ -144,6 +145,7 @@ ENTRY(cpu_do_resume)
msr tcr_el1, x8
msr vbar_el1, x9
msr mdscr_el1, x10
+msr tpidr_el1, x13
/*
* Restore oslsr_el1 by writing oslar_el1
*/