mirror of https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
commit 7db024bc95
CPU suspend is the standard kernel interface used to enter low-power states on ARM64 systems. The current cpu_suspend implementation assumes by default that all low-power states lose the CPU context, so the CPU registers must be saved and cleaned to DRAM upon state entry. Furthermore, the current cpu_suspend() implementation assumes that if the CPU suspend back-end method returns when called, this has to be considered an error regardless of the return code (which can be successful), since the CPU was not expected to return from a code path different from the cpu_resume code path - eg returning from the reset vector.

All in all this means that the current API does not cope well with low-power states that preserve the CPU context when entered (ie retention states): the context is saved for nothing on entry into those states, and a successful state entry can come back as a normal function return, which the current CPU suspend implementation considers an error.

This patch refactors the cpu_suspend() API so that it is split into two separate functionalities. The arm64 cpu_suspend API just provides a wrapper around the CPU operations suspend hook. A new function is introduced (for architecture code use only) for states that require context saving upon entry:

__cpu_suspend(unsigned long arg, int (*fn)(unsigned long))

__cpu_suspend() saves the context on function entry and calls the so-called suspend finisher (ie fn) to complete the suspend operation. The finisher is not expected to return, unless it fails, in which case the error is propagated back to the __cpu_suspend caller.

The API refactoring results in the following pseudo code call sequence for a suspending CPU, when triggered from a kernel subsystem:

/*
 * int cpu_suspend(unsigned long idx)
 * @idx: idle state index
 */
{
-> cpu_suspend(idx)
	|---> CPU operations suspend hook called, if present
	    |--> if (retention_state)
		|--> direct suspend back-end call (eg PSCI suspend)
	    else
		|--> __cpu_suspend(idx, &back_end_finisher);
}

By refactoring the cpu_suspend API this way, the CPU operations back-end has a chance to detect whether idle states require state saving or not, and can call the required suspend operations accordingly, either through a simple function call or indirectly through __cpu_suspend(), which carries out state saving and suspend finisher dispatching to complete idle state entry.

Change-Id: I7f5ba7a15f30c61a9d510bdf3134b9c697a39c61
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Hanjun Guo <hanjun.guo@linaro.org>
Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Git-commit: 714f59925595b9c2ea9c22b107b340d38e3b3bc9
Git-repo: https://git.kernel.org/scm/linux/kernel/git/stable/linux-stable.git
Signed-off-by: Mahesh Sivasubramanian <msivasub@codeaurora.org>
Signed-off-by: Archana Sathyakumar <asathyak@codeaurora.org>
Signed-off-by: Murali Nalajala <mnalajal@codeaurora.org>
Signed-off-by: Maulik Shah <mkshah@codeaurora.org>
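To make the refactored call sequence concrete, below is a minimal, self-contained C sketch of the dispatch described above. It is an illustration only: apart from the cpu_suspend()/__cpu_suspend() split itself, every name here (is_retention_state(), backend_suspend(), backend_finisher(), the main() harness) is an invented placeholder, the CPU-operations hook lookup is omitted, and none of it is the actual kernel implementation.

#include <stdio.h>
#include <errno.h>

/* Placeholder: pretend even state indices are retention states. */
static int is_retention_state(unsigned long idx)
{
	return (idx % 2) == 0;
}

/* Placeholder back-end entry for retention states (eg a direct PSCI call). */
static int backend_suspend(unsigned long idx)
{
	printf("retention state %lu entered, plain return means success\n", idx);
	return 0;
}

/* Placeholder suspend finisher: in the real flow it is not expected to return. */
static int backend_finisher(unsigned long idx)
{
	printf("finisher invoked for state %lu\n", idx);
	return -EIO;		/* returning at all signals failure */
}

/*
 * Stand-in for the arm64 __cpu_suspend(): the real one saves the CPU context
 * before dispatching the finisher; any return from the finisher is an error,
 * and a bogus 0 is coerced to -EOPNOTSUPP, mirroring __cpu_suspend_enter.
 */
static int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	int ret = fn(arg);

	return ret ? ret : -EOPNOTSUPP;
}

/* The refactored wrapper: pick the entry path based on the state type. */
static int cpu_suspend(unsigned long idx)
{
	if (is_retention_state(idx))
		return backend_suspend(idx);		/* context preserved */

	return __cpu_suspend(idx, backend_finisher);	/* context lost */
}

int main(void)
{
	printf("cpu_suspend(0) = %d\n", cpu_suspend(0));
	printf("cpu_suspend(1) = %d\n", cpu_suspend(1));
	return 0;
}

The point of the split shows up in the two return paths: a retention state may legitimately come back through a plain function return with 0, while for a context-losing state any return from the finisher is treated as a failure, which is exactly the contract __cpu_suspend_enter enforces in the listing below.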
187 lines
5.5 KiB
ArmAsm
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>

	.text
/*
 * Implementation of MPIDR_EL1 hash algorithm through shifting
 * and OR'ing.
 *
 * @dst: register containing hash result
 * @rs0: register containing affinity level 0 bit shift
 * @rs1: register containing affinity level 1 bit shift
 * @rs2: register containing affinity level 2 bit shift
 * @rs3: register containing affinity level 3 bit shift
 * @mpidr: register containing MPIDR_EL1 value
 * @mask: register containing MPIDR mask
 *
 * Pseudo C-code:
 *
 *	u32 dst;
 *
 *	compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 rs3, u64 mpidr, u64 mask) {
 *		u32 aff0, aff1, aff2, aff3;
 *		u64 mpidr_masked = mpidr & mask;
 *		aff0 = mpidr_masked & 0xff;
 *		aff1 = mpidr_masked & 0xff00;
 *		aff2 = mpidr_masked & 0xff0000;
 *		aff3 = mpidr_masked & 0xff00000000;
 *		dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2 | aff3 >> rs3);
 *	}
 * Input registers: rs0, rs1, rs2, rs3, mpidr, mask
 * Output register: dst
 * Note: input and output registers must be disjoint register sets
 *       (eg: a macro instance with mpidr = x1 and dst = x1 is invalid)
 */
	.macro compute_mpidr_hash dst, rs0, rs1, rs2, rs3, mpidr, mask
	and	\mpidr, \mpidr, \mask		// mask out MPIDR bits
	and	\dst, \mpidr, #0xff		// dst = aff0
	lsr	\dst, \dst, \rs0		// dst = aff0 >> rs0
	and	\mask, \mpidr, #0xff00		// mask = aff1
	lsr	\mask, \mask, \rs1
	orr	\dst, \dst, \mask		// dst |= (aff1 >> rs1)
	and	\mask, \mpidr, #0xff0000	// mask = aff2
	lsr	\mask, \mask, \rs2
	orr	\dst, \dst, \mask		// dst |= (aff2 >> rs2)
	and	\mask, \mpidr, #0xff00000000	// mask = aff3
	lsr	\mask, \mask, \rs3
	orr	\dst, \dst, \mask		// dst |= (aff3 >> rs3)
	.endm
/*
 * Save CPU state for a suspend and execute the suspend finisher.
 * On success it will return 0 through cpu_resume - ie through a CPU
 * soft/hard reboot from the reset vector.
 * On failure it returns the suspend finisher return value or forces
 * -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher
 * is not allowed to return; if it does, this must be considered a failure).
 * It saves callee registers, and allocates space on the kernel stack
 * to save the CPU specific registers + some other data for resume.
 *
 * x0 = suspend finisher argument
 * x1 = suspend finisher function pointer
 */
ENTRY(__cpu_suspend_enter)
	stp	x29, lr, [sp, #-96]!
	stp	x19, x20, [sp, #16]
	stp	x21, x22, [sp, #32]
	stp	x23, x24, [sp, #48]
	stp	x25, x26, [sp, #64]
	stp	x27, x28, [sp, #80]
	/*
	 * Stash suspend finisher and its argument in x20 and x19
	 */
	mov	x19, x0
	mov	x20, x1
	mov	x2, sp
	sub	sp, sp, #CPU_SUSPEND_SZ		// allocate cpu_suspend_ctx
	mov	x0, sp
	/*
	 * x0 now points to struct cpu_suspend_ctx allocated on the stack
	 */
	str	x2, [x0, #CPU_CTX_SP]
	ldr	x1, =sleep_save_sp
	ldr	x1, [x1, #SLEEP_SAVE_SP_VIRT]	// x1 = sleep_save_sp.save_ptr_stash
#ifdef CONFIG_SMP
	mrs	x7, mpidr_el1
	ldr	x9, =mpidr_hash
	ldr	x10, [x9, #MPIDR_HASH_MASK]
	/*
	 * Following code relies on the struct mpidr_hash
	 * members size.
	 */
	ldp	w3, w4, [x9, #MPIDR_HASH_SHIFTS]
	ldp	w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
	compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
	add	x1, x1, x8, lsl #3		// index the save slot by MPIDR hash
#endif
	bl	__cpu_suspend_save
	/*
	 * Grab suspend finisher in x20 and its argument in x19
	 */
	mov	x0, x19
	mov	x1, x20
	/*
	 * We are ready for power down, fire off the suspend finisher
	 * in x1, with argument in x0
	 */
	blr	x1
	/*
	 * Never gets here, unless the suspend finisher fails.
	 * A successful cpu_suspend returns from cpu_resume; returning
	 * through this code path is considered an error.
	 * If the return value is set to 0, force x0 = -EOPNOTSUPP
	 * to make sure a proper error condition is propagated
	 */
	cmp	x0, #0
	mov	x3, #-EOPNOTSUPP
	csel	x0, x3, x0, eq
	add	sp, sp, #CPU_SUSPEND_SZ		// rewind stack pointer
	ldp	x19, x20, [sp, #16]
	ldp	x21, x22, [sp, #32]
	ldp	x23, x24, [sp, #48]
	ldp	x25, x26, [sp, #64]
	ldp	x27, x28, [sp, #80]
	ldp	x29, lr, [sp], #96
	ret
ENDPROC(__cpu_suspend_enter)
	.ltorg

ENTRY(cpu_resume)
	bl	el2_setup		// if in EL2 drop to EL1 cleanly
#ifdef CONFIG_SMP
	mrs	x1, mpidr_el1
	adrp	x4, mpidr_hash_ptr
	add	x4, x4, #:lo12:mpidr_hash_ptr
	ldr	x5, [x4]
	add	x8, x4, x5		// x8 = struct mpidr_hash phys address
	/* retrieve mpidr_hash members to compute the hash */
	ldr	x2, [x8, #MPIDR_HASH_MASK]
	ldp	w3, w4, [x8, #MPIDR_HASH_SHIFTS]
	ldp	w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
	compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
	/* x7 contains hash index, let's use it to grab context pointer */
#else
	mov	x7, xzr
#endif
	adrp	x0, sleep_save_sp
	add	x0, x0, #:lo12:sleep_save_sp
	ldr	x0, [x0, #SLEEP_SAVE_SP_PHYS]
	ldr	x0, [x0, x7, lsl #3]	// x0 = this CPU's saved context (phys)
	/* load sp from context */
	ldr	x2, [x0, #CPU_CTX_SP]
	adrp	x1, sleep_idmap_phys
	/* load physical address of identity map page table in x1 */
	ldr	x1, [x1, #:lo12:sleep_idmap_phys]
	mov	sp, x2
	/*
	 * cpu_do_resume expects x0 to contain context physical address
	 * pointer and x1 to contain physical address of 1:1 page tables
	 */
	bl	cpu_do_resume		// PC relative jump, MMU off
	b	cpu_resume_mmu		// Resume MMU, never returns
ENDPROC(cpu_resume)

	.data
	.align 3
mpidr_hash_ptr:
	/*
	 * offset of mpidr_hash symbol from current location
	 * used to obtain run-time mpidr_hash address with MMU off
	 */
	.quad	mpidr_hash - .
/*
 * physical address of identity mapped page tables
 */
	.type	sleep_idmap_phys, #object
ENTRY(sleep_idmap_phys)
	.quad	0
/*
 * struct sleep_save_sp {
 *	phys_addr_t *save_ptr_stash;
 *	phys_addr_t save_ptr_stash_phys;
 * };
 */
	.type	sleep_save_sp, #object
ENTRY(sleep_save_sp)
	.space	SLEEP_SAVE_SP_SZ	// struct sleep_save_sp