android_kernel_samsung_msm8976/arch/arm64/mm/tlb.S
Rohit Vaswani 3819dbd2cb arm64: mm: Add tlbi workaround for msm8994
The MSM8994 v1 revision has a bug in which bits [39:38]
of the VA are tied to zero. ARM64 Linux uses these
bits for kernel and userspace mappings. Because of this
bug, other processors may not see the effect of a TLB
invalidation by VA or ASID, which can result in an abort
or data corruption.

The workaround here is to replace TLB invalidation by VA and ASID
with a full invalidation (tlbiall, i.e. tlbi vmalle1is) in the arm64
kernel. This workaround is controlled through CONFIG_ARCH_MSM8994_V1_TLBI_WA.

Change-Id: Ie36c168881e61007d5d5383d989d7ee6adbd1d92
Signed-off-by: Rohit Vaswani <rvaswani@codeaurora.org>
2014-05-14 19:48:40 -07:00
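
For context, the two assembly routines in this file are the backends of the
generic TLB range-flush helpers. A minimal sketch of how a 3.10-era
arch/arm64/include/asm/tlbflush.h reaches them (paraphrased for illustration;
not copied from this tree):

	/* Illustrative sketch only: dispatch from the generic helpers to the
	 * assembly routines guarded by CONFIG_ARCH_MSM8994_V1_TLBI_WA below. */
	extern void __cpu_flush_user_tlb_range(unsigned long start,
					       unsigned long end,
					       struct vm_area_struct *vma);
	extern void __cpu_flush_kern_tlb_range(unsigned long start,
					       unsigned long end);

	static inline void flush_tlb_range(struct vm_area_struct *vma,
					   unsigned long start, unsigned long end)
	{
		__cpu_flush_user_tlb_range(start, end, vma);
	}

	static inline void flush_tlb_kernel_range(unsigned long start,
						  unsigned long end)
	{
		__cpu_flush_kern_tlb_range(start, end);
	}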


/*
 * Based on arch/arm/mm/tlb.S
 *
 * Copyright (C) 1997-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"

/*
 *	__cpu_flush_user_tlb_range(start, end, vma)
 *
 *	Invalidate a range of TLB entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- vma   - vma_struct describing address range
 */
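/*
 * Note on the operand built in the loop below (ARMv8 TLBI VAE1IS encoding):
 *   bits [43:0]  hold VA[55:12] (the page number, i.e. address >> 12)
 *   bits [63:48] hold the ASID
 * On MSM8994 v1, VA bits [39:38] are tied to zero (see commit text), so
 * invalidation by VA/ASID is unreliable and the full tlbi vmalle1is
 * fallback is used instead.
 */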
ENTRY(__cpu_flush_user_tlb_range)
#ifdef CONFIG_ARCH_MSM8994_V1_TLBI_WA
	tlbi	vmalle1is			// MSM8994 v1: invalidate all EL1&0 entries, Inner Shareable
	dsb	sy
	isb
#else
	vma_vm_mm x3, x2			// get vma->vm_mm
	mmid	w3, x3				// get vm_mm->context.id
	dsb	sy
	lsr	x0, x0, #12			// align address
	lsr	x1, x1, #12
	bfi	x0, x3, #48, #16		// start VA and ASID
	bfi	x1, x3, #48, #16		// end VA and ASID
1:	tlbi	vae1is, x0			// TLB invalidate by address and ASID
	add	x0, x0, #1
	cmp	x0, x1
	b.lo	1b
	dsb	sy
#endif
	ret
ENDPROC(__cpu_flush_user_tlb_range)

/*
 *	__cpu_flush_kern_tlb_range(start,end)
 *
 *	Invalidate a range of kernel TLB entries.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 */
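/*
 * Note: vaae1is matches the VA for all ASIDs (unlike vae1is), which is
 * what kernel/global mappings require; the MSM8994 v1 fallback is again
 * a full tlbi vmalle1is.
 */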
ENTRY(__cpu_flush_kern_tlb_range)
#ifdef CONFIG_ARCH_MSM8994_V1_TLBI_WA
	tlbi	vmalle1is			// MSM8994 v1: invalidate all EL1&0 entries, Inner Shareable
#else
	dsb	sy
	lsr	x0, x0, #12			// align address
	lsr	x1, x1, #12
1:	tlbi	vaae1is, x0			// TLB invalidate by address
	add	x0, x0, #1
	cmp	x0, x1
	b.lo	1b
#endif
	dsb	sy
	isb
	ret
ENDPROC(__cpu_flush_kern_tlb_range)