The following commit has been merged in the master branch:

commit 39ce4395c3ba730341b067e1fb8abbdf9c47ca77
Merge: a9a01e1238cf5b477ec6aa54855356e518998991 010338d729c1090036eb40d2a60b7b7bce2445b8
Author: Linus Torvalds <torvalds@linux-foundation.org>
Date:   Thu Mar 2 14:57:53 2023 -0800
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Catalin Marinas:
- In copy_highpage(), only reset the tag of the destination pointer if KASAN_HW_TAGS is enabled, so that user-space MTE does not interfere with KASAN_SW_TAGS (which relies on top-byte-ignore). A sketch of the resulting copy_highpage() logic follows this list.
- Remove the warning printed when SME is detected without SVE; the kernel can cope with such a configuration (though no such implementation is currently known in the field).
- In cfi_handler(), pass the ESR_EL1 value to die() for consistency with other die() callers.
- Disable HUGETLB_PAGE_OPTIMIZE_VMEMMAP on arm64 since the pte manipulation in the generic vmemmap_remap_pte() does not follow the required ARM break-before-make sequence (clear the pte, flush the TLBs, set the new pte); a sketch of that sequence follows this list. It may be re-enabled once the sequence is sorted out.
- Fix a possible memory leak in the arm64 ACPI code when the SMCCC version and conduit checks fail; the allocate-then-bail pattern involved is sketched after this list.
- Forbid CALL_OPS with CC_OPTIMIZE_FOR_SIZE since gcc ignores -falign-functions=N with -Os.
- Don't pretend KASLR is enabled if offset < MIN_KIMG_ALIGN, as no randomisation would actually take place; a sketch of the corresponding check follows this list.
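
The copy_highpage() change summarised in the first item above is not visible in the combined diff below (it lives in arch/arm64/mm/copypage.c), so here is a minimal sketch of the intended logic. It assumes the mainline helpers kasan_hw_tags_enabled(), page_kasan_tag_reset() and the MTE page-tag helpers; treat it as illustrative rather than the verbatim patch.

#include <linux/highmem.h>
#include <linux/kasan.h>
#include <asm/cpufeature.h>
#include <asm/mte.h>

/* Sketch only: reset the destination page's KASAN tag for HW tags only. */
void copy_highpage(struct page *to, struct page *from)
{
	void *kto = page_address(to);
	void *kfrom = page_address(from);

	copy_page(kto, kfrom);

	/*
	 * With KASAN_SW_TAGS the top byte of the page pointer carries the
	 * software tag and relies on top-byte-ignore, so it must not be
	 * cleared just because user space uses MTE; only reset it when
	 * HW tag-based KASAN is actually in use.
	 */
	if (kasan_hw_tags_enabled())
		page_kasan_tag_reset(to);

	/* Copy the MTE allocation tags along with the data if present. */
	if (system_supports_mte() && page_mte_tagged(from)) {
		WARN_ON_ONCE(!try_page_mte_tagging(to));
		mte_copy_page_tags(kto, kfrom);
		set_page_mte_tagged(to);
	}
}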
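For the HUGETLB_PAGE_OPTIMIZE_VMEMMAP item, the problem is purely one of ordering: the generic vmemmap_remap_pte() installs the new pte directly over a live one, whereas the architecture wants break-before-make. A hedged sketch of the required sequence, using a hypothetical helper name (remap_pte_bbm is not an existing kernel function):

#include <linux/pgtable.h>
#include <asm/tlbflush.h>

/* Hypothetical helper, shown only to illustrate the required ordering. */
static void remap_pte_bbm(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep, pte_t newpte)
{
	pte_clear(mm, addr, ptep);                      /* 1. break: clear the old pte   */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE); /* 2. flush stale TLB entries    */
	set_pte_at(mm, addr, ptep, newpte);             /* 3. make: install the new pte  */
}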
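The ffh_ctxt leak mentioned above is the usual allocate-then-bail pattern: a context buffer is allocated before the SMCCC version and conduit are validated, and the early error returns never free it. The sketch below shows the shape of the bug and one way to fix it; the structure and function names are illustrative, not the exact arm64 ACPI code.

#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct ffh_ctx { u64 dummy; };	/* stand-in for the real FFH context */

/* Buggy shape: ctx is leaked whenever one of the later checks fails. */
static int setup_ffh_region_buggy(void **out)
{
	struct ffh_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;

	if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2)
		return -EOPNOTSUPP;	/* ctx leaked here */

	if (arm_smccc_1_1_get_conduit() == SMCCC_CONDUIT_NONE)
		return -EOPNOTSUPP;	/* ...and here */

	*out = ctx;
	return 0;
}

/* Fixed shape: validate first, allocate last (or kfree() on the error paths). */
static int setup_ffh_region_fixed(void **out)
{
	struct ffh_ctx *ctx;

	if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2)
		return -EOPNOTSUPP;

	if (arm_smccc_1_1_get_conduit() == SMCCC_CONDUIT_NONE)
		return -EOPNOTSUPP;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	*out = ctx;
	return 0;
}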
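Finally, for the KASLR item: the cpufeature.c hunk in the diff below replaces an open-coded kaslr_offset() > 0 test with kaslr_enabled(). A rough sketch of what such a helper checks, assuming MIN_KIMG_ALIGN is the threshold the summary refers to (not necessarily the exact mainline implementation):

#include <linux/types.h>
#include <asm/memory.h>

/*
 * Sketch: an offset below MIN_KIMG_ALIGN only reflects the physical
 * placement of the image, not a KASLR seed, so it does not count as
 * the kernel having been randomised.
 */
static inline bool kaslr_enabled(void)
{
	return kaslr_offset() >= MIN_KIMG_ALIGN;
}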
* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: kaslr: don't pretend KASLR is enabled if offset < MIN_KIMG_ALIGN
  arm64: ftrace: forbid CALL_OPS with CC_OPTIMIZE_FOR_SIZE
  arm64: acpi: Fix possible memory leak of ffh_ctxt
  arm64: mm: hugetlb: Disable HUGETLB_PAGE_OPTIMIZE_VMEMMAP
  arm64: pass ESR_ELx to die() of cfi_handler
  arm64/fpsimd: Remove warning for SME without SVE
  arm64: Reset KASAN tag in copy_highpage with HW tags only
diff --combined arch/arm64/Kconfig
index 27b2592698b0,f7521695a474..1023e896d46b
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@@ -100,7 -100,6 +100,6 @@@ config ARM6
  	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
  	select ARCH_WANT_FRAME_POINTERS
  	select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
- 	select ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
  	select ARCH_WANT_LD_ORPHAN_WARN
  	select ARCH_WANTS_NO_INSTR
  	select ARCH_WANTS_THP_SWAP if ARM64_4K_PAGES
@@@ -186,8 -185,11 +185,9 @@@
  	select HAVE_DEBUG_KMEMLEAK
  	select HAVE_DMA_CONTIGUOUS
  	select HAVE_DYNAMIC_FTRACE
 -	select HAVE_DYNAMIC_FTRACE_WITH_ARGS \
 -		if $(cc-option,-fpatchable-function-entry=2)
  	select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS \
- 		if (DYNAMIC_FTRACE_WITH_ARGS && !CFI_CLANG)
+ 		if (DYNAMIC_FTRACE_WITH_ARGS && !CFI_CLANG && \
+ 		    !CC_OPTIMIZE_FOR_SIZE)
  	select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
  		if DYNAMIC_FTRACE_WITH_ARGS
  	select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@@ -974,22 -976,6 +974,22 @@@ config ARM64_ERRATUM_245716
  	  If unsure, say Y.
  
 +config ARM64_ERRATUM_2645198
 +	bool "Cortex-A715: 2645198: Workaround possible [ESR|FAR]_ELx corruption"
 +	default y
 +	help
 +	  This option adds the workaround for ARM Cortex-A715 erratum 2645198.
 +
 +	  If a Cortex-A715 cpu sees a page mapping permissions change from executable
 +	  to non-executable, it may corrupt the ESR_ELx and FAR_ELx registers on the
 +	  next instruction abort caused by permission fault.
 +
 +	  Only user-space does executable to non-executable permission transition via
 +	  mprotect() system call. Workaround the problem by doing a break-before-make
 +	  TLB invalidation, for all changes to executable user space mappings.
 +
 +	  If unsure, say Y.
 +
  config CAVIUM_ERRATUM_22375
  	bool "Cavium erratum 22375, 24313"
  	default y
diff --combined arch/arm64/kernel/cpufeature.c
index 87687e99fee3,5643a9ca502a..2e3e55139777
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@@ -1633,7 -1633,7 +1633,7 @@@ bool kaslr_requires_kpti(void
  			return false;
  	}
  
- 	return kaslr_offset() > 0;
+ 	return kaslr_enabled();
  }
  
  static bool __meltdown_safe = true;
@@@ -1967,20 -1967,6 +1967,20 @@@ static void cpu_copy_el2regs(const stru
  		write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
  }
  
 +static bool has_nested_virt_support(const struct arm64_cpu_capabilities *cap,
 +				    int scope)
 +{
 +	if (kvm_get_mode() != KVM_MODE_NV)
 +		return false;
 +
 +	if (!has_cpuid_feature(cap, scope)) {
 +		pr_warn("unavailable: %s\n", cap->desc);
 +		return false;
 +	}
 +
 +	return true;
 +}
 +
  #ifdef CONFIG_ARM64_PAN
  static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
  {
@@@ -2276,17 -2262,6 +2276,17 @@@ static const struct arm64_cpu_capabilit
  		.matches = runs_at_el2,
  		.cpu_enable = cpu_copy_el2regs,
  	},
 +	{
 +		.desc = "Nested Virtualization Support",
 +		.capability = ARM64_HAS_NESTED_VIRT,
 +		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 +		.matches = has_nested_virt_support,
 +		.sys_reg = SYS_ID_AA64MMFR2_EL1,
 +		.sign = FTR_UNSIGNED,
 +		.field_pos = ID_AA64MMFR2_EL1_NV_SHIFT,
 +		.field_width = 4,
 +		.min_field_value = ID_AA64MMFR2_EL1_NV_IMP,
 +	},
  	{
  		.capability = ARM64_HAS_32BIT_EL0_DO_NOT_USE,
  		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
diff --combined arch/arm64/kernel/fpsimd.c
index 692dfefbe0ed,7e823ee7ffa2..9e7e50a0fd76
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@@ -385,7 -385,7 +385,7 @@@ static void task_fpsimd_load(void
  	WARN_ON(!system_supports_fpsimd());
  	WARN_ON(!have_cpu_fpsimd_context());
  
 -	if (system_supports_sve()) {
 +	if (system_supports_sve() || system_supports_sme()) {
  		switch (current->thread.fp_type) {
  		case FP_STATE_FPSIMD:
  			/* Stop tracking SVE for this task until next use. */
@@@ -2122,9 -2122,6 +2122,6 @@@ static int __init fpsimd_init(void
  		pr_notice("Advanced SIMD is not implemented\n");
  
- 	if (cpu_have_named_feature(SME) && !cpu_have_named_feature(SVE))
- 		pr_notice("SME is implemented but not SVE\n");
- 
  	sve_sysctl_init();
  	sme_sysctl_init();
diff --combined arch/arm64/kernel/traps.c
index 901dfd9bf04c,4a623e2e982b..4a79ba100799
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@@ -18,7 -18,6 +18,7 @@@
  #include <linux/module.h>
  #include <linux/kexec.h>
  #include <linux/delay.h>
 +#include <linux/efi.h>
  #include <linux/init.h>
  #include <linux/sched/signal.h>
  #include <linux/sched/debug.h>
@@@ -27,7 -26,6 +27,7 @@@
  #include <linux/syscalls.h>
  #include <linux/mm_types.h>
  #include <linux/kasan.h>
 +#include <linux/ubsan.h>
  #include <linux/cfi.h>
  
  #include <asm/atomic.h>
@@@ -35,7 -33,6 +35,7 @@@
  #include <asm/cpufeature.h>
  #include <asm/daifflags.h>
  #include <asm/debug-monitors.h>
 +#include <asm/efi.h>
  #include <asm/esr.h>
  #include <asm/exception.h>
  #include <asm/extable.h>
@@@ -493,10 -490,6 +493,10 @@@ void do_el0_bti(struct pt_regs *regs
  void do_el1_bti(struct pt_regs *regs, unsigned long esr)
  {
 +	if (efi_runtime_fixup_exception(regs, "BTI violation")) {
 +		regs->pstate &= ~PSR_BTYPE_MASK;
 +		return;
 +	}
  	die("Oops - BTI", regs, esr);
  }
@@@ -997,7 -990,7 +997,7 @@@ static int cfi_handler(struct pt_regs *
  	switch (report_cfi_failure(regs, regs->pc, &target, type)) {
  	case BUG_TRAP_TYPE_BUG:
- 		die("Oops - CFI", regs, 0);
+ 		die("Oops - CFI", regs, esr);
  		break;
  
  	case BUG_TRAP_TYPE_WARN:
@@@ -1079,19 -1072,6 +1079,19 @@@ static struct break_hook kasan_break_ho
  };
  #endif
 +#ifdef CONFIG_UBSAN_TRAP
 +static int ubsan_handler(struct pt_regs *regs, unsigned long esr)
 +{
 +	die(report_ubsan_failure(regs, esr & UBSAN_BRK_MASK), regs, esr);
 +	return DBG_HOOK_HANDLED;
 +}
 +
 +static struct break_hook ubsan_break_hook = {
 +	.fn = ubsan_handler,
 +	.imm = UBSAN_BRK_IMM,
 +	.mask = UBSAN_BRK_MASK,
 +};
 +#endif
#define esr_comment(esr) ((esr) & ESR_ELx_BRK64_ISS_COMMENT_MASK)
@@@ -1109,10 -1089,6 +1109,10 @@@ int __init early_brk64(unsigned long ad
  #ifdef CONFIG_KASAN_SW_TAGS
  	if ((esr_comment(esr) & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
  		return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
 +#endif
 +#ifdef CONFIG_UBSAN_TRAP
 +	if ((esr_comment(esr) & ~UBSAN_BRK_MASK) == UBSAN_BRK_IMM)
 +		return ubsan_handler(regs, esr) != DBG_HOOK_HANDLED;
  #endif
  	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
  }
@@@ -1126,9 -1102,6 +1126,9 @@@ void __init trap_init(void
  	register_kernel_break_hook(&fault_break_hook);
  #ifdef CONFIG_KASAN_SW_TAGS
  	register_kernel_break_hook(&kasan_break_hook);
 +#endif
 +#ifdef CONFIG_UBSAN_TRAP
 +	register_kernel_break_hook(&ubsan_break_hook);
  #endif
  	debug_traps_init();
  }