Skip to content

Commit 8f47d75

Browse files
committed
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "The big one is a fix for the VHE enabling path during early boot,
  where the code enabling the MMU wasn't necessarily in the identity
  map of the new page-tables, resulting in a consistent crash with 64k
  pages. In fixing that, we noticed some missing barriers too, so we
  added those for the sake of architectural compliance.

  Other than that, just the usual merge window trickle. There'll be
  more to come, too.

  Summary:

   - Fix lockdep false alarm on resume-from-cpuidle path

   - Fix memory leak in kexec_file

   - Fix module linker script to work with GDB

   - Fix error code when trying to use uprobes with AArch32 instructions

   - Fix late VHE enabling with 64k pages

   - Add missing ISBs after TLB invalidation

   - Fix seccomp when tracing syscall -1

   - Fix stacktrace return code at end of stack

   - Fix inconsistent whitespace for pointer return values

   - Fix compiler warnings when building with W=1"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: stacktrace: Report when we reach the end of the stack
  arm64: ptrace: Fix seccomp of traced syscall -1 (NO_SYSCALL)
  arm64: Add missing ISB after invalidating TLB in enter_vhe
  arm64: Add missing ISB after invalidating TLB in __primary_switch
  arm64: VHE: Enable EL2 MMU from the idmap
  KVM: arm64: make the hyp vector table entries local
  arm64/mm: Fixed some coding style issues
  arm64: uprobe: Return EOPNOTSUPP for AARCH32 instruction probing
  kexec: move machine_kexec_post_load() to public interface
  arm64 module: set plt* section addresses to 0x0
  arm64: kexec_file: fix memory leakage in create_dtb() when fdt_open_into() fails
  arm64: spectre: Prevent lockdep splat on v4 mitigation enable path
2 parents a422ce5 + 3c02600 commit 8f47d75

12 files changed

Lines changed: 44 additions & 27 deletions

File tree

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
#ifdef CONFIG_ARM64_MODULE_PLTS
22
SECTIONS {
3-
.plt (NOLOAD) : { BYTE(0) }
4-
.init.plt (NOLOAD) : { BYTE(0) }
5-
.text.ftrace_trampoline (NOLOAD) : { BYTE(0) }
3+
.plt 0 (NOLOAD) : { BYTE(0) }
4+
.init.plt 0 (NOLOAD) : { BYTE(0) }
5+
.text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) }
66
}
77
#endif

arch/arm64/kernel/head.S

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -837,6 +837,7 @@ SYM_FUNC_START_LOCAL(__primary_switch)
837837

838838
tlbi vmalle1 // Remove any stale TLB entries
839839
dsb nsh
840+
isb
840841

841842
set_sctlr_el1 x19 // re-enable the MMU
842843

arch/arm64/kernel/hyp-stub.S

Lines changed: 27 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -75,9 +75,6 @@ SYM_CODE_END(el1_sync)
7575

7676
// nVHE? No way! Give me the real thing!
7777
SYM_CODE_START_LOCAL(mutate_to_vhe)
78-
// Be prepared to fail
79-
mov_q x0, HVC_STUB_ERR
80-
8178
// Sanity check: MMU *must* be off
8279
mrs x1, sctlr_el2
8380
tbnz x1, #0, 1f
@@ -96,8 +93,11 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
9693
cmp x1, xzr
9794
and x2, x2, x1
9895
csinv x2, x2, xzr, ne
99-
cbz x2, 1f
96+
cbnz x2, 2f
10097

98+
1: mov_q x0, HVC_STUB_ERR
99+
eret
100+
2:
101101
// Engage the VHE magic!
102102
mov_q x0, HCR_HOST_VHE_FLAGS
103103
msr hcr_el2, x0
@@ -131,9 +131,28 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
131131
msr mair_el1, x0
132132
isb
133133

134+
// Hack the exception return to stay at EL2
135+
mrs x0, spsr_el1
136+
and x0, x0, #~PSR_MODE_MASK
137+
mov x1, #PSR_MODE_EL2h
138+
orr x0, x0, x1
139+
msr spsr_el1, x0
140+
141+
b enter_vhe
142+
SYM_CODE_END(mutate_to_vhe)
143+
144+
// At the point where we reach enter_vhe(), we run with
145+
// the MMU off (which is enforced by mutate_to_vhe()).
146+
// We thus need to be in the idmap, or everything will
147+
// explode when enabling the MMU.
148+
149+
.pushsection .idmap.text, "ax"
150+
151+
SYM_CODE_START_LOCAL(enter_vhe)
134152
// Invalidate TLBs before enabling the MMU
135153
tlbi vmalle1
136154
dsb nsh
155+
isb
137156

138157
// Enable the EL2 S1 MMU, as set up from EL1
139158
mrs_s x0, SYS_SCTLR_EL12
@@ -143,17 +162,12 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
143162
mov_q x0, INIT_SCTLR_EL1_MMU_OFF
144163
msr_s SYS_SCTLR_EL12, x0
145164

146-
// Hack the exception return to stay at EL2
147-
mrs x0, spsr_el1
148-
and x0, x0, #~PSR_MODE_MASK
149-
mov x1, #PSR_MODE_EL2h
150-
orr x0, x0, x1
151-
msr spsr_el1, x0
152-
153165
mov x0, xzr
154166

155-
1: eret
156-
SYM_CODE_END(mutate_to_vhe)
167+
eret
168+
SYM_CODE_END(enter_vhe)
169+
170+
.popsection
157171

158172
.macro invalid_vector label
159173
SYM_CODE_START_LOCAL(\label)

arch/arm64/kernel/machine_kexec_file.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -182,8 +182,10 @@ static int create_dtb(struct kimage *image,
182182

183183
/* duplicate a device tree blob */
184184
ret = fdt_open_into(initial_boot_params, buf, buf_size);
185-
if (ret)
185+
if (ret) {
186+
vfree(buf);
186187
return -EINVAL;
188+
}
187189

188190
ret = setup_dtb(image, initrd_load_addr, initrd_len,
189191
cmdline, buf);

arch/arm64/kernel/probes/uprobes.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
3838

3939
/* TODO: Currently we do not support AARCH32 instruction probing */
4040
if (mm->context.flags & MMCF_AARCH32)
41-
return -ENOTSUPP;
41+
return -EOPNOTSUPP;
4242
else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
4343
return -EINVAL;
4444

arch/arm64/kernel/ptrace.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1797,7 +1797,7 @@ int syscall_trace_enter(struct pt_regs *regs)
17971797

17981798
if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
17991799
tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
1800-
if (!in_syscall(regs) || (flags & _TIF_SYSCALL_EMU))
1800+
if (flags & _TIF_SYSCALL_EMU)
18011801
return NO_SYSCALL;
18021802
}
18031803

arch/arm64/kernel/stacktrace.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
4646

4747
/* Terminal record; nothing to unwind */
4848
if (!fp)
49-
return -EINVAL;
49+
return -ENOENT;
5050

5151
if (fp & 0xf)
5252
return -EINVAL;

arch/arm64/kernel/suspend.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -119,7 +119,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
119119
if (!ret)
120120
ret = -EOPNOTSUPP;
121121
} else {
122-
__cpu_suspend_exit();
122+
RCU_NONIDLE(__cpu_suspend_exit());
123123
}
124124

125125
unpause_graph_tracing();

arch/arm64/kvm/hyp/hyp-entry.S

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -119,7 +119,7 @@ el2_error:
119119

120120
.macro invalid_vector label, target = __guest_exit_panic
121121
.align 2
122-
SYM_CODE_START(\label)
122+
SYM_CODE_START_LOCAL(\label)
123123
b \target
124124
SYM_CODE_END(\label)
125125
.endm

arch/arm64/mm/mmu.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1155,7 +1155,7 @@ void vmemmap_free(unsigned long start, unsigned long end,
11551155
}
11561156
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
11571157

1158-
static inline pud_t * fixmap_pud(unsigned long addr)
1158+
static inline pud_t *fixmap_pud(unsigned long addr)
11591159
{
11601160
pgd_t *pgdp = pgd_offset_k(addr);
11611161
p4d_t *p4dp = p4d_offset(pgdp, addr);
@@ -1166,7 +1166,7 @@ static inline pud_t * fixmap_pud(unsigned long addr)
11661166
return pud_offset_kimg(p4dp, addr);
11671167
}
11681168

1169-
static inline pmd_t * fixmap_pmd(unsigned long addr)
1169+
static inline pmd_t *fixmap_pmd(unsigned long addr)
11701170
{
11711171
pud_t *pudp = fixmap_pud(addr);
11721172
pud_t pud = READ_ONCE(*pudp);
@@ -1176,7 +1176,7 @@ static inline pmd_t * fixmap_pmd(unsigned long addr)
11761176
return pmd_offset_kimg(pudp, addr);
11771177
}
11781178

1179-
static inline pte_t * fixmap_pte(unsigned long addr)
1179+
static inline pte_t *fixmap_pte(unsigned long addr)
11801180
{
11811181
return &bm_pte[pte_index(addr)];
11821182
}

0 commit comments

Comments
 (0)