Skip to content

Commit 1790f2f

Browse files
Shrikanth Hegde authored and maddy-kerneldev committed
powerpc: Fix kuap warnings on lazy/full preemption with tracing
These KUAP bugs/Warnings were seen often when tracing was enabled. It happens with preempt=full/lazy. It is easily hit. How to trigger: echo lazy > /sys/kernel/debug/sched/preempt echo function > /sys/kernel/debug/tracing/current_tracer stress-ng --class memory --all 1 -t 3 Bug: Write fault blocked by KUAP! WARNING: [] arch/powerpc/mm/fault.c:231 at bad_kernel_fault.constprop.0+0x1a8/0x2c8, CPU#9: stress-ng-vm-rw/5477 NIP [c00000000008cdec] bad_kernel_fault.constprop.0+0x1a8/0x2c8 Call Trace: bad_kernel_fault.constprop.0+0x1a4/0x2c8 (unreliable) ___do_page_fault+0x688/0xa54 do_page_fault+0x30/0x70 data_access_common_virt+0x210/0x220 ---- interrupt: 300 at __copy_tofrom_user_power7+0x410/0x7ac NIP [c0000000000b3b44] __copy_tofrom_user_power7+0x410/0x7ac LR [c0000000009a7d78] _copy_to_iter+0x134/0x9c4 Enabled CONFIG_PPC_KUAP_DEBUG=y, which prints out below warnings. WARNING: ./arch/powerpc/include/asm/book3s/64/kup.h:93 at _switch+0x80/0x12c, CPU#9: stress-ng-vm-rw/5477 NIP [c000000000013ce4] _switch+0x80/0x12c LR [c00000000001f968] __switch_to+0x148/0x230 Call Trace: __switch_to+0x148/0x230 __schedule+0x270/0x700 preempt_schedule_notrace+0x64/0xd8 function_trace_call+0x180/0x204 ftrace_call+0x4/0x4c enter_vmx_usercopy+0x10/0x74 __copy_tofrom_user_power7+0x278/0x7ac _copy_to_iter+0x134/0x9c4 copy_page_to_iter+0xe4/0x1c4 process_vm_rw_single_vec.constprop.0+0x1cc/0x3b4 process_vm_rw_core.constprop.0+0x168/0x30c process_vm_rw+0x128/0x184 system_call_exception+0x128/0x390 system_call_vectored_common+0x15c/0x2ec enter/exit vmx_usercopy clearly says it shouldn't call schedule. Doing so will end up corrupting AMR registers. When function tracer is enabled, the entry point, i.e enter_vmx_usercopy could be in preemptible context. First thing enter_vmx_usercopy does is, preempt_disable and again function exit of exit_vmx_usercopy maybe preemptible too. So make these as notrace to avoid these bug reports. 
WARNING: [amr != AMR_KUAP_BLOCKED] ./arch/powerpc/include/asm/book3s/64/kup.h:293 at arch_local_irq_restore.part.0+0x1e8/0x224, CPU#15: stress-ng-pipe/11623 NIP [c000000000038830] arch_local_irq_restore.part.0+0x1e8/0x224 LR [c00000000003871c] arch_local_irq_restore.part.0+0xd4/0x224 Call Trace: return_to_handler+0x0/0x4c (unreliable) __rb_reserve_next+0x198/0x4f8 ring_buffer_lock_reserve+0x1a8/0x51c trace_buffer_lock_reserve+0x30/0x80 __graph_entry.isra.0+0x118/0x140 function_graph_enter_regs+0x1ec/0x408 ftrace_graph_func+0x50/0xcc ftrace_call+0x4/0x4c enable_kernel_altivec+0x10/0xd0 enter_vmx_usercopy+0x58/0x74 return_to_handler+0x0/0x4c (__copy_tofrom_user_power7+0x278/0x7ac) _copy_from_iter+0x134/0x9bc copy_page_from_iter+0xd4/0x1a0 Since the AMR registers aren't set to the BLOCKED state, warnings could be seen if there is any unlock involved, which gets triggered via arch_local_irq_restore. So enable_kernel_altivec had to be marked notrace for that as well, and similarly check_if_tm_restore_required and giveup_altivec. Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com> Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com> Link: https://patch.msgid.link/20260109064917.777587-2-sshegde@linux.ibm.com
1 parent ee00bdb commit 1790f2f

2 files changed

Lines changed: 7 additions & 7 deletions

File tree

arch/powerpc/kernel/process.c

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@
8080
*/
8181
bool tm_suspend_disabled __ro_after_init = false;
8282

83-
static void check_if_tm_restore_required(struct task_struct *tsk)
83+
notrace static void check_if_tm_restore_required(struct task_struct *tsk)
8484
{
8585
/*
8686
* If we are saving the current thread's registers, and the
@@ -98,7 +98,7 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
9898
}
9999

100100
#else
101-
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
101+
static __always_inline void check_if_tm_restore_required(struct task_struct *tsk) { }
102102
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
103103

104104
bool strict_msr_control;
@@ -231,7 +231,7 @@ static inline void __giveup_fpu(struct task_struct *tsk) { }
231231
#endif /* CONFIG_PPC_FPU */
232232

233233
#ifdef CONFIG_ALTIVEC
234-
static void __giveup_altivec(struct task_struct *tsk)
234+
notrace static void __giveup_altivec(struct task_struct *tsk)
235235
{
236236
unsigned long msr;
237237

@@ -243,7 +243,7 @@ static void __giveup_altivec(struct task_struct *tsk)
243243
regs_set_return_msr(tsk->thread.regs, msr);
244244
}
245245

246-
void giveup_altivec(struct task_struct *tsk)
246+
notrace void giveup_altivec(struct task_struct *tsk)
247247
{
248248
check_if_tm_restore_required(tsk);
249249

@@ -253,7 +253,7 @@ void giveup_altivec(struct task_struct *tsk)
253253
}
254254
EXPORT_SYMBOL(giveup_altivec);
255255

256-
void enable_kernel_altivec(void)
256+
notrace void enable_kernel_altivec(void)
257257
{
258258
unsigned long cpumsr;
259259

arch/powerpc/lib/vmx-helper.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
#include <linux/hardirq.h>
1111
#include <asm/switch_to.h>
1212

13-
int enter_vmx_usercopy(void)
13+
notrace int enter_vmx_usercopy(void)
1414
{
1515
if (in_interrupt())
1616
return 0;
@@ -32,7 +32,7 @@ int enter_vmx_usercopy(void)
3232
* This function must return 0 because we tail call optimise when calling
3333
* from __copy_tofrom_user_power7 which returns 0 on success.
3434
*/
35-
int exit_vmx_usercopy(void)
35+
notrace int exit_vmx_usercopy(void)
3636
{
3737
disable_kernel_altivec();
3838
pagefault_enable();

0 commit comments

Comments
 (0)