Commit 5b3a426

riteshharjani authored and maddy-kerneldev committed

powerpc/64s/slb: Add no_slb_preload early cmdline param
The no_slb_preload cmdline option can be useful for quickly disabling and/or testing the performance impact of userspace SLB preloads. Recently there was an SLB multi-hit issue caused by the SLB preload cache which was very difficult to triage. This cmdline option allows preloads to be disabled quickly, to verify whether the problem lies in the preload cache or somewhere else.

It can also be used to see the effect of SLB preloads on any application workload, e.g. the number of SLB faults with or without SLB preloads:

with slb_preload:
    slb_faults (minimal initrd boot): 15
    slb_faults (full systemd boot):   300

with no_slb_preload:
    slb_faults (minimal initrd boot): 33
    slb_faults (full systemd boot):   138180

Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/de484b55c45d831bc2db63945f455153c89a9a65.1761834163.git.ritesh.list@gmail.com
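Since the option is registered via early_param() and takes no value, enabling it is just a matter of adding the bare token to the kernel command line. A minimal usage sketch, assuming a GRUB-based system (the surrounding cmdline contents shown here are illustrative, not from this patch):

    # /etc/default/grub: append the flag to the existing command line,
    # then regenerate the grub config and reboot
    GRUB_CMDLINE_LINUX="root=/dev/sda2 ro no_slb_preload"

    # after reboot, confirm the kernel saw the option
    $ cat /proc/cmdline
    root=/dev/sda2 ro no_slb_preload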
1 parent 2a492d6 · commit 5b3a426

4 files changed: 28 additions & 0 deletions

Documentation/admin-guide/kernel-parameters.txt

Lines changed: 3 additions & 0 deletions
@@ -7192,6 +7192,9 @@
 			them frequently to increase the rate of SLB faults
 			on kernel addresses.
 
+	no_slb_preload	[PPC,EARLY]
+			Disables slb preloading for userspace.
+
 	sunrpc.min_resvport=
 	sunrpc.max_resvport=
 			[NFS,SUNRPC]

arch/powerpc/mm/book3s64/hash_utils.c

Lines changed: 3 additions & 0 deletions
@@ -1329,6 +1329,9 @@ static void __init htab_initialize(void)
 	if (stress_slb_enabled)
 		static_branch_enable(&stress_slb_key);
 
+	if (no_slb_preload)
+		static_branch_enable(&no_slb_preload_key);
+
 	if (stress_hpt_enabled) {
 		unsigned long tmp;
 		static_branch_enable(&stress_hpt_key);

arch/powerpc/mm/book3s64/internal.h

Lines changed: 7 additions & 0 deletions
@@ -22,6 +22,13 @@ static inline bool stress_hpt(void)
 	return static_branch_unlikely(&stress_hpt_key);
 }
 
+extern bool no_slb_preload;
+DECLARE_STATIC_KEY_FALSE(no_slb_preload_key);
+static inline bool slb_preload_disabled(void)
+{
+	return static_branch_unlikely(&no_slb_preload_key);
+}
+
 void hpt_do_stress(unsigned long ea, unsigned long hpte_group);
 
 void exit_lazy_flush_tlb(struct mm_struct *mm, bool always_flush);

arch/powerpc/mm/book3s64/slb.c

Lines changed: 15 additions & 0 deletions
@@ -42,6 +42,15 @@ early_param("stress_slb", parse_stress_slb);
 
 __ro_after_init DEFINE_STATIC_KEY_FALSE(stress_slb_key);
 
+bool no_slb_preload __initdata;
+static int __init parse_no_slb_preload(char *p)
+{
+	no_slb_preload = true;
+	return 0;
+}
+early_param("no_slb_preload", parse_no_slb_preload);
+__ro_after_init DEFINE_STATIC_KEY_FALSE(no_slb_preload_key);
+
 static void assert_slb_presence(bool present, unsigned long ea)
 {
 #ifdef CONFIG_DEBUG_VM
@@ -299,6 +308,9 @@ static void preload_add(struct thread_info *ti, unsigned long ea)
 	unsigned char idx;
 	unsigned long esid;
 
+	if (slb_preload_disabled())
+		return;
+
 	if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
 		/* EAs are stored >> 28 so 256MB segments don't need clearing */
 		if (ea & ESID_MASK_1T)
@@ -412,6 +424,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 
 	copy_mm_to_paca(mm);
 
+	if (slb_preload_disabled())
+		return;
+
 	/*
 	 * We gradually age out SLBs after a number of context switches to
 	 * reduce reload overhead of unused entries (like we do with FP/VEC