Skip to content

Commit 74a8607

Browse files
Pavankumar Kondeti authored and DhineshCool committed
sched/walt: Fix the memory leak of idle task load pointers
The memory for task load pointers is allocated twice for each idle thread except for the boot CPU. This happens during boot from idle_threads_init()->idle_init() in the following 2 paths:

1. idle_init()->fork_idle()->copy_process()->sched_fork()->init_new_task_load()
2. idle_init()->fork_idle()->init_idle()->init_new_task_load()

The memory allocation for all tasks happens through the 1st path, so use the same for idle tasks and kill the 2nd path. Since the idle thread of the boot CPU does not go through fork_idle(), allocate the memory for it separately.

Change-Id: I4696a414ffe07d4114b56d326463026019e278f1
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
(cherry picked from commit eb58f47212c9621be82108de57bcf3e94ce1035a)
1 parent c0dd326 commit 74a8607

6 files changed

Lines changed: 10 additions & 16 deletions

File tree

include/linux/sched.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -365,7 +365,7 @@ extern int lockdep_tasklist_lock_is_held(void);
365365
extern void sched_init(void);
366366
extern void sched_init_smp(void);
367367
extern asmlinkage void schedule_tail(struct task_struct *prev);
368-
extern void init_idle(struct task_struct *idle, int cpu, bool hotplug);
368+
extern void init_idle(struct task_struct *idle, int cpu);
369369
extern void init_idle_bootup_task(struct task_struct *idle);
370370

371371
extern cpumask_var_t cpu_isolated_map;

kernel/fork.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -2070,7 +2070,7 @@ struct task_struct *fork_idle(int cpu)
20702070
cpu_to_node(cpu));
20712071
if (!IS_ERR(task)) {
20722072
init_idle_pids(task->pids);
2073-
init_idle(task, cpu, false);
2073+
init_idle(task, cpu);
20742074
}
20752075

20762076
return task;

kernel/sched/core.c

Lines changed: 4 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -2514,7 +2514,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
25142514
unsigned long flags;
25152515
int cpu;
25162516

2517-
init_new_task_load(p, false);
2517+
init_new_task_load(p);
25182518
cpu = get_cpu();
25192519

25202520
__sched_fork(clone_flags, p);
@@ -5542,12 +5542,11 @@ void init_idle_bootup_task(struct task_struct *idle)
55425542
* init_idle - set up an idle thread for a given CPU
55435543
* @idle: task in question
55445544
* @cpu: cpu the idle task belongs to
5545-
* @cpu_up: differentiate between initial boot vs hotplug
55465545
*
55475546
* NOTE: this function does not set the idle thread's NEED_RESCHED
55485547
* flag, to make booting more robust.
55495548
*/
5550-
void init_idle(struct task_struct *idle, int cpu, bool cpu_up)
5549+
void init_idle(struct task_struct *idle, int cpu)
55515550
{
55525551
struct rq *rq = cpu_rq(cpu);
55535552
unsigned long flags;
@@ -5556,9 +5555,6 @@ void init_idle(struct task_struct *idle, int cpu, bool cpu_up)
55565555

55575556
__sched_fork(0, idle);
55585557

5559-
if (!cpu_up)
5560-
init_new_task_load(idle, true);
5561-
55625558
raw_spin_lock_irqsave(&idle->pi_lock, flags);
55635559
raw_spin_lock(&rq->lock);
55645560

@@ -8396,7 +8392,8 @@ void __init sched_init(void)
83968392
* but because we are the idle thread, we just pick up running again
83978393
* when this runqueue becomes "idle".
83988394
*/
8399-
init_idle(current, smp_processor_id(), false);
8395+
init_idle(current, smp_processor_id());
8396+
init_new_task_load(current);
84008397

84018398
calc_load_update = jiffies + LOAD_FREQ;
84028399

kernel/sched/walt.c

Lines changed: 1 addition & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -1997,7 +1997,7 @@ int sched_set_init_task_load(struct task_struct *p, int init_load_pct)
19971997
return 0;
19981998
}
19991999

2000-
void init_new_task_load(struct task_struct *p, bool idle_task)
2000+
void init_new_task_load(struct task_struct *p)
20012001
{
20022002
int i;
20032003
u32 init_load_windows;
@@ -2015,9 +2015,6 @@ void init_new_task_load(struct task_struct *p, bool idle_task)
20152015
/* Don't have much choice. CPU frequency would be bogus */
20162016
BUG_ON(!p->ravg.curr_window_cpu || !p->ravg.prev_window_cpu);
20172017

2018-
if (idle_task)
2019-
return;
2020-
20212018
if (current->init_load_pct)
20222019
init_load_pct = current->init_load_pct;
20232020
else

kernel/sched/walt.h

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -150,7 +150,7 @@ extern void fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
150150
extern void inc_rq_walt_stats(struct rq *rq, struct task_struct *p);
151151
extern void dec_rq_walt_stats(struct rq *rq, struct task_struct *p);
152152
extern void fixup_busy_time(struct task_struct *p, int new_cpu);
153-
extern void init_new_task_load(struct task_struct *p, bool idle_task);
153+
extern void init_new_task_load(struct task_struct *p);
154154
extern void mark_task_starting(struct task_struct *p);
155155
extern void set_window_start(struct rq *rq);
156156
void account_irqtime(int cpu, struct task_struct *curr, u64 delta,
@@ -337,7 +337,7 @@ static inline void walt_dec_cumulative_runnable_avg(struct rq *rq,
337337
}
338338

339339
static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }
340-
static inline void init_new_task_load(struct task_struct *p, bool idle_task)
340+
static inline void init_new_task_load(struct task_struct *p)
341341
{
342342
}
343343

kernel/smpboot.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -31,7 +31,7 @@ struct task_struct *idle_thread_get(unsigned int cpu)
3131

3232
if (!tsk)
3333
return ERR_PTR(-ENOMEM);
34-
init_idle(tsk, cpu, true);
34+
init_idle(tsk, cpu);
3535
return tsk;
3636
}
3737

0 commit comments

Comments (0)