Skip to content

Commit b99292c

Browse files
committed
schedule: zephyr_ll: implement thread_init/free domain ops
Implement the new domain_thread_init/free() ops for the Zephyr LL scheduler implementation, and move all privileged operations into these methods. After this change, domain_register() and domain_unregister() are safe to call from a user-space context. Signed-off-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
1 parent 9b8f418 commit b99292c

2 files changed

Lines changed: 122 additions & 47 deletions

File tree

src/schedule/zephyr_domain.c

Lines changed: 114 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -126,7 +126,8 @@ static void zephyr_domain_thread_fn(void *p1, void *p2, void *p3)
126126
}
127127
#endif
128128

129-
dt->handler(dt->arg);
129+
if (dt->handler)
130+
dt->handler(dt->arg);
130131

131132
#ifdef CONFIG_SCHEDULE_LL_STATS_LOG
132133
cycles1 = k_cycle_get_32();
@@ -287,67 +288,65 @@ static int zephyr_domain_unregister(struct ll_schedule_domain *domain,
287288

288289
#else /* CONFIG_SOF_USERSPACE_LL */
289290

290-
/* User-space implementation for register/unregister */
291-
292-
static int zephyr_domain_register_user(struct ll_schedule_domain *domain,
293-
struct task *task,
294-
void (*handler)(void *arg), void *arg)
291+
/*
292+
* Privileged thread initialization for userspace LL scheduling.
293+
* Creates the scheduling thread, sets up timer, grants access to kernel
294+
* objects. Must be called from kernel context before any user-space
295+
* domain_register() calls.
296+
*/
297+
static int zephyr_domain_thread_init(struct ll_schedule_domain *domain,
298+
struct task *task)
295299
{
296300
struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain);
297301
struct zephyr_domain_thread *dt;
298302
char thread_name[] = "ll_thread0";
299303
k_tid_t thread;
300304
int core;
301305

302-
tr_dbg(&ll_tr, "entry");
306+
tr_dbg(&ll_tr, "thread_init entry");
303307

304308
if (task->core < 0 || task->core >= CONFIG_CORE_COUNT)
305309
return -EINVAL;
306310

307-
dt = zephyr_domain->domain_thread + task->core;
311+
core = task->core;
312+
dt = zephyr_domain->domain_thread + core;
308313

309-
/* domain work only needs registered once on each core */
310-
if (dt->handler)
314+
/* thread only needs to be created once per core */
315+
if (dt->ll_thread)
311316
return 0;
312317

313-
/* safety check executed in kernel mode */
314-
__ASSERT_NO_MSG(cpu_get_id() == core);
315-
316-
dt->handler = handler;
317-
dt->arg = arg;
318+
dt->handler = NULL;
318319

319320
/* 10 is rather random, we better not accumulate 10 missed timer interrupts */
320321
k_sem_init(dt->sem, 0, 10);
321322

322323
thread_name[sizeof(thread_name) - 2] = '0' + core;
323324

325+
/* Allocate thread structure dynamically */
326+
dt->ll_thread = k_object_alloc(K_OBJ_THREAD);
324327
if (!dt->ll_thread) {
325-
/* Allocate thread structure dynamically */
326-
dt->ll_thread = k_object_alloc(K_OBJ_THREAD);
327-
if (!dt->ll_thread) {
328-
tr_err(&ll_tr, "Failed to allocate thread object for core %d", core);
329-
dt->handler = NULL;
330-
dt->arg = NULL;
331-
return -ENOMEM;
332-
}
328+
tr_err(&ll_tr, "Failed to allocate thread object for core %d", core);
329+
dt->handler = NULL;
330+
dt->arg = NULL;
331+
return -ENOMEM;
332+
}
333333

334-
thread = k_thread_create(dt->ll_thread, ll_sched_stack[core], ZEPHYR_LL_STACK_SIZE,
335-
zephyr_domain_thread_fn, zephyr_domain,
336-
INT_TO_POINTER(core), NULL, CONFIG_LL_THREAD_PRIORITY,
337-
K_USER, K_FOREVER);
334+
thread = k_thread_create(dt->ll_thread, ll_sched_stack[core], ZEPHYR_LL_STACK_SIZE,
335+
zephyr_domain_thread_fn, zephyr_domain,
336+
INT_TO_POINTER(core), NULL, CONFIG_LL_THREAD_PRIORITY,
337+
K_USER, K_FOREVER);
338338

339-
k_thread_cpu_mask_clear(thread);
340-
k_thread_cpu_mask_enable(thread, core);
341-
k_thread_name_set(thread, thread_name);
339+
k_thread_cpu_mask_clear(thread);
340+
k_thread_cpu_mask_enable(thread, core);
341+
k_thread_name_set(thread, thread_name);
342342

343-
k_mem_domain_add_thread(zephyr_ll_mem_domain(), thread);
344-
k_thread_access_grant(thread, dt->sem, domain->lock, zephyr_domain->timer);
345-
user_grant_dai_access_all(thread);
346-
user_grant_dma_access_all(thread);
347-
tr_dbg(&ll_tr, "granted LL access to thread %p (core %d)", thread, core);
343+
k_mem_domain_add_thread(zephyr_ll_mem_domain(), thread);
344+
k_thread_access_grant(thread, dt->sem, domain->lock, zephyr_domain->timer);
345+
user_grant_dai_access_all(thread);
346+
user_grant_dma_access_all(thread);
347+
tr_dbg(&ll_tr, "granted LL access to thread %p (core %d)", thread, core);
348348

349-
k_thread_start(thread);
350-
}
349+
k_thread_start(thread);
351350

352351
k_mutex_lock(domain->lock, K_FOREVER);
353352
if (!k_timer_user_data_get(zephyr_domain->timer)) {
@@ -370,6 +369,43 @@ static int zephyr_domain_register_user(struct ll_schedule_domain *domain,
370369
return 0;
371370
}
372371

372+
/*
373+
* User-space register: bookkeeping only. The privileged thread setup has
374+
* already been done by domain_thread_init() called from kernel context.
375+
*/
376+
static int zephyr_domain_register_user(struct ll_schedule_domain *domain,
377+
struct task *task,
378+
void (*handler)(void *arg), void *arg)
379+
{
380+
struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain);
381+
struct zephyr_domain_thread *dt;
382+
int core;
383+
384+
tr_dbg(&ll_tr, "register_user entry");
385+
386+
if (task->core < 0 || task->core >= CONFIG_CORE_COUNT)
387+
return -EINVAL;
388+
389+
core = task->core;
390+
dt = zephyr_domain->domain_thread + core;
391+
392+
if (!dt->ll_thread) {
393+
tr_err(&ll_tr, "domain_thread_init() not called for core %d", core);
394+
return -EINVAL;
395+
}
396+
397+
__ASSERT_NO_MSG(!dt->handler || dt->handler == handler);
398+
if (dt->handler)
399+
return 0;
400+
401+
dt->handler = handler;
402+
dt->arg = arg;
403+
404+
tr_info(&ll_tr, "task registered on core %d", core);
405+
406+
return 0;
407+
}
408+
373409
static int zephyr_domain_unregister_user(struct ll_schedule_domain *domain,
374410
struct task *task, uint32_t num_tasks)
375411
{
@@ -384,14 +420,6 @@ static int zephyr_domain_unregister_user(struct ll_schedule_domain *domain,
384420

385421
k_mutex_lock(domain->lock, K_FOREVER);
386422

387-
if (!atomic_read(&domain->total_num_tasks)) {
388-
/* Disable the watchdog */
389-
watchdog_disable(core);
390-
391-
k_timer_stop(zephyr_domain->timer);
392-
k_timer_user_data_set(zephyr_domain->timer, NULL);
393-
}
394-
395423
zephyr_domain->domain_thread[core].handler = NULL;
396424

397425
k_mutex_unlock(domain->lock);
@@ -410,6 +438,45 @@ static int zephyr_domain_unregister_user(struct ll_schedule_domain *domain,
410438
return 0;
411439
}
412440

441+
/*
442+
* Free resources acquired by zephyr_domain_thread_init().
443+
* Stops the timer, aborts the scheduling thread and frees the thread object.
444+
* Must be called from kernel context.
445+
*/
446+
static void zephyr_domain_thread_free(struct ll_schedule_domain *domain,
447+
uint32_t num_tasks)
448+
{
449+
struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain);
450+
int core = cpu_get_id();
451+
struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core;
452+
453+
tr_dbg(&ll_tr, "thread_free entry, core %d, num_tasks %u", core, num_tasks);
454+
455+
/* Still tasks on other cores, only clean up this core's thread */
456+
k_mutex_lock(domain->lock, K_FOREVER);
457+
458+
if (!num_tasks && !atomic_read(&domain->total_num_tasks)) {
459+
/* Last task globally: stop the timer and watchdog */
460+
watchdog_disable(core);
461+
462+
k_timer_stop(zephyr_domain->timer);
463+
k_timer_user_data_set(zephyr_domain->timer, NULL);
464+
}
465+
466+
dt->handler = NULL;
467+
dt->arg = NULL;
468+
469+
k_mutex_unlock(domain->lock);
470+
471+
if (dt->ll_thread) {
472+
k_thread_abort(dt->ll_thread);
473+
k_object_free(dt->ll_thread);
474+
dt->ll_thread = NULL;
475+
}
476+
477+
tr_info(&ll_tr, "thread_free done, core %d", core);
478+
}
479+
413480
struct k_thread *zephyr_domain_thread_tid(struct ll_schedule_domain *domain)
414481
{
415482
struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain);
@@ -452,6 +519,8 @@ APP_TASK_DATA static const struct ll_schedule_domain_ops zephyr_domain_ops = {
452519
#ifdef CONFIG_SOF_USERSPACE_LL
453520
.domain_register = zephyr_domain_register_user,
454521
.domain_unregister = zephyr_domain_unregister_user,
522+
.domain_thread_init = zephyr_domain_thread_init,
523+
.domain_thread_free = zephyr_domain_thread_free,
455524
#else
456525
.domain_register = zephyr_domain_register,
457526
.domain_unregister = zephyr_domain_unregister,

src/schedule/zephyr_ll.c

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -514,7 +514,12 @@ struct k_thread *zephyr_ll_init_context(void *data, struct task *task)
514514
struct zephyr_ll *sch = data;
515515
int ret;
516516

517-
ret = domain_register(sch->ll_domain, task, &schedule_ll_callback, sch);
517+
/*
518+
* Use domain_thread_init() for privileged setup (thread creation,
519+
* timer, access grants). domain_register() is now bookkeeping only
520+
* and will be called later from user context when scheduling tasks.
521+
*/
522+
ret = domain_thread_init(sch->ll_domain, task);
518523
if (ret < 0) {
519524
tr_err(&ll_tr, "cannot init_context %d", ret);
520525
return NULL;
@@ -536,7 +541,8 @@ void zephyr_ll_free_context(void *data)
536541
{
537542
struct zephyr_ll *sch = data;
538543

539-
(void *)sch;
544+
tr_info(&ll_tr, "free the domain thread");
545+
domain_thread_free(sch->ll_domain, sch->n_tasks);
540546
}
541547
#endif
542548

0 commit comments

Comments
 (0)