Skip to content

Commit ad02735

Browse files
committed
WIP: ipc: implement user-space IPC handling
Add a dedicated user-space thread for handling IPC commands that operate on audio pipelines when CONFIG_SOF_USERSPACE_LL is enabled. The kernel IPC handler forwards CREATE_PIPELINE messages to the user thread via k_event signaling and collects the result through a k_sem handshake. The IPC task_mask IPC_TASK_IN_THREAD bit prevents host completion until the user thread finishes. Key changes: - add ipc_user struct and user-space IPC thread with event loop - allocate IPC context statically in a dedicated memory partition - replace sof_get()->ipc with ipc_get() returning static context - forward CREATE_PIPELINE to user thread via ipc_user_forward_cmd() - allocate pipeline and IPC container from user-space heap This is an in-progress implementation that only handles one IPC message type and is thus not a full implementation. It does, however, make it possible to test IPC user thread creation and the basic mechanism for handling messages. Signed-off-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
1 parent e84bb6b commit ad02735

5 files changed

Lines changed: 296 additions & 22 deletions

File tree

src/include/sof/ipc/common.h

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,20 @@ extern struct tr_ctx ipc_tr;
5454
#define IPC_TASK_SECONDARY_CORE BIT(2)
5555
#define IPC_TASK_POWERDOWN BIT(3)
5656

57+
/**
 * @brief Shared state between the kernel IPC handler and the
 * user-space IPC thread (CONFIG_SOF_USERSPACE_LL).
 */
struct ipc_user {
	/** @brief User-space IPC worker thread handle */
	struct k_thread *thread;
	/** @brief Semaphore for kernel <-> user completion hand-shake */
	struct k_sem *sem;
	/** @brief Event object used to wake the user thread */
	struct k_event *event;
	/** @brief Copy of IPC4 message primary word forwarded to user thread */
	uint32_t ipc_msg_pri;
	/** @brief Copy of IPC4 message extension word forwarded to user thread */
	uint32_t ipc_msg_ext;
	/** @brief Result code from user thread processing */
	int result;
	/** @brief Back-pointer to the owning IPC context */
	struct ipc *ipc;
	/**
	 * @brief Thread returned by scheduler_init_context() — presumably the
	 * low-latency audio scheduler thread; TODO confirm against scheduler code
	 */
	struct k_thread *audio_thread;
};
70+
5771
struct ipc {
5872
struct k_spinlock lock; /* locking mechanism */
5973
void *comp_data;
@@ -75,6 +89,10 @@ struct ipc {
7589
struct task ipc_task;
7690
#endif
7791

92+
#ifdef CONFIG_SOF_USERSPACE_LL
93+
struct ipc_user *ipc_user_pdata;
94+
#endif
95+
7896
#ifdef CONFIG_SOF_TELEMETRY_IO_PERFORMANCE_MEASUREMENTS
7997
/* io performance measurement */
8098
struct io_perf_data_item *io_perf_in_msg_count;
@@ -96,6 +114,12 @@ struct ipc {
96114

97115
extern struct task_ops ipc_task_ops;
98116

117+
#ifdef CONFIG_SOF_USERSPACE_LL
118+
119+
struct ipc *ipc_get(void);
120+
121+
#else
122+
99123
/**
100124
* \brief Get the IPC global context.
101125
* @return The global IPC context.
@@ -105,6 +129,8 @@ static inline struct ipc *ipc_get(void)
105129
return sof_get()->ipc;
106130
}
107131

132+
#endif /* CONFIG_SOF_USERSPACE_LL */
133+
108134
/**
109135
* \brief Initialise global IPC context.
110136
* @param[in,out] sof Global SOF context.
@@ -294,4 +320,14 @@ void ipc_complete_cmd(struct ipc *ipc);
294320
/* GDB stub: should enter GDB after completing the IPC processing */
295321
extern bool ipc_enter_gdb;
296322

323+
#ifdef CONFIG_SOF_USERSPACE_LL
324+
struct ipc4_message_request;
325+
/**
326+
* @brief Forward an IPC4 command to the user-space thread.
327+
* @param ipc4 Pointer to the IPC4 message request
328+
* @return Result from user thread processing
329+
*/
330+
int ipc_user_forward_cmd(struct ipc4_message_request *ipc4);
331+
#endif
332+
297333
#endif /* __SOF_DRIVERS_IPC_H__ */

src/ipc/ipc-common.c

Lines changed: 229 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,8 @@
2424
#include <sof/lib/memory.h>
2525
#include <sof/list.h>
2626
#include <sof/platform.h>
27+
#include <sof/schedule/ll_schedule.h>
28+
#include <sof/schedule/ll_schedule_domain.h>
2729
#include <rtos/sof.h>
2830
#include <rtos/spinlock.h>
2931
#include <ipc/dai.h>
@@ -35,6 +37,16 @@
3537
#include <stddef.h>
3638
#include <stdint.h>
3739

40+
#ifdef __ZEPHYR__
41+
#include <zephyr/kernel.h>
42+
#endif
43+
44+
#ifdef CONFIG_SOF_USERSPACE_LL
45+
#include <rtos/userspace_helper.h>
46+
#include <sof/schedule/ll_schedule_domain.h>
47+
#include <ipc4/pipeline.h>
48+
#endif
49+
3850
#include <sof/debug/telemetry/performance_monitor.h>
3951

4052
LOG_MODULE_REGISTER(ipc, CONFIG_SOF_LOG_LEVEL);
@@ -43,6 +55,18 @@ SOF_DEFINE_REG_UUID(ipc);
4355

4456
DECLARE_TR_CTX(ipc_tr, SOF_UUID(ipc_uuid), LOG_LEVEL_INFO);
4557

58+
#ifdef CONFIG_SOF_USERSPACE_LL
/* Dedicated memory partition holding the IPC context so that it can be
 * mapped into the memory domain of user-space threads.
 */
K_APPMEM_PARTITION_DEFINE(ipc_context_part);

/* Statically allocated global IPC context, placed in the partition above. */
K_APP_BMEM(ipc_context_part) static struct ipc ipc_context;

/**
 * \brief Get the IPC global context.
 * @return The global IPC context (static storage, never NULL).
 */
struct ipc *ipc_get(void)
{
	return &ipc_context;
}
EXPORT_SYMBOL(ipc_get);
#endif
69+
4670
int ipc_process_on_core(uint32_t core, bool blocking)
4771
{
4872
struct ipc *ipc = ipc_get();
@@ -256,7 +280,11 @@ void ipc_msg_send(struct ipc_msg *msg, void *data, bool high_priority)
256280
list_item_append(&msg->list, &ipc->msg_list);
257281
}
258282

283+
#if 0 /*def CONFIG_SOF_USERSPACE_LL */
284+
LOG_WRN("Skipping IPC worker schedule. TODO to fix\n");
285+
#else
259286
schedule_ipc_worker();
287+
#endif
260288

261289
k_spin_unlock(&ipc->lock, key);
262290
}
@@ -288,29 +316,207 @@ void ipc_schedule_process(struct ipc *ipc)
288316
#endif
289317
}
290318

319+
#ifdef CONFIG_SOF_USERSPACE_LL
/* User-space thread for pipeline_two_components test */

/* Stack size for the user-space IPC thread */
#define IPC_USER_STACKSIZE 8192

/* Event bit: a forwarded IPC command is pending for the user thread */
#define IPC_USER_EVENT_CMD BIT(0)
/* Event bit: request the user thread to exit its processing loop */
#define IPC_USER_EVENT_STOP BIT(1)

/* Thread object and stack for the user-space IPC handler */
static struct k_thread ipc_user_thread;
static K_THREAD_STACK_DEFINE(ipc_user_stack, IPC_USER_STACKSIZE);
328+
329+
/**
330+
* @brief Forward an IPC4 command to the user-space thread.
331+
*
332+
* Called from kernel context (IPC EDF task) to forward the IPC4
333+
* message to the user-space thread for processing. Sets
334+
* IPC_TASK_IN_THREAD in task_mask so the host is not signaled
335+
* until the user thread completes. Blocks until the user thread
336+
* finishes processing and returns the result.
337+
*
338+
* @param ipc4 Pointer to the IPC4 message request
339+
* @return Result from user thread processing
340+
*/
341+
int ipc_user_forward_cmd(struct ipc4_message_request *ipc4)
342+
{
343+
struct ipc *ipc = ipc_get();
344+
struct ipc_user *pdata = ipc->ipc_user_pdata;
345+
k_spinlock_key_t key;
346+
int ret;
347+
348+
LOG_DBG("IPC: forward cmd %08x", ipc4->primary.dat);
349+
350+
/* Copy message words — original buffer may be reused */
351+
pdata->ipc_msg_pri = ipc4->primary.dat;
352+
pdata->ipc_msg_ext = ipc4->extension.dat;
353+
pdata->ipc = ipc;
354+
355+
/* Prevent host completion until user thread finishes */
356+
key = k_spin_lock(&ipc->lock);
357+
ipc->task_mask |= IPC_TASK_IN_THREAD;
358+
k_spin_unlock(&ipc->lock, key);
359+
360+
/* Wake the user thread */
361+
k_event_set(pdata->event, IPC_USER_EVENT_CMD);
362+
363+
/* Wait for user thread to complete */
364+
ret = k_sem_take(pdata->sem, K_MSEC(10));
365+
if (ret) {
366+
LOG_ERR("IPC user: sem error %d\n", ret);
367+
return ret;
368+
}
369+
370+
/* Clear the task mask bit and check for completion */
371+
key = k_spin_lock(&ipc->lock);
372+
ipc->task_mask &= ~IPC_TASK_IN_THREAD;
373+
ipc_complete_cmd(ipc);
374+
k_spin_unlock(&ipc->lock, key);
375+
376+
return pdata->result;
377+
}
378+
379+
/**
380+
* User-space thread entry point for pipeline_two_components test.
381+
* p1 points to the ppl_test_ctx shared with the kernel launcher.
382+
*/
383+
static void ipc_user_thread_fn(void *p1, void *p2, void *p3)
384+
{
385+
struct ipc_user *ipc_user = p1;
386+
387+
ARG_UNUSED(p2);
388+
ARG_UNUSED(p3);
389+
390+
__ASSERT(k_is_user_context(), "expected user context");
391+
392+
/* Signal startup complete — unblocks init waiting on semaphore */
393+
k_sem_give(ipc_user->sem);
394+
LOG_INF("IPC user-space thread started");
395+
396+
for (;;) {
397+
uint32_t mask = k_event_wait_safe(ipc_user->event,
398+
IPC_USER_EVENT_CMD | IPC_USER_EVENT_STOP,
399+
false, K_MSEC(5000));
400+
401+
LOG_DBG("IPC user wake, mask %u", mask);
402+
403+
if (mask & IPC_USER_EVENT_CMD) {
404+
struct ipc4_pipeline_create pipe_msg;
405+
406+
/* Reconstruct the IPC4 message from copied words */
407+
pipe_msg.primary.dat = ipc_user->ipc_msg_pri;
408+
pipe_msg.extension.dat = ipc_user->ipc_msg_ext;
409+
410+
/* Execute pipeline creation in user context */
411+
ipc_user->result = ipc_pipeline_new(ipc_user->ipc, (ipc_pipe_new *)&pipe_msg);
412+
413+
/* Signal completion — kernel side will finish IPC */
414+
k_sem_give(ipc_user->sem);
415+
}
416+
417+
if (mask & IPC_USER_EVENT_STOP)
418+
break;
419+
}
420+
}
421+
422+
__cold int ipc_user_init(void)
423+
{
424+
struct ipc *ipc = ipc_get();
425+
struct ipc_user *ipc_user = sof_heap_alloc(sof_sys_user_heap_get(), SOF_MEM_FLAG_USER,
426+
sizeof(*ipc_user), 0);
427+
int ret;
428+
429+
ipc_user->sem = k_object_alloc(K_OBJ_SEM);
430+
if (!ipc_user->sem) {
431+
LOG_ERR("user IPC sem alloc failed");
432+
k_panic();
433+
}
434+
435+
ret = k_mem_domain_add_partition(zephyr_ll_mem_domain(), &ipc_context_part);
436+
437+
k_sem_init(ipc_user->sem, 0, 1);
438+
439+
/* Allocate kernel objects for the user-space thread */
440+
ipc_user->event = k_object_alloc(K_OBJ_EVENT);
441+
if (!ipc_user->event) {
442+
LOG_ERR("user IPC event alloc failed");
443+
k_panic();
444+
}
445+
k_event_init(ipc_user->event);
446+
447+
k_thread_create(&ipc_user_thread, ipc_user_stack, IPC_USER_STACKSIZE,
448+
ipc_user_thread_fn, ipc_user, NULL, NULL,
449+
-1, K_USER, K_FOREVER);
450+
451+
k_thread_access_grant(&ipc_user_thread, ipc_user->sem, ipc_user->event);
452+
user_grant_dai_access_all(&ipc_user_thread);
453+
user_grant_dma_access_all(&ipc_user_thread);
454+
user_access_to_mailbox(zephyr_ll_mem_domain(), &ipc_user_thread);
455+
zephyr_ll_grant_access(&ipc_user_thread);
456+
k_mem_domain_add_thread(zephyr_ll_mem_domain(), &ipc_user_thread);
457+
458+
k_thread_name_set(&ipc_user_thread, __func__);
459+
460+
/* Store references in ipc struct so kernel handler can forward commands */
461+
ipc->ipc_user_pdata = ipc_user;
462+
463+
k_thread_start(&ipc_user_thread);
464+
465+
struct task *task = zephyr_ll_task_alloc();
466+
schedule_task_init_ll(task, SOF_UUID(ipc_uuid), SOF_SCHEDULE_LL_TIMER,
467+
0, NULL, NULL, cpu_get_id(), 0);
468+
ipc_user->audio_thread = scheduler_init_context(task);
469+
470+
/* Wait for user thread startup — consumes the initial k_sem_give from thread */
471+
k_sem_take(ipc->ipc_user_pdata->sem, K_FOREVER);
472+
473+
return 0;
474+
}
475+
#else
476+
static int ipc_user_init(void)
477+
{
478+
return 0;
479+
}
480+
#endif /* CONFIG_SOF_USERSPACE_LL */
481+
291482
__cold int ipc_init(struct sof *sof)
292483
{
484+
struct k_heap *heap;
485+
struct ipc *ipc;
486+
293487
assert_can_be_cold();
294488

295489
tr_dbg(&ipc_tr, "entry");
296490

491+
#ifdef CONFIG_SOF_USERSPACE_LL
492+
heap = zephyr_ll_user_heap();
493+
494+
ipc = ipc_get();
495+
memset(ipc, 0, sizeof(*ipc));
496+
#else
497+
heap = NULL;
498+
297499
/* init ipc data */
298-
sof->ipc = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, sizeof(*sof->ipc));
299-
if (!sof->ipc) {
500+
ipc = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, sizeof(*ipc));
501+
if (!ipc) {
300502
tr_err(&ipc_tr, "Unable to allocate IPC data");
301503
return -ENOMEM;
302504
}
303-
sof->ipc->comp_data = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT,
304-
SOF_IPC_MSG_MAX_SIZE);
305-
if (!sof->ipc->comp_data) {
505+
sof->ipc = ipc;
506+
#endif
507+
508+
ipc->comp_data = sof_heap_alloc(heap, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT,
509+
SOF_IPC_MSG_MAX_SIZE, 0);
510+
if (!ipc->comp_data) {
306511
tr_err(&ipc_tr, "Unable to allocate IPC component data");
307-
rfree(sof->ipc);
512+
sof_heap_free(heap, ipc);
308513
return -ENOMEM;
309514
}
515+
memset(ipc->comp_data, 0, SOF_IPC_MSG_MAX_SIZE);
310516

311-
k_spinlock_init(&sof->ipc->lock);
312-
list_init(&sof->ipc->msg_list);
313-
list_init(&sof->ipc->comp_list);
517+
k_spinlock_init(&ipc->lock);
518+
list_init(&ipc->msg_list);
519+
list_init(&ipc->comp_list);
314520

315521
#ifdef CONFIG_SOF_TELEMETRY_IO_PERFORMANCE_MEASUREMENTS
316522
struct io_perf_data_item init_data = {IO_PERF_IPC_ID,
@@ -319,20 +525,17 @@ __cold int ipc_init(struct sof *sof)
319525
IO_PERF_POWERED_UP_ENABLED,
320526
IO_PERF_D0IX_POWER_MODE,
321527
0, 0, 0 };
322-
io_perf_monitor_init_data(&sof->ipc->io_perf_in_msg_count, &init_data);
528+
io_perf_monitor_init_data(&ipc->io_perf_in_msg_count, &init_data);
323529
init_data.direction = IO_PERF_OUTPUT_DIRECTION;
324-
io_perf_monitor_init_data(&sof->ipc->io_perf_out_msg_count, &init_data);
530+
io_perf_monitor_init_data(&ipc->io_perf_out_msg_count, &init_data);
325531
#endif
326532

327-
#if CONFIG_SOF_BOOT_TEST_STANDALONE
328-
LOG_INF("SOF_BOOT_TEST_STANDALONE, disabling IPC.");
329-
return 0;
330-
#endif
331533

332534
#ifdef __ZEPHYR__
333-
struct k_thread *thread = &sof->ipc->ipc_send_wq.thread;
535+
struct k_thread *thread = &ipc->ipc_send_wq.thread;
334536

335-
k_work_queue_start(&sof->ipc->ipc_send_wq, ipc_send_wq_stack,
537+
k_work_queue_init(&ipc->ipc_send_wq);
538+
k_work_queue_start(&ipc->ipc_send_wq, ipc_send_wq_stack,
336539
K_THREAD_STACK_SIZEOF(ipc_send_wq_stack), 1, NULL);
337540

338541
k_thread_suspend(thread);
@@ -342,10 +545,17 @@ __cold int ipc_init(struct sof *sof)
342545

343546
k_thread_resume(thread);
344547

345-
k_work_init_delayable(&sof->ipc->z_delayed_work, ipc_work_handler);
548+
k_work_init_delayable(&ipc->z_delayed_work, ipc_work_handler);
549+
#endif
550+
551+
ipc_user_init();
552+
553+
#if CONFIG_SOF_BOOT_TEST_STANDALONE
554+
LOG_INF("SOF_BOOT_TEST_STANDALONE, skipping platform IPC init.");
555+
return 0;
346556
#endif
347557

348-
return platform_ipc_init(sof->ipc);
558+
return platform_ipc_init(ipc);
349559
}
350560

351561
/* Locking: call with ipc->lock held and interrupts disabled */

0 commit comments

Comments
 (0)