Skip to content

Commit 2d0f777

Browse files
committed
WIP: ipc: implement user-space IPC handling
Start a separate user thread to handle application IPC messages when SOF is built with CONFIG_SOF_USERSPACE_LL. This is an in-progress implementation that handles only one IPC message type and is thus not a full implementation. It does, however, allow us to proceed with testing IPC user thread creation and the basic mechanism for handling messages. Signed-off-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
1 parent b7d0c0c commit 2d0f777

5 files changed

Lines changed: 285 additions & 22 deletions

File tree

src/include/sof/ipc/common.h

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,19 @@ extern struct tr_ctx ipc_tr;
5353
#define IPC_TASK_SECONDARY_CORE BIT(2)
5454
#define IPC_TASK_POWERDOWN BIT(3)
5555

56+
/**
 * @brief Context shared between the kernel-side IPC handler and the
 *        user-space IPC processing thread (CONFIG_SOF_USERSPACE_LL builds).
 *
 * The kernel side copies the IPC4 message words in, wakes the user thread
 * via @a event, and waits on @a sem for @a result to become valid.
 */
struct ipc_user {
	/** @brief User-space IPC handler thread */
	struct k_thread *thread;
	/** @brief Handshake semaphore: thread startup and per-message completion */
	struct k_sem *sem;
	/** @brief Event object used to wake the user thread */
	struct k_event *event;
	/** @brief Copy of IPC4 message primary word forwarded to user thread */
	uint32_t ipc_msg_pri;
	/** @brief Copy of IPC4 message extension word forwarded to user thread */
	uint32_t ipc_msg_ext;
	/** @brief Result code from user thread processing */
	int result;
	/** @brief Back-pointer to the global IPC context */
	struct ipc *ipc;
};
68+
5669
struct ipc {
5770
struct k_spinlock lock; /* locking mechanism */
5871
void *comp_data;
@@ -74,6 +87,10 @@ struct ipc {
7487
struct task ipc_task;
7588
#endif
7689

90+
#ifdef CONFIG_SOF_USERSPACE_LL
91+
struct ipc_user *ipc_user_pdata;
92+
#endif
93+
7794
#ifdef CONFIG_SOF_TELEMETRY_IO_PERFORMANCE_MEASUREMENTS
7895
/* io performance measurement */
7996
struct io_perf_data_item *io_perf_in_msg_count;
@@ -95,6 +112,12 @@ struct ipc {
95112

96113
extern struct task_ops ipc_task_ops;
97114

115+
#ifdef CONFIG_SOF_USERSPACE_LL
116+
117+
struct ipc *ipc_get(void);
118+
119+
#else
120+
98121
/**
99122
* \brief Get the IPC global context.
100123
* @return The global IPC context.
@@ -104,6 +127,8 @@ static inline struct ipc *ipc_get(void)
104127
return sof_get()->ipc;
105128
}
106129

130+
#endif /* CONFIG_SOF_USERSPACE_LL */
131+
107132
/**
108133
* \brief Initialise global IPC context.
109134
* @param[in,out] sof Global SOF context.
@@ -250,4 +275,14 @@ void ipc_complete_cmd(struct ipc *ipc);
250275
/* GDB stub: should enter GDB after completing the IPC processing */
251276
extern bool ipc_enter_gdb;
252277

278+
#ifdef CONFIG_SOF_USERSPACE_LL
279+
struct ipc4_message_request;
280+
/**
281+
* @brief Forward an IPC4 command to the user-space thread.
282+
* @param ipc4 Pointer to the IPC4 message request
283+
* @return Result from user thread processing
284+
*/
285+
int ipc_user_forward_cmd(struct ipc4_message_request *ipc4);
286+
#endif
287+
253288
#endif /* __SOF_DRIVERS_IPC_H__ */

src/ipc/ipc-common.c

Lines changed: 227 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,16 @@
3535
#include <stddef.h>
3636
#include <stdint.h>
3737

38+
#ifdef __ZEPHYR__
39+
#include <zephyr/kernel.h>
40+
#endif
41+
42+
#ifdef CONFIG_SOF_USERSPACE_LL
43+
#include <rtos/userspace_helper.h>
44+
#include <sof/schedule/ll_schedule_domain.h>
45+
#include <ipc4/pipeline.h>
46+
#endif
47+
3848
#include <sof/debug/telemetry/performance_monitor.h>
3949

4050
LOG_MODULE_REGISTER(ipc, CONFIG_SOF_LOG_LEVEL);
@@ -43,6 +53,18 @@ SOF_DEFINE_REG_UUID(ipc);
4353

4454
DECLARE_TR_CTX(ipc_tr, SOF_UUID(ipc_uuid), LOG_LEVEL_INFO);
4555

56+
#ifdef CONFIG_SOF_USERSPACE_LL
57+
K_APPMEM_PARTITION_DEFINE(ipc_context_part);
58+
59+
K_APP_BMEM(ipc_context_part) static struct ipc ipc_context;
60+
61+
struct ipc *ipc_get(void)
62+
{
63+
return &ipc_context;
64+
}
65+
EXPORT_SYMBOL(ipc_get);
66+
#endif
67+
4668
int ipc_process_on_core(uint32_t core, bool blocking)
4769
{
4870
struct ipc *ipc = ipc_get();
@@ -256,7 +278,11 @@ void ipc_msg_send(struct ipc_msg *msg, void *data, bool high_priority)
256278
list_item_append(&msg->list, &ipc->msg_list);
257279
}
258280

281+
#ifdef CONFIG_SOF_USERSPACE_LL
282+
LOG_WRN("Skipping IPC worker schedule. TODO to fix\n");
283+
#else
259284
schedule_ipc_worker();
285+
#endif
260286

261287
k_spin_unlock(&ipc->lock, key);
262288
}
@@ -288,29 +314,207 @@ void ipc_schedule_process(struct ipc *ipc)
288314
#endif
289315
}
290316

317+
#ifdef CONFIG_SOF_USERSPACE_LL
/* User-space thread for pipeline_two_components test */
#define IPC_USER_STACKSIZE 8192

/* Event bits used to wake the user-space IPC thread */
#define IPC_USER_EVENT_CMD BIT(0)	/* an IPC4 command is pending */
#define IPC_USER_EVENT_STOP BIT(1)	/* request the thread to exit */

/* Thread object and stack for the user-space IPC handler */
static struct k_thread ipc_user_thread;
static K_THREAD_STACK_DEFINE(ipc_user_stack, IPC_USER_STACKSIZE);
326+
327+
/**
328+
* @brief Forward an IPC4 command to the user-space thread.
329+
*
330+
* Called from kernel context (IPC EDF task) to forward the IPC4
331+
* message to the user-space thread for processing. Sets
332+
* IPC_TASK_IN_THREAD in task_mask so the host is not signaled
333+
* until the user thread completes. Blocks until the user thread
334+
* finishes processing and returns the result.
335+
*
336+
* @param ipc4 Pointer to the IPC4 message request
337+
* @return Result from user thread processing
338+
*/
339+
int ipc_user_forward_cmd(struct ipc4_message_request *ipc4)
340+
{
341+
struct ipc *ipc = ipc_get();
342+
struct ipc_user *pdata = ipc->ipc_user_pdata;
343+
k_spinlock_key_t key;
344+
int ret;
345+
346+
LOG_DBG("IPC: forward cmd %08x", ipc4->primary.dat);
347+
348+
/* Copy message words — original buffer may be reused */
349+
pdata->ipc_msg_pri = ipc4->primary.dat;
350+
pdata->ipc_msg_ext = ipc4->extension.dat;
351+
pdata->ipc = ipc;
352+
353+
/* Prevent host completion until user thread finishes */
354+
key = k_spin_lock(&ipc->lock);
355+
ipc->task_mask |= IPC_TASK_IN_THREAD;
356+
k_spin_unlock(&ipc->lock, key);
357+
358+
/* Wake the user thread */
359+
k_event_set(pdata->event, IPC_USER_EVENT_CMD);
360+
361+
/* Wait for user thread to complete */
362+
ret = k_sem_take(pdata->sem, K_MSEC(10));
363+
if (ret) {
364+
LOG_ERR("IPC user: sem error %d\n", ret);
365+
return ret;
366+
}
367+
368+
/* Clear the task mask bit and check for completion */
369+
key = k_spin_lock(&ipc->lock);
370+
ipc->task_mask &= ~IPC_TASK_IN_THREAD;
371+
ipc_complete_cmd(ipc);
372+
k_spin_unlock(&ipc->lock, key);
373+
374+
return pdata->result;
375+
}
376+
377+
/**
 * @brief User-space thread entry point for pipeline_two_components test.
 *
 * Loops waiting for event bits: on IPC_USER_EVENT_CMD it rebuilds the IPC4
 * pipeline-create message from the words copied by ipc_user_forward_cmd(),
 * runs ipc_pipeline_new() in user context, stores the result and signals
 * completion via the shared semaphore. Exits on IPC_USER_EVENT_STOP.
 *
 * @param p1 Pointer to the struct ipc_user context shared with the kernel
 *           launcher
 * @param p2 Unused
 * @param p3 Unused
 */
static void ipc_user_thread_fn(void *p1, void *p2, void *p3)
{
	struct ipc_user *ipc_user = p1;

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	__ASSERT(k_is_user_context(), "expected user context");

	/* Signal startup complete - unblocks init waiting on semaphore */
	k_sem_give(ipc_user->sem);
	LOG_INF("IPC user-space thread started");

	for (;;) {
		/* NOTE(review): assumes k_event_wait_safe() (project wrapper)
		 * consumes/clears the returned bits despite reset=false, and
		 * returns 0 on the 5 s timeout so the loop simply re-waits -
		 * confirm against the wrapper's implementation, otherwise a
		 * set CMD bit would wake this loop repeatedly.
		 */
		uint32_t mask = k_event_wait_safe(ipc_user->event,
						  IPC_USER_EVENT_CMD | IPC_USER_EVENT_STOP,
						  false, K_MSEC(5000));

		LOG_DBG("IPC user wake, mask %u", mask);

		if (mask & IPC_USER_EVENT_CMD) {
			struct ipc4_pipeline_create pipe_msg;

			/* Reconstruct the IPC4 message from copied words */
			pipe_msg.primary.dat = ipc_user->ipc_msg_pri;
			pipe_msg.extension.dat = ipc_user->ipc_msg_ext;

			/* Execute pipeline creation in user context; only this
			 * one IPC message type is handled (WIP implementation)
			 */
			ipc_user->result = ipc_pipeline_new(ipc_user->ipc, (ipc_pipe_new *)&pipe_msg);

			/* Signal completion - kernel side will finish IPC */
			k_sem_give(ipc_user->sem);
		}

		if (mask & IPC_USER_EVENT_STOP)
			break;
	}
}
419+
420+
__cold int ipc_user_init(void)
421+
{
422+
struct ipc *ipc = ipc_get();
423+
struct ipc_user *ipc_user = sof_heap_alloc(zephyr_ll_user_heap(), SOF_MEM_FLAG_USER,
424+
sizeof(*ipc_user), 0);
425+
int ret;
426+
427+
ipc_user->sem = k_object_alloc(K_OBJ_SEM);
428+
if (!ipc_user->sem) {
429+
LOG_ERR("user IPC sem alloc failed");
430+
k_panic();
431+
}
432+
433+
ret = k_mem_domain_add_partition(zephyr_ll_mem_domain(), &ipc_context_part);
434+
435+
k_sem_init(ipc_user->sem, 0, 1);
436+
437+
/* Allocate kernel objects for the user-space thread */
438+
ipc_user->event = k_object_alloc(K_OBJ_EVENT);
439+
if (!ipc_user->event) {
440+
LOG_ERR("user IPC event alloc failed");
441+
k_panic();
442+
}
443+
k_event_init(ipc_user->event);
444+
445+
k_thread_create(&ipc_user_thread, ipc_user_stack, IPC_USER_STACKSIZE,
446+
ipc_user_thread_fn, ipc_user, NULL, NULL,
447+
-1, K_USER, K_FOREVER);
448+
449+
k_thread_access_grant(&ipc_user_thread, ipc_user->sem, ipc_user->event);
450+
user_grant_dai_access_all(&ipc_user_thread);
451+
user_grant_dma_access_all(&ipc_user_thread);
452+
user_access_to_mailbox(zephyr_ll_mem_domain(), &ipc_user_thread);
453+
zephyr_ll_grant_access(&ipc_user_thread);
454+
k_mem_domain_add_thread(zephyr_ll_mem_domain(), &ipc_user_thread);
455+
456+
k_thread_name_set(&ipc_user_thread, __func__);
457+
458+
/* Store references in ipc struct so kernel handler can forward commands */
459+
ipc->ipc_user_pdata = ipc_user;
460+
461+
k_thread_start(&ipc_user_thread);
462+
463+
/* Wait for user thread startup — consumes the initial k_sem_give from thread */
464+
k_sem_take(ipc->ipc_user_pdata->sem, K_FOREVER);
465+
466+
return 0;
467+
}
468+
#else
469+
static int ipc_user_init(void)
470+
{
471+
return 0;
472+
}
473+
#endif /* CONFIG_SOF_USERSPACE_LL */
474+
291475
__cold int ipc_init(struct sof *sof)
292476
{
477+
struct k_heap *heap;
478+
struct ipc *ipc;
479+
293480
assert_can_be_cold();
294481

295482
tr_dbg(&ipc_tr, "entry");
296483

484+
#ifdef CONFIG_SOF_USERSPACE_LL
485+
heap = zephyr_ll_user_heap();
486+
487+
ipc = ipc_get();
488+
memset(ipc, 0, sizeof(*ipc));
489+
#else
490+
heap = NULL;
491+
297492
/* init ipc data */
298-
sof->ipc = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, sizeof(*sof->ipc));
299-
if (!sof->ipc) {
493+
sof->ipc = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, sizeof(*ipc));
494+
if (!ipc) {
300495
tr_err(&ipc_tr, "Unable to allocate IPC data");
301496
return -ENOMEM;
302497
}
303-
sof->ipc->comp_data = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT,
304-
SOF_IPC_MSG_MAX_SIZE);
305-
if (!sof->ipc->comp_data) {
498+
ipc = sof->ipc;
499+
#endif
500+
501+
/* works? yes */
502+
//return 0;
503+
504+
printk("ipc %p\n", ipc);
505+
506+
ipc->comp_data = sof_heap_alloc(heap, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT,
507+
SOF_IPC_MSG_MAX_SIZE, 0);
508+
if (!ipc->comp_data) {
306509
tr_err(&ipc_tr, "Unable to allocate IPC component data");
307-
rfree(sof->ipc);
510+
sof_heap_free(heap, ipc);
308511
return -ENOMEM;
309512
}
513+
memset(ipc->comp_data, 0, SOF_IPC_MSG_MAX_SIZE);
310514

311-
k_spinlock_init(&sof->ipc->lock);
312-
list_init(&sof->ipc->msg_list);
313-
list_init(&sof->ipc->comp_list);
515+
k_spinlock_init(&ipc->lock);
516+
list_init(&ipc->msg_list);
517+
list_init(&ipc->comp_list);
314518

315519
#ifdef CONFIG_SOF_TELEMETRY_IO_PERFORMANCE_MEASUREMENTS
316520
struct io_perf_data_item init_data = {IO_PERF_IPC_ID,
@@ -319,20 +523,17 @@ __cold int ipc_init(struct sof *sof)
319523
IO_PERF_POWERED_UP_ENABLED,
320524
IO_PERF_D0IX_POWER_MODE,
321525
0, 0, 0 };
322-
io_perf_monitor_init_data(&sof->ipc->io_perf_in_msg_count, &init_data);
526+
io_perf_monitor_init_data(&ipc->io_perf_in_msg_count, &init_data);
323527
init_data.direction = IO_PERF_OUTPUT_DIRECTION;
324-
io_perf_monitor_init_data(&sof->ipc->io_perf_out_msg_count, &init_data);
528+
io_perf_monitor_init_data(&ipc->io_perf_out_msg_count, &init_data);
325529
#endif
326530

327-
#if CONFIG_SOF_BOOT_TEST_STANDALONE
328-
LOG_INF("SOF_BOOT_TEST_STANDALONE, disabling IPC.");
329-
return 0;
330-
#endif
331531

332532
#ifdef __ZEPHYR__
333-
struct k_thread *thread = &sof->ipc->ipc_send_wq.thread;
533+
struct k_thread *thread = &ipc->ipc_send_wq.thread;
334534

335-
k_work_queue_start(&sof->ipc->ipc_send_wq, ipc_send_wq_stack,
535+
k_work_queue_init(&ipc->ipc_send_wq);
536+
k_work_queue_start(&ipc->ipc_send_wq, ipc_send_wq_stack,
336537
K_THREAD_STACK_SIZEOF(ipc_send_wq_stack), 1, NULL);
337538

338539
k_thread_suspend(thread);
@@ -342,10 +543,17 @@ __cold int ipc_init(struct sof *sof)
342543

343544
k_thread_resume(thread);
344545

345-
k_work_init_delayable(&sof->ipc->z_delayed_work, ipc_work_handler);
546+
k_work_init_delayable(&ipc->z_delayed_work, ipc_work_handler);
547+
#endif
548+
549+
ipc_user_init();
550+
551+
#if CONFIG_SOF_BOOT_TEST_STANDALONE
552+
LOG_INF("SOF_BOOT_TEST_STANDALONE, skipping platform IPC init.");
553+
return 0;
346554
#endif
347555

348-
return platform_ipc_init(sof->ipc);
556+
return platform_ipc_init(ipc);
349557
}
350558

351559
/* Locking: call with ipc->lock held and interrupts disabled */

0 commit comments

Comments
 (0)