forked from thesofproject/sof
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathzephyr_idc.c
More file actions
212 lines (174 loc) · 5.79 KB
/
zephyr_idc.c
File metadata and controls
212 lines (174 loc) · 5.79 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
// SPDX-License-Identifier: BSD-3-Clause
//
// Copyright(c) 2021 Intel Corporation. All rights reserved.
/*
* Use P4WQ to implement IDC for SOF. We create a P4 work queue per core and
* when the primary core sends a message to a secondary core, a work item from
* a static per-core array is queued accordingly. The secondary core is then
* woken up, it executes idc_handler(), which eventually calls idc_cmd() just
* like in the native SOF case. One work item per secondary core is enough
* because IDC on SOF is always synchronous, the primary core always waits for
* secondary cores to complete operation, so no races can occur.
*
* Design:
* - use K_P4WQ_ARRAY_DEFINE() to statically create one queue with one thread
* per DSP core.
* - k_p4wq_submit()
* runs on primary CPU
* send tasks to other CPUs.
*/
#include <zephyr/kernel.h>
#include <zephyr/cache.h>
#include <zephyr/sys/p4wq.h>
#include <rtos/idc.h>
#include <sof/init.h>
#include <sof/ipc/common.h>
#include <sof/schedule/edf_schedule.h>
#include <rtos/alloc.h>
#include <rtos/spinlock.h>
#include <ipc/topology.h>
#include <sof/trace/trace.h>
#include <sof/lib/uuid.h>
#include <sof/debug/telemetry/performance_monitor.h>
LOG_MODULE_REGISTER(zephyr_idc, CONFIG_SOF_LOG_LEVEL);
SOF_DEFINE_REG_UUID(zephyr_idc);
DECLARE_TR_CTX(zephyr_idc_tr, SOF_UUID(zephyr_idc_uuid), LOG_LEVEL_INFO);
/*
* Inter-CPU communication is only used in
* - IPC
* - Notifier
* - Power management (IDC_MSG_POWER_UP, IDC_MSG_POWER_DOWN)
*/
#if !CONFIG_MULTICORE || !defined(CONFIG_SMP)
/* Single-core build: IDC has no peers to talk to, so both entry points
 * degenerate to stubs.
 */
void idc_init_thread(void)
{
}

/* Always fails: inter-core messaging is unsupported without SMP/multicore */
int idc_send_msg(struct idc_msg *msg, uint32_t mode)
{
	return -ENOTSUP;
}
#else
/* One P4 work queue, each with one statically allocated thread, per DSP core.
 * K_P4WQ_USER_CPU_MASK defers thread start to idc_init_thread(), which pins
 * each thread to its core.
 */
K_P4WQ_ARRAY_DEFINE(q_zephyr_idc, CONFIG_CORE_COUNT, SOF_STACK_SIZE,
		    K_P4WQ_USER_CPU_MASK);

/* A p4wq work item bundled with the IDC message it transports */
struct zephyr_idc_msg {
	struct k_p4wq_work work;	/* submitted to the target core's queue */
	struct idc_msg msg;		/* copy of the sender's message */
};
/*
 * Work-item handler: runs on the target core's p4wq thread after
 * idc_send_msg() submitted a work item to that core's queue.
 * Copies the message into this core's idc->received_msg and dispatches it
 * via idc_cmd() (or secondary_core_init() for power-up).
 */
static void idc_handler(struct k_p4wq_work *work)
{
	struct zephyr_idc_msg *zmsg = container_of(work, struct zephyr_idc_msg, work);
	struct idc *idc = *idc_get();
	struct ipc *ipc = ipc_get();
	struct idc_msg *msg = &zmsg->msg;
	int payload = -1;
	k_spinlock_key_t key;

	/* msg was written by another core, so it must be in uncached memory */
	__ASSERT_NO_MSG(!is_cached(msg));

	/* NOTE(review): 'payload' is filled in here but not read again below —
	 * presumably kept for debugging or future use; confirm upstream intent.
	 */
	if (msg->size == sizeof(int)) {
		const int idc_handler_memcpy_err __unused =
			memcpy_s(&payload, sizeof(payload), msg->payload, msg->size);
		assert(!idc_handler_memcpy_err);
	}

	/* msg->core holds the *sender* core ID, stored by idc_send_msg() */
	idc->received_msg.core = msg->core;
	idc->received_msg.header = msg->header;
	idc->received_msg.extension = msg->extension;
#ifdef CONFIG_SOF_TELEMETRY_IO_PERFORMANCE_MEASUREMENTS
	/* Increment performance counters */
	io_perf_monitor_update_data(idc->io_perf_in_msg_count, 1);
#endif
	switch (msg->header) {
	case IDC_MSG_POWER_UP:
		/* Run the core initialisation? */
		secondary_core_init(sof_get());
		break;
	default:
		idc_cmd(&idc->received_msg);
		break;
	case IDC_MSG_IPC:
		idc_cmd(&idc->received_msg);
		/* Signal the host: clear this core's bit and complete the IPC
		 * command under the shared IPC lock.
		 */
		key = k_spin_lock(&ipc->lock);
		ipc->task_mask &= ~IPC_TASK_SECONDARY_CORE;
		ipc_complete_cmd(ipc);
		k_spin_unlock(&ipc->lock, key);
	}
}
/*
 * Used for *target* CPUs, since the initiator (usually core 0) can launch
 * several IDC messages at once. Also we need 2 work items per target core,
 * because the p4wq thread might just have returned from the work handler, but
 * hasn't released the work buffer yet (hasn't set thread pointer to NULL).
 * Then submitting the same work item again can result in an assertion failure.
 */
static struct zephyr_idc_msg idc_work[CONFIG_CORE_COUNT * 2];

/* Serializes senders' access to the idc_work[] array */
static K_MUTEX_DEFINE(idc_mutex);
/*
 * Send an IDC message to the core identified by msg->core.
 *
 * Copies the message (and optional payload) into per-target-core storage and
 * submits a p4wq work item to the target core's queue, where idc_handler()
 * dispatches it. With mode == IDC_BLOCKING, waits up to CONFIG_IDC_TIMEOUT_US
 * for completion and returns the remote status; otherwise returns immediately.
 *
 * @param msg  message to send; msg->payload, if set, must hold msg->size bytes
 * @param mode IDC_BLOCKING, IDC_NON_BLOCKING or IDC_POWER_UP
 * @return 0 or remote status on success, -EACCES if the target core is down,
 *         negative error from k_p4wq_wait() on timeout
 */
int idc_send_msg(struct idc_msg *msg, uint32_t mode)
{
	struct idc *idc = *idc_get();
	struct idc_payload *payload = idc_payload_get(idc, msg->core);
	unsigned int target_cpu = msg->core;
	struct zephyr_idc_msg *zmsg = idc_work + target_cpu * 2;
	struct idc_msg *msg_cp = &zmsg->msg;
	struct k_p4wq_work *work = &zmsg->work;
	int ret;
	int idc_send_memcpy_err __unused;

	/*
	 * Check the target core before taking the mutex: nothing below is
	 * worth doing if the core is down. The original code performed this
	 * check after k_mutex_lock() and returned without unlocking, leaving
	 * idc_mutex held forever and deadlocking every later sender.
	 */
	if (!cpu_is_core_enabled(target_cpu)) {
		tr_err(&zephyr_idc_tr, "Core %u is down, cannot send IDC message", target_cpu);
		return -EACCES;
	}

	k_mutex_lock(&idc_mutex, K_FOREVER);

	/* work->thread != NULL means the first slot is still in flight */
	if (unlikely(work->thread)) {
		/* See comment above the idc_work[] array. */
		zmsg++;
		work = &zmsg->work;
		msg_cp = &zmsg->msg;
	}

	idc_send_memcpy_err = memcpy_s(msg_cp, sizeof(*msg_cp), msg, sizeof(*msg));
	assert(!idc_send_memcpy_err);

	/* Same priority as the IPC thread which is an EDF task and under Zephyr */
	work->priority = CONFIG_EDF_THREAD_PRIORITY;
	work->deadline = 0;
	work->handler = idc_handler;
	work->sync = mode == IDC_BLOCKING;

	if (msg->payload) {
		idc_send_memcpy_err = memcpy_s(payload->data, sizeof(payload->data),
					       msg->payload, msg->size);
		assert(!idc_send_memcpy_err);

		/* Sending a message to another core, write back local payload cache */
		sys_cache_data_flush_range(payload->data, MIN(sizeof(payload->data), msg->size));
	}

	/* Temporarily store sender core ID; the receiver reads it back */
	msg_cp->core = cpu_get_id();

	__ASSERT_NO_MSG(!is_cached(msg_cp));
	k_p4wq_submit(q_zephyr_idc + target_cpu, work);

	k_mutex_unlock(&idc_mutex);

#ifdef CONFIG_SOF_TELEMETRY_IO_PERFORMANCE_MEASUREMENTS
	/* Increment performance counters */
	io_perf_monitor_update_data(idc->io_perf_out_msg_count, 1);
#endif

	switch (mode) {
	case IDC_BLOCKING:
		ret = k_p4wq_wait(work, K_USEC(CONFIG_IDC_TIMEOUT_US));
		if (!ret)
			/* message was sent and executed successfully, get status code */
			ret = idc_msg_status_get(msg->core);
		break;
	case IDC_POWER_UP:
	case IDC_NON_BLOCKING:
	default:
		ret = 0;
	}

	return ret;
}
/*
 * Per-core IDC setup: start this core's statically-defined p4wq thread
 * (deferred by K_P4WQ_USER_CPU_MASK) and pin it to the current core.
 * Must run on the core whose queue it enables, since it uses cpu_get_id().
 */
void idc_init_thread(void)
{
	int cpu = cpu_get_id();

	k_p4wq_enable_static_thread(q_zephyr_idc + cpu,
				    _p4threads_q_zephyr_idc + cpu, BIT(cpu));
	/*
	 * Assign SOF system heap to the IDC thread. Otherwise by default it
	 * uses the Zephyr heap for DP stack allocation
	 */
	k_thread_heap_assign(_p4threads_q_zephyr_idc + cpu, sof_sys_heap_get());
}
#endif /* CONFIG_MULTICORE */