Skip to content

Commit 7abb55f

Browse files
authored
Merge pull request #324 from tlauda/topic/trace_smp
trace: implement multicore traces
2 parents a81c919 + 2fe3324 commit 7abb55f

3 files changed

Lines changed: 71 additions & 49 deletions

File tree

src/include/sof/trace.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -105,6 +105,9 @@
105105
#define TRACE_CLASS_IDC (24 << 24)
106106
#define TRACE_CLASS_CPU (25 << 24)
107107

108+
/* trace core id */
109+
#define TRACE_CORE_ID(x) ((uint64_t)(x) << 32)
110+
108111
/* move to config.h */
109112
#define TRACE 1
110113
#define TRACEV 0

src/lib/dma-trace.c

Lines changed: 32 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -38,11 +38,12 @@
3838
#include <platform/dma.h>
3939
#include <platform/platform.h>
4040
#include <sof/lock.h>
41+
#include <sof/cpu.h>
4142
#include <stdint.h>
4243

4344
static struct dma_trace_data *trace_data = NULL;
4445

45-
static int dma_trace_get_avali_data(struct dma_trace_data *d,
46+
static int dma_trace_get_avail_data(struct dma_trace_data *d,
4647
struct dma_trace_buf *buffer,
4748
int avail);
4849

@@ -64,9 +65,10 @@ static uint64_t trace_work(void *data, uint64_t delay)
6465
overflow = 0;
6566
}
6667

67-
/* dma gateway supports wrap mode copy, but GPDMA doesn't*/
68-
/* support, so do it differently based on HW features */
69-
size = dma_trace_get_avali_data(d, buffer, avail);
68+
/* dma gateway supports wrap mode copy, but GPDMA doesn't
69+
* support, so do it differently based on HW features
70+
*/
71+
size = dma_trace_get_avail_data(d, buffer, avail);
7072

7173
/* any data to copy ? */
7274
if (size == 0)
@@ -98,7 +100,7 @@ static uint64_t trace_work(void *data, uint64_t delay)
98100
out:
99101
spin_lock_irq(&d->lock, flags);
100102

101-
/* disregard any old messages and dont resend them if we overflow */
103+
/* disregard any old messages and don't resend them if we overflow */
102104
if (size > 0) {
103105
if (d->overflow)
104106
buffer->avail = DMA_TRACE_LOCAL_SIZE - size;
@@ -117,7 +119,8 @@ static uint64_t trace_work(void *data, uint64_t delay)
117119

118120
int dma_trace_init_early(struct sof *sof)
119121
{
120-
trace_data = rzalloc(RZONE_SYS, SOF_MEM_CAPS_RAM, sizeof(*trace_data));
122+
trace_data = rzalloc(RZONE_SYS | RZONE_FLAG_UNCACHED, SOF_MEM_CAPS_RAM,
123+
sizeof(*trace_data));
121124

122125
list_init(&trace_data->config.elem_list);
123126
spinlock_init(&trace_data->lock);
@@ -145,7 +148,7 @@ int dma_trace_init_complete(struct dma_trace_data *d)
145148
}
146149

147150
int dma_trace_host_buffer(struct dma_trace_data *d, struct dma_sg_elem *elem,
148-
uint32_t host_size)
151+
uint32_t host_size)
149152
{
150153
struct dma_sg_elem *e;
151154

@@ -178,6 +181,7 @@ static int dma_trace_buffer_init(struct dma_trace_data *d)
178181
}
179182

180183
bzero(buffer->addr, DMA_TRACE_LOCAL_SIZE);
184+
dcache_writeback_invalidate_region(buffer->addr, DMA_TRACE_LOCAL_SIZE);
181185

182186
/* initialise the DMA buffer */
183187
buffer->size = DMA_TRACE_LOCAL_SIZE;
@@ -244,7 +248,7 @@ static int dma_trace_start(struct dma_trace_data *d)
244248
return err;
245249
}
246250

247-
static int dma_trace_get_avali_data(struct dma_trace_data *d,
251+
static int dma_trace_get_avail_data(struct dma_trace_data *d,
248252
struct dma_trace_buf *buffer,
249253
int avail)
250254
{
@@ -266,19 +270,19 @@ static int dma_trace_get_avali_data(struct dma_trace_data *d,
266270

267271
/* writeback trace data */
268272
if (buffer->r_ptr + avail <= buffer->end_addr) {
269-
dcache_writeback_region((void *)buffer->r_ptr, avail);
273+
dcache_writeback_invalidate_region(buffer->r_ptr, avail);
270274
} else {
271275
size = buffer->end_addr - buffer->r_ptr + 1;
272276

273-
/* warp case, flush tail and head of trace buffer */
274-
dcache_writeback_region((void *)buffer->r_ptr, size);
275-
dcache_writeback_region((void *)buffer->addr, avail - size);
277+
/* wrap case, flush tail and head of trace buffer */
278+
dcache_writeback_invalidate_region(buffer->r_ptr, size);
279+
dcache_writeback_invalidate_region(buffer->addr, avail - size);
276280
}
277281

278282
return avail;
279283
}
280284
#else
281-
static int dma_trace_get_avali_data(struct dma_trace_data *d,
285+
static int dma_trace_get_avail_data(struct dma_trace_data *d,
282286
struct dma_trace_buf *buffer,
283287
int avail)
284288
{
@@ -308,7 +312,7 @@ static int dma_trace_get_avali_data(struct dma_trace_data *d,
308312
size = lsize;
309313

310314
/* writeback trace data */
311-
dcache_writeback_region((void *)buffer->r_ptr, size);
315+
dcache_writeback_invalidate_region(buffer->r_ptr, size);
312316

313317
return size;
314318
}
@@ -341,6 +345,7 @@ int dma_trace_enable(struct dma_trace_data *d)
341345

342346
d->enabled = 1;
343347
work_schedule_default(&d->dmat_work, DMA_TRACE_PERIOD);
348+
344349
return 0;
345350
}
346351

@@ -381,7 +386,7 @@ void dma_trace_flush(void *t)
381386
}
382387

383388
/* writeback trace data */
384-
dcache_writeback_region(t, size);
389+
dcache_writeback_invalidate_region((void *)t, size);
385390
}
386391

387392
static void dtrace_add_event(const char *e, uint32_t length)
@@ -395,14 +400,18 @@ static void dtrace_add_event(const char *e, uint32_t length)
395400
if (margin > length) {
396401
/* no wrap */
397402
memcpy(buffer->w_ptr, e, length);
403+
dcache_writeback_invalidate_region(buffer->w_ptr, length);
398404
buffer->w_ptr += length;
399405
} else {
400406

401407
/* data is bigger than remaining margin so we wrap */
402408
memcpy(buffer->w_ptr, e, margin);
409+
dcache_writeback_invalidate_region(buffer->w_ptr, margin);
403410
buffer->w_ptr = buffer->addr;
404411

405412
memcpy(buffer->w_ptr, e + margin, length - margin);
413+
dcache_writeback_invalidate_region(buffer->w_ptr,
414+
length - margin);
406415
buffer->w_ptr += length - margin;
407416
}
408417

@@ -424,9 +433,11 @@ void dtrace_event(const char *e, uint32_t length)
424433
spin_lock_irq(&trace_data->lock, flags);
425434
dtrace_add_event(e, length);
426435

427-
/* if DMA trace copying is working */
428-
/* don't check if local buffer is half full */
429-
if (trace_data->copy_in_progress) {
436+
/* if DMA trace copying is working or slave core
437+
* don't check if local buffer is half full
438+
*/
439+
if (trace_data->copy_in_progress ||
440+
cpu_get_id() != PLATFORM_MASTER_CORE_ID) {
430441
spin_unlock_irq(&trace_data->lock, flags);
431442
return;
432443
}
@@ -438,11 +449,11 @@ void dtrace_event(const char *e, uint32_t length)
438449
buffer->avail >= (DMA_TRACE_LOCAL_SIZE / 2)) {
439450
work_reschedule_default(&trace_data->dmat_work,
440451
DMA_TRACE_RESCHEDULE_TIME);
441-
/* reschedule should not be intrrupted */
442-
/* just like we are in copy progress */
452+
/* reschedule should not be interrupted
453+
* just like we are in copy progress
454+
*/
443455
trace_data->copy_in_progress = 1;
444456
}
445-
446457
}
447458

448459
void dtrace_event_atomic(const char *e, uint32_t length)

src/lib/trace.c

Lines changed: 36 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@
3535
#include <platform/timer.h>
3636
#include <sof/lock.h>
3737
#include <sof/dma-trace.h>
38+
#include <sof/cpu.h>
3839
#include <stdint.h>
3940

4041
struct trace {
@@ -43,30 +44,30 @@ struct trace {
4344
spinlock_t lock;
4445
};
4546

46-
static struct trace trace;
47+
static struct trace *trace;
4748

4849
/* send trace events only to the local trace buffer */
4950
void _trace_event(uint32_t event)
5051
{
5152
uint64_t dt[2];
5253

53-
if (!trace.enable)
54+
if (!trace->enable)
5455
return;
5556

5657
dt[0] = platform_timer_get(platform_timer);
57-
dt[1] = event;
58+
dt[1] = event | TRACE_CORE_ID(cpu_get_id());
5859
dtrace_event((const char *)dt, sizeof(uint64_t) * 2);
5960
}
6061

6162
void _trace_event_atomic(uint32_t event)
6263
{
6364
uint64_t dt[2];
6465

65-
if (!trace.enable)
66+
if (!trace->enable)
6667
return;
6768

6869
dt[0] = platform_timer_get(platform_timer);
69-
dt[1] = event;
70+
dt[1] = event | TRACE_CORE_ID(cpu_get_id());
7071
dtrace_event_atomic((const char *)dt, sizeof(uint64_t) * 2);
7172
}
7273

@@ -79,32 +80,32 @@ void _trace_event_mbox(uint32_t event)
7980

8081
volatile uint64_t *t;
8182

82-
if (!trace.enable)
83+
if (!trace->enable)
8384
return;
8485

8586
time = platform_timer_get(platform_timer);
8687

8788
dt[0] = time;
88-
dt[1] = event;
89+
dt[1] = event | TRACE_CORE_ID(cpu_get_id());
8990
dtrace_event((const char *)dt, sizeof(uint64_t) * 2);
9091

9192
/* send event by mail box too. */
92-
spin_lock_irq(&trace.lock, flags);
93+
spin_lock_irq(&trace->lock, flags);
9394

9495
/* write timestamp and event to trace buffer */
95-
t = (volatile uint64_t *)(MAILBOX_TRACE_BASE + trace.pos);
96-
trace.pos += (sizeof(uint64_t) << 1);
96+
t = (volatile uint64_t *)(MAILBOX_TRACE_BASE + trace->pos);
97+
trace->pos += (sizeof(uint64_t) << 1);
9798

98-
if (trace.pos > MAILBOX_TRACE_SIZE - sizeof(uint64_t) * 2)
99-
trace.pos = 0;
99+
if (trace->pos > MAILBOX_TRACE_SIZE - sizeof(uint64_t) * 2)
100+
trace->pos = 0;
100101

101-
spin_unlock_irq(&trace.lock, flags);
102+
spin_unlock_irq(&trace->lock, flags);
102103

103104
t[0] = time;
104-
t[1] = event;
105+
t[1] = event | TRACE_CORE_ID(cpu_get_id());
105106

106107
/* writeback trace data */
107-
dcache_writeback_region((void *)t, sizeof(uint64_t) * 2);
108+
dcache_writeback_invalidate_region((void *)t, sizeof(uint64_t) * 2);
108109
}
109110

110111
void _trace_event_mbox_atomic(uint32_t event)
@@ -113,49 +114,56 @@ void _trace_event_mbox_atomic(uint32_t event)
113114
uint64_t dt[2];
114115
uint64_t time;
115116

116-
if (!trace.enable)
117+
if (!trace->enable)
117118
return;
118119

119120
time = platform_timer_get(platform_timer);
120121

121122
dt[0] = time;
122-
dt[1] = event;
123+
dt[1] = event | TRACE_CORE_ID(cpu_get_id());
123124
dtrace_event_atomic((const char *)dt, sizeof(uint64_t) * 2);
124125

125126
/* write timestamp and event to trace buffer */
126-
t = (volatile uint64_t *)(MAILBOX_TRACE_BASE + trace.pos);
127-
trace.pos += (sizeof(uint64_t) << 1);
127+
t = (volatile uint64_t *)(MAILBOX_TRACE_BASE + trace->pos);
128+
trace->pos += (sizeof(uint64_t) << 1);
128129

129-
if (trace.pos > MAILBOX_TRACE_SIZE - sizeof(uint64_t) * 2)
130-
trace.pos = 0;
130+
if (trace->pos > MAILBOX_TRACE_SIZE - sizeof(uint64_t) * 2)
131+
trace->pos = 0;
131132

132133
t[0] = time;
133-
t[1] = event;
134+
t[1] = event | TRACE_CORE_ID(cpu_get_id());
134135

135136
/* writeback trace data */
136-
dcache_writeback_region((void *)t, sizeof(uint64_t) * 2);
137+
dcache_writeback_invalidate_region((void *)t, sizeof(uint64_t) * 2);
137138
}
138139

139140
void trace_flush(void)
140141
{
141142
volatile uint64_t *t;
142143

143144
/* get mailbox position */
144-
t = (volatile uint64_t *)(MAILBOX_TRACE_BASE + trace.pos);
145+
t = (volatile uint64_t *)(MAILBOX_TRACE_BASE + trace->pos);
145146

146147
/* flush dma trace messages */
147148
dma_trace_flush((void *)t);
148149
}
149150

150151
void trace_off(void)
151152
{
152-
trace.enable = 0;
153+
trace->enable = 0;
153154
}
154155

155156
void trace_init(struct sof *sof)
156157
{
157158
dma_trace_init_early(sof);
158-
trace.enable = 1;
159-
trace.pos = 0;
160-
spinlock_init(&trace.lock);
159+
160+
trace = rzalloc(RZONE_SYS | RZONE_FLAG_UNCACHED, SOF_MEM_CAPS_RAM,
161+
sizeof(*trace));
162+
trace->enable = 1;
163+
trace->pos = 0;
164+
spinlock_init(&trace->lock);
165+
166+
bzero((void *)MAILBOX_TRACE_BASE, MAILBOX_TRACE_SIZE);
167+
dcache_writeback_invalidate_region((void *)MAILBOX_TRACE_BASE,
168+
MAILBOX_TRACE_SIZE);
161169
}

0 commit comments

Comments (0)