Skip to content

Commit c0ae4c6

Browse files
committed
trace: fix DMA trace repeated-data bug on APL & CNL
The bug is caused by a DMA trace buffer overflow in the FW: there are more trace events than the DMA trace buffer can hold, which corrupts the buffer. The code assumes that a trace message is transferred to the host once the buffer is full and that the occupied memory can then be reused, but this is not always true. The DMA trace buffer is used as a ring buffer. In the wrap case, both the tail and the head of the buffer are in use, yet only the tail of the buffer is transferred to the host and released. This is because GPDMA on BYT only supports contiguous-memory mode, so the transfer needs to be done with two DMA calls. Since tracing is for debugging and should use as little CPU as possible, only the first DMA transfer is performed and the second one is scheduled for the next time; as a result, only part of the DMA trace buffer is released. On APL & CNL, the DMA gateway is used for DMA trace and it supports wrap-mode transfer, so the algorithm can be refined for the DMA gateway. Add a function to handle this per-hardware difference. For GPDMA, no change is made for now, but it can later be refined to support link-list mode. For platforms with a DMA gateway, the trace message is transferred with a single DMA call. Signed-off-by: Rander Wang <rander.wang@linux.intel.com>
1 parent fa48c42 commit c0ae4c6

1 file changed

Lines changed: 76 additions & 40 deletions

File tree

src/lib/dma-trace.c

Lines changed: 76 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,10 @@
4242

4343
static struct dma_trace_data *trace_data = NULL;
4444

45+
static int dma_trace_get_avali_data(struct dma_trace_data *d,
46+
struct dma_trace_buf *buffer,
47+
int avail);
48+
4549
static uint64_t trace_work(void *data, uint64_t delay)
4650
{
4751
struct dma_trace_data *d = (struct dma_trace_data *)data;
@@ -50,25 +54,7 @@ static uint64_t trace_work(void *data, uint64_t delay)
5054
unsigned long flags;
5155
uint32_t avail = buffer->avail;
5256
int32_t size;
53-
uint32_t hsize;
54-
uint32_t lsize;
55-
56-
if (d->host_offset == d->host_size)
57-
d->host_offset = 0;
58-
59-
#if defined CONFIG_DMA_GW
60-
/*
61-
* there isn't DMA completion callback in GW DMA copying.
62-
* so we send previous position always before the next copying
63-
* for guaranteeing previous DMA copying is finished.
64-
* This function will be called once every 500ms at least even
65-
* if no new trace is filled.
66-
*/
67-
if (d->old_host_offset != d->host_offset) {
68-
ipc_dma_trace_send_position();
69-
d->old_host_offset = d->host_offset;
70-
}
71-
#endif
57+
uint32_t overflow;
7258

7359
/* any data to copy ? */
7460
if (avail == 0)
@@ -77,33 +63,19 @@ static uint64_t trace_work(void *data, uint64_t delay)
7763
/* DMA trace copying is working */
7864
d->copy_in_progress = 1;
7965

80-
/* make sure we dont write more than buffer */
66+
/* make sure we don't write more than buffer */
8167
if (avail > DMA_TRACE_LOCAL_SIZE) {
82-
d->overflow = avail - DMA_TRACE_LOCAL_SIZE;
68+
overflow = avail - DMA_TRACE_LOCAL_SIZE;
8369
avail = DMA_TRACE_LOCAL_SIZE;
8470
} else {
85-
d->overflow = 0;
71+
overflow = 0;
8672
}
8773

88-
/* copy to host in sections if we wrap */
89-
lsize = hsize = avail;
90-
91-
/* host buffer wrap ? */
92-
if (d->host_offset + avail > d->host_size)
93-
hsize = d->host_size - d->host_offset;
94-
95-
/* local buffer wrap ? */
96-
if (buffer->r_ptr + avail > buffer->end_addr)
97-
lsize = buffer->end_addr - buffer->r_ptr;
98-
99-
/* get smallest size */
100-
if (hsize < lsize)
101-
size = hsize;
102-
else
103-
size = lsize;
74+
/* dma gateway supports wrap mode copy, but GPDMA doesn't*/
75+
/* support, so do it differently based on HW features */
76+
size = dma_trace_get_avali_data(d, buffer, avail);
10477

105-
/* writeback trace data */
106-
dcache_writeback_region((void*)buffer->r_ptr, size);
78+
d->overflow = overflow;
10779

10880
/* copy this section to host */
10981
size = dma_copy_to_host_nowait(&d->dc, config, d->host_offset,
@@ -115,6 +87,8 @@ static uint64_t trace_work(void *data, uint64_t delay)
11587

11688
/* update host pointer and check for wrap */
11789
d->host_offset += size;
90+
if (d->host_offset == d->host_size)
91+
d->host_offset = 0;
11892

11993
/* update local pointer and check for wrap */
12094
buffer->r_ptr += size;
@@ -267,6 +241,68 @@ static int dma_trace_start(struct dma_trace_data *d)
267241
return err;
268242
}
269243

244+
static int dma_trace_get_avali_data(struct dma_trace_data *d,
245+
struct dma_trace_buf *buffer,
246+
int avail)
247+
{
248+
int size;
249+
250+
/* there isn't DMA completion callback in GW DMA copying.
251+
* so we send previous position always before the next copying
252+
* for guaranteeing previous DMA copying is finished.
253+
* This function will be called once every 500ms at least even
254+
* if no new trace is filled.
255+
*/
256+
if (d->old_host_offset != d->host_offset) {
257+
ipc_dma_trace_send_position();
258+
d->old_host_offset = d->host_offset;
259+
}
260+
261+
/* writeback trace data */
262+
if (buffer->r_ptr + avail <= buffer->end_addr) {
263+
dcache_writeback_region((void *)buffer->r_ptr, avail);
264+
} else {
265+
size = buffer->end_addr - buffer->r_ptr + 1;
266+
267+
/* warp case, flush tail and head of trace buffer */
268+
dcache_writeback_region((void *)buffer->r_ptr, size);
269+
dcache_writeback_region((void *)buffer->addr, avail - size);
270+
}
271+
272+
return avail;
273+
}
274+
#else
275+
static int dma_trace_get_avali_data(struct dma_trace_data *d,
276+
struct dma_trace_buf *buffer,
277+
int avail)
278+
{
279+
uint32_t hsize;
280+
uint32_t lsize;
281+
int32_t size;
282+
283+
/* copy to host in sections if we wrap */
284+
lsize = avail;
285+
hsize = avail;
286+
287+
/* host buffer wrap ? */
288+
if (d->host_offset + avail > d->host_size)
289+
hsize = d->host_size - d->host_offset;
290+
291+
/* local buffer wrap ? */
292+
if (buffer->r_ptr + avail > buffer->end_addr)
293+
lsize = buffer->end_addr - buffer->r_ptr;
294+
295+
/* get smallest size */
296+
if (hsize < lsize)
297+
size = hsize;
298+
else
299+
size = lsize;
300+
301+
/* writeback trace data */
302+
dcache_writeback_region((void *)buffer->r_ptr, size);
303+
304+
return size;
305+
}
270306
#endif
271307

272308
int dma_trace_enable(struct dma_trace_data *d)

0 commit comments

Comments
 (0)