Skip to content

Commit daab049

Browse files
tlauda authored and xiulipan committed
dai: remove wait on stop
Removes the wait on stop, as it is not needed. This change also fixes problems with sending IPC: the wait function lowers the interrupt level, while IPCs are now executed at IRQ task level.

Signed-off-by: Tomasz Lauda <tomasz.lauda@linux.intel.com>
1 parent 081bc3a commit daab049

2 files changed

Lines changed: 65 additions & 84 deletions

File tree

src/audio/dai.c

Lines changed: 62 additions & 74 deletions
Original file line numberDiff line numberDiff line change
@@ -62,24 +62,66 @@ struct dai_data {
6262
struct dai *dai;
6363
struct dma *dma;
6464
uint32_t period_bytes;
65-
completion_t complete;
6665
int xrun; /* true if we are doing xrun recovery */
6766
int pointer_init; /* true if buffer pointer was initialized */
6867

69-
uint32_t last_bytes; /* the last bytes(<period size) it copies. */
7068
uint32_t dai_pos_blks; /* position in bytes (nearest block) */
7169

7270
volatile uint64_t *dai_pos; /* host can read back this value without IPC */
7371
uint64_t wallclock; /* wall clock at stream start */
7472
};
7573

74+
static void dai_buffer_process(struct comp_dev *dev)
75+
{
76+
struct dai_data *dd = comp_get_drvdata(dev);
77+
struct comp_buffer *dma_buffer;
78+
void *buffer_ptr;
79+
80+
if (dev->params.direction == SOF_IPC_STREAM_PLAYBACK) {
81+
dma_buffer = list_first_item(&dev->bsource_list,
82+
struct comp_buffer, sink_list);
83+
84+
/* recalc available buffer space */
85+
comp_update_buffer_consume(dma_buffer, dd->period_bytes);
86+
87+
buffer_ptr = dma_buffer->r_ptr;
88+
89+
/* make sure there is available bytes for next period */
90+
if (dma_buffer->avail < dd->period_bytes) {
91+
trace_dai_error("xru");
92+
comp_underrun(dev, dma_buffer, dd->period_bytes, 0);
93+
}
94+
} else {
95+
dma_buffer = list_first_item(&dev->bsink_list,
96+
struct comp_buffer, source_list);
97+
98+
/* recalc available buffer space */
99+
comp_update_buffer_produce(dma_buffer, dd->period_bytes);
100+
101+
buffer_ptr = dma_buffer->w_ptr;
102+
103+
/* make sure there is free bytes for next period */
104+
if (dma_buffer->free < dd->period_bytes) {
105+
trace_dai_error("xro");
106+
comp_overrun(dev, dma_buffer, dd->period_bytes, 0);
107+
}
108+
}
109+
110+
/* update host position (in bytes offset) for drivers */
111+
dev->position += dd->period_bytes;
112+
if (dd->dai_pos) {
113+
dd->dai_pos_blks += dd->period_bytes;
114+
*dd->dai_pos = dd->dai_pos_blks +
115+
buffer_ptr - dma_buffer->addr;
116+
}
117+
}
118+
76119
/* this is called by DMA driver every time descriptor has completed */
77120
static void dai_dma_cb(void *data, uint32_t type, struct dma_sg_elem *next)
78121
{
79122
struct comp_dev *dev = (struct comp_dev *)data;
80123
struct dai_data *dd = comp_get_drvdata(dev);
81124
struct comp_buffer *dma_buffer;
82-
uint32_t copied_size;
83125

84126
tracev_dai("irq");
85127

@@ -91,9 +133,6 @@ static void dai_dma_cb(void *data, uint32_t type, struct dma_sg_elem *next)
91133

92134
/* tell DMA not to reload */
93135
next->size = DMA_RELOAD_END;
94-
95-
/* inform waiters */
96-
wait_completed(&dd->complete);
97136
}
98137

99138
/* is our pipeline handling an XRUN ? */
@@ -111,50 +150,7 @@ static void dai_dma_cb(void *data, uint32_t type, struct dma_sg_elem *next)
111150
return;
112151
}
113152

114-
if (dev->params.direction == SOF_IPC_STREAM_PLAYBACK) {
115-
dma_buffer = list_first_item(&dev->bsource_list,
116-
struct comp_buffer, sink_list);
117-
118-
copied_size = dd->last_bytes ? dd->last_bytes : dd->period_bytes;
119-
120-
/* recalc available buffer space */
121-
comp_update_buffer_consume(dma_buffer, copied_size);
122-
123-
/* update host position(in bytes offset) for drivers */
124-
dev->position += copied_size;
125-
if (dd->dai_pos) {
126-
dd->dai_pos_blks += copied_size;
127-
*dd->dai_pos = dd->dai_pos_blks +
128-
dma_buffer->r_ptr - dma_buffer->addr;
129-
}
130-
131-
/* make sure there is availble bytes for next period */
132-
if (dma_buffer->avail < dd->period_bytes) {
133-
trace_dai_error("xru");
134-
comp_underrun(dev, dma_buffer, copied_size, 0);
135-
}
136-
137-
} else {
138-
dma_buffer = list_first_item(&dev->bsink_list,
139-
struct comp_buffer, source_list);
140-
141-
/* recalc available buffer space */
142-
comp_update_buffer_produce(dma_buffer, dd->period_bytes);
143-
144-
/* update positions */
145-
dev->position += dd->period_bytes;
146-
if (dd->dai_pos) {
147-
dd->dai_pos_blks += dd->period_bytes;
148-
*dd->dai_pos = dd->dai_pos_blks +
149-
dma_buffer->w_ptr - dma_buffer->addr;
150-
}
151-
152-
/* make sure there is free bytes for next period */
153-
if (dma_buffer->free < dd->period_bytes) {
154-
trace_dai_error("xro");
155-
comp_overrun(dev, dma_buffer, dd->period_bytes, 0);
156-
}
157-
}
153+
dai_buffer_process(dev);
158154

159155
/* notify pipeline that DAI needs its buffer processed */
160156
if (dev->state == COMP_STATE_ACTIVE)
@@ -206,7 +202,6 @@ static struct comp_dev *dai_new(struct sof_ipc_comp *comp)
206202
list_init(&dd->config.elem_list);
207203
dd->dai_pos = NULL;
208204
dd->dai_pos_blks = 0;
209-
dd->last_bytes = 0;
210205
dd->xrun = 0;
211206
dd->pointer_init = 0;
212207

@@ -489,7 +484,6 @@ static int dai_reset(struct comp_dev *dev)
489484
if (dd->dai_pos)
490485
*dd->dai_pos = 0;
491486
dd->dai_pos = NULL;
492-
dd->last_bytes = 0;
493487
dd->wallclock = 0;
494488
dev->position = 0;
495489
dd->xrun = 0;
@@ -536,13 +530,12 @@ static void dai_pointer_init(struct comp_dev *dev)
536530
static int dai_comp_trigger(struct comp_dev *dev, int cmd)
537531
{
538532
struct dai_data *dd = comp_get_drvdata(dev);
533+
struct comp_buffer *dma_buffer;
539534
int ret;
540535

541536
trace_dai("trg");
542537
tracev_value(cmd);
543538

544-
wait_init(&dd->complete);
545-
546539
ret = comp_set_state(dev, cmd);
547540
if (ret < 0)
548541
return ret;
@@ -572,14 +565,22 @@ static int dai_comp_trigger(struct comp_dev *dev, int cmd)
572565
* this is only supported at capture mode.
573566
*/
574567
if (dev->params.direction == SOF_IPC_STREAM_CAPTURE) {
575-
struct comp_buffer *dma_buffer =
576-
list_first_item(&dev->bsink_list,
577-
struct comp_buffer, source_list);
568+
dma_buffer = list_first_item(&dev->bsink_list,
569+
struct comp_buffer,
570+
source_list);
578571
buffer_zero(dma_buffer);
579572
}
580573

581574
/* only start the DAI if we are not XRUN handling */
582575
if (dd->xrun == 0) {
576+
/* set valid buffer pointer */
577+
dai_buffer_process(dev);
578+
579+
/* recover valid start position */
580+
ret = dma_release(dd->dma, dd->chan);
581+
if (ret < 0)
582+
return ret;
583+
583584
/* start the DAI */
584585
ret = dma_start(dd->dma, dd->chan);
585586
if (ret < 0)
@@ -595,26 +596,13 @@ static int dai_comp_trigger(struct comp_dev *dev, int cmd)
595596
case COMP_TRIGGER_XRUN:
596597
trace_dai("txr");
597598
dd->xrun = 1;
598-
/* stop the DAI unconditionally */
599-
dai_trigger(dd->dai, COMP_TRIGGER_STOP, dev->params.direction);
600-
ret = dma_stop(dd->dma, dd->chan);
601-
break;
599+
600+
/* fallthrough */
602601
case COMP_TRIGGER_PAUSE:
603602
case COMP_TRIGGER_STOP:
604603
trace_dai("tsp");
605-
wait_init(&dd->complete);
606-
607-
/* wait for DMA to complete */
608-
dd->complete.timeout = dev->pipeline->ipc_pipe.deadline;
609-
ret = wait_for_completion_timeout(&dd->complete);
610-
if (ret < 0) {
611-
trace_dai_error("ed0");
612-
trace_error_value(cmd);
613-
/* forced stop of DMA+DAI to avoid refcount issues */
614-
dai_trigger(dd->dai, COMP_TRIGGER_STOP,
615-
dev->params.direction);
616-
ret = dma_stop(dd->dma, dd->chan);
617-
}
604+
ret = dma_stop(dd->dma, dd->chan);
605+
dai_trigger(dd->dai, COMP_TRIGGER_STOP, dev->params.direction);
618606
break;
619607
default:
620608
break;

src/drivers/dw-dma.c

Lines changed: 3 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -440,22 +440,15 @@ static int dw_dma_start(struct dma *dma, int channel)
440440
static int dw_dma_release(struct dma *dma, int channel)
441441
{
442442
struct dma_pdata *p = dma_get_drvdata(dma);
443-
struct dw_lli2 *lli;
444443
uint32_t flags;
445444

446445
spin_lock_irq(&dma->lock, flags);
447446

448447
trace_dma("Dpr");
449448

450-
/* get current lli */
451-
#if DW_USE_HW_LLI
452-
lli = (struct dw_lli2 *)dw_read(dma, DW_LLP(channel));
453-
#else
454-
lli = p->chan[channel].lli_current;
455-
#endif
456-
/* get next lli and recover the lli to head for restart */
457-
lli = (struct dw_lli2 *)lli->llp;
458-
p->chan[channel].lli = lli;
449+
/* get next lli for proper release */
450+
p->chan[channel].lli_current =
451+
(struct dw_lli2 *)p->chan[channel].lli_current->llp;
459452

460453
spin_unlock_irq(&dma->lock, flags);
461454
return 0;

0 commit comments

Comments (0)