Skip to content

Commit f07af79

Browse files
authored
Merge pull request #504 from slawblauciak/dai_dma_sync
Sync DAI HDA DMA start with the internal work queue
2 parents b793f0c + 7d6dc96 commit f07af79

1 file changed

Lines changed: 48 additions & 17 deletions

File tree

src/drivers/intel/cavs/hda-dma.c

Lines changed: 48 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -89,6 +89,7 @@ trace_event(TRACE_CLASS_HOST, __e, ##__VA_ARGS__)
8989

9090
#define HDA_STATE_PRELOAD BIT(0)
9191
#define HDA_STATE_BF_WAIT BIT(1)
92+
#define HDA_STATE_INIT BIT(2)
9293

9394
struct hda_chan_data {
9495
struct dma *dma;
@@ -269,11 +270,43 @@ static int hda_dma_copy_ch(struct dma *dma, struct hda_chan_data *chan,
269270
return 0;
270271
}
271272

273+
static void hda_dma_init(struct dma *dma, int channel)
274+
{
275+
struct dma_pdata *p = dma_get_drvdata(dma);
276+
uint32_t flags;
277+
278+
spin_lock_irq(&dma->lock, flags);
279+
280+
trace_host("hda-dma-init %p ch %d", (uintptr_t)dma, channel);
281+
282+
/* enable the channel */
283+
hda_update_bits(dma, channel, DGCS, DGCS_GEN | DGCS_FIFORDY,
284+
DGCS_GEN | DGCS_FIFORDY);
285+
286+
/* full buffer is copied at startup */
287+
p->chan[channel].desc_avail = p->chan[channel].desc_count;
288+
289+
p->chan[channel].state &= ~HDA_STATE_INIT;
290+
291+
pm_runtime_put(PM_RUNTIME_HOST_DMA_L1, 0);
292+
293+
/* start link output transfer now */
294+
if (p->chan[channel].direction == DMA_DIR_MEM_TO_DEV)
295+
hda_dma_inc_link_fp(dma, channel,
296+
p->chan[channel].buffer_bytes);
297+
298+
spin_unlock_irq(&dma->lock, flags);
299+
}
300+
272301
static uint64_t hda_dma_work(void *data, uint64_t delay)
273302
{
274303
struct hda_chan_data *chan = (struct hda_chan_data *)data;
275304

276-
hda_dma_copy_ch(chan->dma, chan, chan->period_bytes);
305+
if (chan->state & HDA_STATE_INIT)
306+
hda_dma_init(chan->dma, chan->index);
307+
else
308+
hda_dma_copy_ch(chan->dma, chan, chan->period_bytes);
309+
277310
/* next time to re-arm */
278311
return HDA_LINK_1MS_US;
279312
}
@@ -287,10 +320,14 @@ static int hda_dma_copy(struct dma *dma, int channel, int bytes, uint32_t flags)
287320
if (flags & DMA_COPY_PRELOAD)
288321
chan->state |= HDA_STATE_PRELOAD;
289322

290-
if (chan->state & HDA_STATE_PRELOAD)
323+
if (chan->state & HDA_STATE_INIT)
324+
return 0;
325+
else if (chan->state & HDA_STATE_PRELOAD)
291326
return hda_dma_preload(dma, chan);
292327
else
293328
return hda_dma_copy_ch(dma, chan, bytes);
329+
330+
return 0;
294331
}
295332

296333
/* acquire the specific DMA channel */
@@ -370,26 +407,20 @@ static int hda_dma_start(struct dma *dma, int channel)
370407
goto out;
371408
}
372409

373-
/* enable the channel */
374-
hda_update_bits(dma, channel, DGCS, DGCS_GEN | DGCS_FIFORDY,
375-
DGCS_GEN | DGCS_FIFORDY);
376-
377-
/* full buffer is copied at startup */
378-
p->chan[channel].desc_avail = p->chan[channel].desc_count;
379-
380-
pm_runtime_put(PM_RUNTIME_HOST_DMA_L1, 0);
410+
p->chan[channel].state |= HDA_STATE_INIT;
381411

382-
/* activate timer if configured in cyclic mode */
412+
/*
413+
* Activate timer if configured in cyclic mode.
414+
* In cyclic mode DMA start is scheduled for later,
415+
* to make sure we stay synchronized with the system work queue.
416+
*/
383417
if (p->chan[channel].dma_ch_work.cb) {
384418
work_schedule_default(&p->chan[channel].dma_ch_work,
385419
HDA_LINK_1MS_US);
420+
} else {
421+
hda_dma_init(dma, channel);
386422
}
387423

388-
/* start link output transfer now */
389-
if (p->chan[channel].direction == DMA_DIR_MEM_TO_DEV)
390-
hda_dma_inc_link_fp(dma, channel,
391-
p->chan[channel].buffer_bytes);
392-
393424
out:
394425
spin_unlock_irq(&dma->lock, flags);
395426
return ret;
@@ -537,7 +568,7 @@ static int hda_dma_set_config(struct dma *dma, int channel,
537568
/* initialize timer */
538569
if (config->cyclic) {
539570
work_init(&p->chan[channel].dma_ch_work, hda_dma_work,
540-
&p->chan[channel], WORK_SYNC);
571+
&p->chan[channel], WORK_ASYNC);
541572
}
542573

543574
/* init channel in HW */

0 commit comments

Comments (0)