
#define HDA_LINK_1MS_US		1000

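+/* hda-dma specific channel state flags used to manage the initial
+ * buffer preload; cleared once the gateway reports buffer full
+ */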
+#define HDA_STATE_PRELOAD	BIT(0)
+#define HDA_STATE_BF_WAIT	BIT(1)
+
struct hda_chan_data {
	struct dma *dma;
	uint32_t index;
	uint32_t stream_id;
-	uint32_t status;
+	uint32_t status;	/* common status */
+	uint32_t state;		/* hda specific additional state */
	uint32_t desc_count;
	uint32_t desc_avail;
	uint32_t direction;
@@ -186,42 +190,77 @@ static inline uint32_t hda_dma_get_data_size(struct dma *dma, uint32_t chan)
	return ds;
}

+static int hda_dma_preload(struct dma *dma, struct hda_chan_data *chan)
+{
+	struct dma_sg_elem next = {
+		.src = DMA_RELOAD_LLI,
+		.dest = DMA_RELOAD_LLI,
+		.size = DMA_RELOAD_LLI
+	};
+	int i;
+	int period_cnt;
+
+	/* wait for the buffer to fill after start:
+	 * the first attempt is non-blocking, later attempts block
+	 */
+	while (!(host_dma_reg_read(dma, chan->index, DGCS) & DGCS_BF) &&
+	       (chan->state & HDA_STATE_BF_WAIT))
+		;
+
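+	/* if the gateway reports buffer full, finish the preload and
+	 * notify the client once per buffered period; otherwise make
+	 * the next preload attempt block until the buffer fills
+	 */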
+	if (host_dma_reg_read(dma, chan->index, DGCS) & DGCS_BF) {
+		chan->state &= ~(HDA_STATE_PRELOAD | HDA_STATE_BF_WAIT);
+		if (chan->cb) {
+			/* loop over each period */
+			period_cnt = chan->buffer_bytes /
+				     chan->period_bytes;
+			for (i = 0; i < period_cnt; i++)
+				chan->cb(chan->cb_data,
+					 DMA_IRQ_TYPE_LLIST, &next);
+			/* do not need to test out next in this path */
+		}
+	} else {
+		/* next call in pre-load state will be blocking */
+		chan->state |= HDA_STATE_BF_WAIT;
+	}
+
+	return 0;
+}
+
static int hda_dma_copy_ch(struct dma *dma, struct hda_chan_data *chan,
-			   int bytes, uint32_t flags)
+			   int bytes)
{
-	struct dma_sg_elem next;
+	struct dma_sg_elem next = {
+		.src = DMA_RELOAD_LLI,
+		.dest = DMA_RELOAD_LLI,
+		.size = DMA_RELOAD_LLI
+	};
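+	/* irq flags for the spinlock below; copy flags are no longer
+	 * passed in by the caller
+	 */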
+	uint32_t flags;
	uint32_t dgcs = 0;

	tracev_host("GwU");
-	if (flags & DMA_COPY_NO_COMMIT) {
-		/* delay to fill the buffer, only for the first time
-		 * for _input_ dma.
-		 */
-		idelay(PLATFORM_DEFAULT_DELAY);
-	} else {
-		/* clear link xruns */
-		dgcs = host_dma_reg_read(dma, chan->index, DGCS);
-		if (dgcs & DGCS_BOR)
-			hda_update_bits(dma, chan->index,
-					DGCS, DGCS_BOR, DGCS_BOR);
-
-		/* make sure that previous transfer is complete */
-		if (chan->direction == DMA_DIR_MEM_TO_DEV) {
-			while (hda_dma_get_free_size(dma, chan->index) <
-				bytes)
-				idelay(PLATFORM_DEFAULT_DELAY);
-		}

-		/*
-		 * set BFPI to let host gateway knows we have read size,
-		 * which will trigger next copy start.
-		 */
-		if (chan->direction == DMA_DIR_MEM_TO_DEV)
-			hda_dma_inc_link_fp(dma, chan->index, bytes);
-		else
-			hda_dma_inc_fp(dma, chan->index, bytes);
+	/* clear link xruns */
+	dgcs = host_dma_reg_read(dma, chan->index, DGCS);
+	if (dgcs & DGCS_BOR)
+		hda_update_bits(dma, chan->index,
+				DGCS, DGCS_BOR, DGCS_BOR);
+
+	/* make sure that previous transfer is complete */
+	if (chan->direction == DMA_DIR_MEM_TO_DEV) {
+		while (hda_dma_get_free_size(dma, chan->index) <
+			bytes)
+			idelay(PLATFORM_DEFAULT_DELAY);
	}

+	/*
+	 * set BFPI to let the host gateway know how much we have read,
+	 * which will trigger the next copy start.
+	 */
+	if (chan->direction == DMA_DIR_MEM_TO_DEV)
+		hda_dma_inc_link_fp(dma, chan->index, bytes);
+	else
+		hda_dma_inc_fp(dma, chan->index, bytes);
+
	spin_lock_irq(&dma->lock, flags);
	if (chan->cb) {
		next.src = DMA_RELOAD_LLI;
@@ -245,17 +284,24 @@ static uint64_t hda_dma_work(void *data, uint64_t delay)
{
	struct hda_chan_data *chan = (struct hda_chan_data *)data;

-	hda_dma_copy_ch(chan->dma, chan, chan->period_bytes, 0);
-	/* next time to re-arm (TODO: should sub elapsed time?) */
+	hda_dma_copy_ch(chan->dma, chan, chan->period_bytes);
+	/* next time to re-arm */
	return HDA_LINK_1MS_US;
}

/* notify DMA to copy bytes */
static int hda_dma_copy(struct dma *dma, int channel, int bytes, uint32_t flags)
{
	struct dma_pdata *p = dma_get_drvdata(dma);
+	struct hda_chan_data *chan = p->chan + channel;

-	return hda_dma_copy_ch(dma, &p->chan[channel], bytes, flags);
+	if (flags & DMA_COPY_PRELOAD)
+		chan->state |= HDA_STATE_PRELOAD;
+
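+	/* the preload state persists across calls until the gateway
+	 * reports buffer full, so the first copy may take several tries
+	 */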
+	if (chan->state & HDA_STATE_PRELOAD)
+		return hda_dma_preload(dma, chan);
+	else
+		return hda_dma_copy_ch(dma, chan, bytes);
}

/* acquire the specific DMA channel */
@@ -292,6 +338,7 @@ static void hda_dma_channel_put_unlocked(struct dma *dma, int channel)

	/* set new state */
	p->chan[channel].status = COMP_STATE_INIT;
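+	/* also reset the hda-specific preload/wait state */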
+	p->chan[channel].state = 0;
	p->chan[channel].period_bytes = 0;
	p->chan[channel].buffer_bytes = 0;
	p->chan[channel].cb = NULL;
@@ -495,6 +542,12 @@ static int hda_dma_set_config(struct dma *dma, int channel,
		if (buffer_addr == 0)
			buffer_addr = addr;
	}
+	/* buffer size must be a multiple of hda dma burst size */
+	if (buffer_bytes % PLATFORM_HDA_BUFFER_ALIGNMENT) {
+		ret = -EINVAL;
+		goto out;
+	}
+
	p->chan[channel].period_bytes = period_bytes;
	p->chan[channel].buffer_bytes = buffer_bytes;

@@ -511,7 +564,7 @@ static int hda_dma_set_config(struct dma *dma, int channel,
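+	/* program DGMBS with the full (aligned) buffer size rather than
+	 * a single period
+	 */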
	if (config->direction == DMA_DIR_LMEM_TO_HMEM ||
	    config->direction == DMA_DIR_HMEM_TO_LMEM)
		host_dma_reg_write(dma, channel, DGMBS,
-				   ALIGN_UP(period_bytes,
+				   ALIGN_UP(buffer_bytes,
					    PLATFORM_HDA_BUFFER_ALIGNMENT));

	/* firmware control buffer */