@@ -499,6 +499,10 @@ static const struct snd_malloc_ops snd_dma_wc_ops = {
499499};
500500#endif /* CONFIG_X86 */
501501
502+ #ifdef CONFIG_SND_DMA_SGBUF
503+ static void * snd_dma_sg_fallback_alloc (struct snd_dma_buffer * dmab , size_t size );
504+ #endif
505+
502506/*
503507 * Non-contiguous pages allocator
504508 */
@@ -509,8 +513,18 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
509513
510514 sgt = dma_alloc_noncontiguous (dmab -> dev .dev , size , dmab -> dev .dir ,
511515 DEFAULT_GFP , 0 );
512- if (!sgt )
516+ if (!sgt ) {
517+ #ifdef CONFIG_SND_DMA_SGBUF
518+ if (dmab -> dev .type == SNDRV_DMA_TYPE_DEV_WC_SG )
519+ dmab -> dev .type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK ;
520+ else
521+ dmab -> dev .type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK ;
522+ return snd_dma_sg_fallback_alloc (dmab , size );
523+ #else
513524 return NULL ;
525+ #endif
526+ }
527+
514528 dmab -> dev .need_sync = dma_need_sync (dmab -> dev .dev ,
515529 sg_dma_address (sgt -> sgl ));
516530 p = dma_vmap_noncontiguous (dmab -> dev .dev , size , sgt );
@@ -633,6 +647,8 @@ static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
633647
634648 if (!p )
635649 return NULL ;
650+ if (dmab -> dev .type != SNDRV_DMA_TYPE_DEV_WC_SG )
651+ return p ;
636652 for_each_sgtable_page (sgt , & iter , 0 )
637653 set_memory_wc (sg_wc_address (& iter ), 1 );
638654 return p ;
@@ -665,6 +681,95 @@ static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
665681 .get_page = snd_dma_noncontig_get_page ,
666682 .get_chunk_size = snd_dma_noncontig_get_chunk_size ,
667683};
684+
/* Fallback SG-buffer allocations for x86 */
struct snd_dma_sg_fallback {
	size_t count;		/* number of pages successfully allocated */
	struct page **pages;	/* array of the allocated pages */
	dma_addr_t *addrs;	/* DMA address of each page, from dma_alloc_coherent() */
};
691+
/*
 * Release all resources of a fallback SG buffer: page caching attributes,
 * the per-page coherent allocations, the bookkeeping arrays, and the
 * sgbuf object itself.  Safe to call on a partially-built sgbuf from the
 * allocation error path.
 */
static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
				       struct snd_dma_sg_fallback *sgbuf)
{
	size_t i;

	/* restore write-back caching before handing pages back; must be
	 * done prior to dma_free_coherent() below
	 */
	if (sgbuf->count && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		set_pages_array_wb(sgbuf->pages, sgbuf->count);
	/* pages[] was zero-initialized (kvcalloc); a NULL entry marks the
	 * end of the successfully allocated pages on a partial failure
	 */
	for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
		dma_free_coherent(dmab->dev.dev, PAGE_SIZE,
				  page_address(sgbuf->pages[i]),
				  sgbuf->addrs[i]);
	kvfree(sgbuf->pages);
	kvfree(sgbuf->addrs);
	kfree(sgbuf);
}
707+
/*
 * Fallback SG-buffer allocation: build the buffer page by page with
 * dma_alloc_coherent() and stitch the pages into one virtually
 * contiguous kernel mapping via vmap().  Used when
 * dma_alloc_noncontiguous() fails (see snd_dma_noncontig_alloc()).
 *
 * Returns the vmap'ed kernel address, or NULL on failure.  On success
 * the sgbuf bookkeeping is stored in dmab->private_data.
 */
static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct snd_dma_sg_fallback *sgbuf;
	struct page **pages;
	size_t i, count;
	void *p;

	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		goto error;
	sgbuf->pages = pages;
	sgbuf->addrs = kvcalloc(count, sizeof(*sgbuf->addrs), GFP_KERNEL);
	if (!sgbuf->addrs)
		goto error;

	/* sgbuf->count is bumped only after each successful allocation,
	 * so the error path frees exactly what was obtained so far
	 */
	for (i = 0; i < count; sgbuf->count++, i++) {
		p = dma_alloc_coherent(dmab->dev.dev, PAGE_SIZE,
				       &sgbuf->addrs[i], DEFAULT_GFP);
		if (!p)
			goto error;
		sgbuf->pages[i] = virt_to_page(p);
	}

	/* for the WC type, switch the pages to write-combined caching
	 * before exposing them through the vmap area
	 */
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		set_pages_array_wc(pages, count);
	p = vmap(pages, count, VM_MAP, PAGE_KERNEL);
	if (!p)
		goto error;
	dmab->private_data = sgbuf;
	return p;

 error:
	__snd_dma_sg_fallback_free(dmab, sgbuf);
	return NULL;
}
747+
/*
 * Free a fallback SG buffer: drop the vmap'ed kernel mapping first,
 * then release the underlying pages and bookkeeping.
 */
static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
{
	vunmap(dmab->area);
	__snd_dma_sg_fallback_free(dmab, dmab->private_data);
}
753+
754+ static int snd_dma_sg_fallback_mmap (struct snd_dma_buffer * dmab ,
755+ struct vm_area_struct * area )
756+ {
757+ struct snd_dma_sg_fallback * sgbuf = dmab -> private_data ;
758+
759+ if (dmab -> dev .type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK )
760+ area -> vm_page_prot = pgprot_writecombine (area -> vm_page_prot );
761+ return vm_map_pages (area , sgbuf -> pages , sgbuf -> count );
762+ }
763+
764+ static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
765+ .alloc = snd_dma_sg_fallback_alloc ,
766+ .free = snd_dma_sg_fallback_free ,
767+ .mmap = snd_dma_sg_fallback_mmap ,
768+ /* reuse vmalloc helpers */
769+ .get_addr = snd_dma_vmalloc_get_addr ,
770+ .get_page = snd_dma_vmalloc_get_page ,
771+ .get_chunk_size = snd_dma_vmalloc_get_chunk_size ,
772+ };
668773#endif /* CONFIG_SND_DMA_SGBUF */
669774
670775/*
@@ -736,6 +841,10 @@ static const struct snd_malloc_ops *dma_ops[] = {
736841#ifdef CONFIG_GENERIC_ALLOCATOR
737842 [SNDRV_DMA_TYPE_DEV_IRAM ] = & snd_dma_iram_ops ,
738843#endif /* CONFIG_GENERIC_ALLOCATOR */
844+ #ifdef CONFIG_SND_DMA_SGBUF
845+ [SNDRV_DMA_TYPE_DEV_SG_FALLBACK ] = & snd_dma_sg_fallback_ops ,
846+ [SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK ] = & snd_dma_sg_fallback_ops ,
847+ #endif
739848#endif /* CONFIG_HAS_DMA */
740849};
741850
0 commit comments