Skip to content

Commit 925ca89

Browse files
committed
ALSA: memalloc: Add fallback SG-buffer allocations for x86
The recent change for memory allocator replaced the SG-buffer handling helper for x86 with the standard non-contiguous page handler. This works for most cases, but there is a corner case I obviously overlooked, namely, the fallback of non-contiguous handler without IOMMU. When the system runs without IOMMU, the core handler tries to use the continuous pages with a single SGL entry. It works nicely for most cases, but when the system memory gets fragmented, the large allocation may fail frequently. Ideally the non-contig handler could deal with the proper SG pages, it's cumbersome to extend for now. As a workaround, here we add new types for (minimalistic) SG allocations, instead, so that the allocator falls back to those types automatically when the allocation with the standard API failed. BTW, one better (but pretty minor) improvement from the previous SG-buffer code is that this provides the proper mmap support without the PCM's page fault handling. Fixes: 2c95b92 ("ALSA: memalloc: Unify x86 SG-buffer handling (take#3)") BugLink: https://gitlab.freedesktop.org/pipewire/pipewire/-/issues/2272 BugLink: https://bugzilla.suse.com/show_bug.cgi?id=1198248 Cc: <stable@vger.kernel.org> Link: https://lore.kernel.org/r/20220413054808.7547-1-tiwai@suse.de Signed-off-by: Takashi Iwai <tiwai@suse.de>
1 parent f20ae50 commit 925ca89

2 files changed

Lines changed: 115 additions & 1 deletion

File tree

include/sound/memalloc.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,11 @@ struct snd_dma_device {
5151
#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
5252
#define SNDRV_DMA_TYPE_DEV_WC_SG SNDRV_DMA_TYPE_DEV_WC
5353
#endif
54+
/* fallback types, don't use those directly */
55+
#ifdef CONFIG_SND_DMA_SGBUF
56+
#define SNDRV_DMA_TYPE_DEV_SG_FALLBACK 10
57+
#define SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK 11
58+
#endif
5459

5560
/*
5661
* info for buffer allocation

sound/core/memalloc.c

Lines changed: 110 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -499,6 +499,10 @@ static const struct snd_malloc_ops snd_dma_wc_ops = {
499499
};
500500
#endif /* CONFIG_X86 */
501501

502+
#ifdef CONFIG_SND_DMA_SGBUF
503+
static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
504+
#endif
505+
502506
/*
503507
* Non-contiguous pages allocator
504508
*/
@@ -509,8 +513,18 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
509513

510514
sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
511515
DEFAULT_GFP, 0);
512-
if (!sgt)
516+
if (!sgt) {
517+
#ifdef CONFIG_SND_DMA_SGBUF
518+
if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
519+
dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
520+
else
521+
dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
522+
return snd_dma_sg_fallback_alloc(dmab, size);
523+
#else
513524
return NULL;
525+
#endif
526+
}
527+
514528
dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
515529
sg_dma_address(sgt->sgl));
516530
p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
@@ -633,6 +647,8 @@ static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
633647

634648
if (!p)
635649
return NULL;
650+
if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG)
651+
return p;
636652
for_each_sgtable_page(sgt, &iter, 0)
637653
set_memory_wc(sg_wc_address(&iter), 1);
638654
return p;
@@ -665,6 +681,95 @@ static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
665681
.get_page = snd_dma_noncontig_get_page,
666682
.get_chunk_size = snd_dma_noncontig_get_chunk_size,
667683
};
684+
685+
/* Fallback SG-buffer allocations for x86 */
686+
struct snd_dma_sg_fallback {
687+
size_t count;
688+
struct page **pages;
689+
dma_addr_t *addrs;
690+
};
691+
692+
static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
693+
struct snd_dma_sg_fallback *sgbuf)
694+
{
695+
size_t i;
696+
697+
if (sgbuf->count && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
698+
set_pages_array_wb(sgbuf->pages, sgbuf->count);
699+
for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
700+
dma_free_coherent(dmab->dev.dev, PAGE_SIZE,
701+
page_address(sgbuf->pages[i]),
702+
sgbuf->addrs[i]);
703+
kvfree(sgbuf->pages);
704+
kvfree(sgbuf->addrs);
705+
kfree(sgbuf);
706+
}
707+
708+
/*
 * Allocate an SG buffer page-by-page via dma_alloc_coherent() and map the
 * collected pages into a single contiguous kernel virtual range with vmap().
 * Used as the automatic fallback when dma_alloc_noncontiguous() fails
 * (typically a fragmented system without IOMMU).
 *
 * On success returns the vmap'ed address and stashes the bookkeeping struct
 * in dmab->private_data; returns NULL on failure with everything released.
 */
static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct snd_dma_sg_fallback *sgbuf;
	struct page **page_array;
	size_t pg, count;
	void *vaddr;

	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	page_array = kvcalloc(count, sizeof(*page_array), GFP_KERNEL);
	if (!page_array)
		goto error;
	sgbuf->pages = page_array;
	sgbuf->addrs = kvcalloc(count, sizeof(*sgbuf->addrs), GFP_KERNEL);
	if (!sgbuf->addrs)
		goto error;

	/* grab one page at a time; sgbuf->count tracks how many succeeded
	 * so the error path knows exactly what to undo
	 */
	for (pg = 0; pg < count; pg++) {
		vaddr = dma_alloc_coherent(dmab->dev.dev, PAGE_SIZE,
					   &sgbuf->addrs[pg], DEFAULT_GFP);
		if (!vaddr)
			goto error;
		sgbuf->pages[pg] = virt_to_page(vaddr);
		sgbuf->count++;
	}

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		set_pages_array_wc(page_array, count);
	vaddr = vmap(page_array, count, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		goto error;
	dmab->private_data = sgbuf;
	return vaddr;

 error:
	__snd_dma_sg_fallback_free(dmab, sgbuf);
	return NULL;
}
747+
748+
static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
749+
{
750+
vunmap(dmab->area);
751+
__snd_dma_sg_fallback_free(dmab, dmab->private_data);
752+
}
753+
754+
static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
755+
struct vm_area_struct *area)
756+
{
757+
struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
758+
759+
if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
760+
area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
761+
return vm_map_pages(area, sgbuf->pages, sgbuf->count);
762+
}
763+
764+
static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
765+
.alloc = snd_dma_sg_fallback_alloc,
766+
.free = snd_dma_sg_fallback_free,
767+
.mmap = snd_dma_sg_fallback_mmap,
768+
/* reuse vmalloc helpers */
769+
.get_addr = snd_dma_vmalloc_get_addr,
770+
.get_page = snd_dma_vmalloc_get_page,
771+
.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
772+
};
668773
#endif /* CONFIG_SND_DMA_SGBUF */
669774

670775
/*
@@ -736,6 +841,10 @@ static const struct snd_malloc_ops *dma_ops[] = {
736841
#ifdef CONFIG_GENERIC_ALLOCATOR
737842
[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
738843
#endif /* CONFIG_GENERIC_ALLOCATOR */
844+
#ifdef CONFIG_SND_DMA_SGBUF
845+
[SNDRV_DMA_TYPE_DEV_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
846+
[SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
847+
#endif
739848
#endif /* CONFIG_HAS_DMA */
740849
};
741850

0 commit comments

Comments
 (0)