summaryrefslogtreecommitdiffstats
path: root/sound/core
diff options
context:
space:
mode:
authorTakashi Iwai <tiwai@suse.de>2020-06-15 18:00:45 +0200
committerTakashi Iwai <tiwai@suse.de>2020-06-15 18:02:03 +0200
commit3ad796cbc36a7bc8bfd4de191d791b9490bc112b (patch)
tree1b161051729cab91b5c9dc6755cc7f5d421dde12 /sound/core
parent2a1f3368bff609504cdc984cdb7cef467bb0b2b0 (diff)
downloadlinux-3ad796cbc36a7bc8bfd4de191d791b9490bc112b.tar.bz2
ALSA: pcm: Use SG-buffer only when direct DMA is available
The DMA-coherent SG-buffer is tricky to use, as it does need the mapping. It used to work stably on x86 over the years (and that's why we had enabled the SG-buffer solely on x86) with the default mmap handler and vmap(), but our luck seems to have run out: the chance of breakage is high when special DMA handling is introduced on the arch side. In this patch, we change the buffer allocation to use the SG-buffer only when the device in question uses direct DMA. It's a bit hackish, but it's currently the only condition that may work (more or less) reliably with the default mmap and vmap() for mapping the pages that are deduced via virt_to_page(). In theory, we could apply a similar hack in the sound/core memory allocation helper, too; but it's used by SOF for allocating SG pages without re-mapping via vmap() or mmap, and it's fine to use it in that way, so let's keep it as is and add the workaround on the PCM side. Link: https://lore.kernel.org/r/20200615160045.2703-5-tiwai@suse.de Signed-off-by: Takashi Iwai <tiwai@suse.de>
Diffstat (limited to 'sound/core')
-rw-r--r--sound/core/pcm_memory.c13
1 file changed, 13 insertions, 0 deletions
diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
index 860935e3aea4..8326d16d3596 100644
--- a/sound/core/pcm_memory.c
+++ b/sound/core/pcm_memory.c
@@ -11,6 +11,7 @@
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
+#include <linux/dma-mapping.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/info.h>
@@ -39,6 +40,18 @@ static int do_alloc_pages(struct snd_card *card, int type, struct device *dev,
if (max_alloc_per_card &&
card->total_pcm_alloc_bytes + size > max_alloc_per_card)
return -ENOMEM;
+
+ if (IS_ENABLED(CONFIG_SND_DMA_SGBUF) &&
+ (type == SNDRV_DMA_TYPE_DEV_SG || type == SNDRV_DMA_TYPE_DEV_UC_SG) &&
+ !dma_is_direct(get_dma_ops(dev))) {
+ /* mutate to continuous page allocation */
+ dev_dbg(dev, "Use continuous page allocator\n");
+ if (type == SNDRV_DMA_TYPE_DEV_SG)
+ type = SNDRV_DMA_TYPE_DEV;
+ else
+ type = SNDRV_DMA_TYPE_DEV_UC;
+ }
+
err = snd_dma_alloc_pages(type, dev, size, dmab);
if (!err) {
mutex_lock(&card->memory_mutex);