EXPORT_SYMBOL(snd_pcm_sgbuf_ops_page);
 
+/*
+ * compute the max chunk size with contiguous pages on sg-buffer
+ */
+unsigned int snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream,
+                                         unsigned int ofs, unsigned int size)
+{
+       struct snd_sg_buf *sg = snd_pcm_substream_sgbuf(substream);
+       unsigned int start, end, pg;
+
+       start = ofs >> PAGE_SHIFT;
+       end = (ofs + size - 1) >> PAGE_SHIFT;
+       /* check page continuity */
+       pg = sg->table[start].addr >> PAGE_SHIFT;
+       for (;;) {
+               start++;
+               if (start > end)
+                       break;
+               pg++;
+               if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
+                       return (start << PAGE_SHIFT) - ofs;
+       }
+       /* ok, all on contiguous pages */
+       return size;
+}
+EXPORT_SYMBOL(snd_pcm_sgbuf_get_chunk_size);
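
For context, a minimal sketch of how a driver's transfer path might consume
this helper, splitting a region into physically contiguous runs; the function
queue_sg_transfer() and the hw_queue_chunk() hook are hypothetical names, not
part of this patch:

#include <sound/pcm.h>

/* invented hardware hook, declared only to keep the sketch self-contained */
static void hw_queue_chunk(struct snd_pcm_substream *substream,
                           unsigned int ofs, unsigned int bytes);

static void queue_sg_transfer(struct snd_pcm_substream *substream,
                              unsigned int ofs, unsigned int bytes)
{
        while (bytes > 0) {
                /* largest physically contiguous run starting at ofs */
                unsigned int chunk =
                        snd_pcm_sgbuf_get_chunk_size(substream, ofs, bytes);

                hw_queue_chunk(substream, ofs, chunk); /* hypothetical */
                ofs += chunk;
                bytes -= chunk;
        }
}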
+
 /**
  * snd_pcm_lib_malloc_pages - allocate the DMA buffer
  * @substream: the substream to allocate the DMA buffer to
 
        tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
        tmpb.dev.dev = sgbuf->dev;
        for (i = 0; i < sgbuf->pages; i++) {
+               if (!(sgbuf->table[i].addr & ~PAGE_MASK))
+                       continue; /* not a chunk head; freed with its chunk */
                tmpb.area = sgbuf->table[i].buf;
-               tmpb.addr = sgbuf->table[i].addr;
-               tmpb.bytes = PAGE_SIZE;
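+               /* head entry: the low bits hold the chunk size in pages */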
+               tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
+               tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
                snd_dma_free_pages(&tmpb);
        }
        if (dmab->area)
        return 0;
 }
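
To spell out the convention the free loop above depends on (and which
snd_malloc_sgbuf_pages() below establishes): page DMA addresses are
page-aligned, so their low bits are zero, and the head entry of each chunk
reuses those bits to record the chunk's size in pages. A minimal standalone
sketch, with hypothetical helper names:

#include <linux/mm.h>           /* PAGE_MASK, PAGE_SHIFT */
#include <linux/types.h>        /* dma_addr_t */

static inline dma_addr_t sg_mark_head(dma_addr_t addr, unsigned int pages)
{
        /* addr is page-aligned, so its low bits are free; pages <= 32 fits */
        return addr | pages;
}

static inline unsigned int sg_chunk_pages(dma_addr_t addr)
{
        return (unsigned int)(addr & ~PAGE_MASK);       /* 0: not a head */
}

static inline size_t sg_chunk_bytes(dma_addr_t addr)
{
        return (size_t)sg_chunk_pages(addr) << PAGE_SHIFT;
}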
 
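+/* cap on one contiguous allocation: 32 pages = 128 KiB with 4 KiB pages */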
+#define MAX_ALLOC_PAGES                32
+
 void *snd_malloc_sgbuf_pages(struct device *device,
                             size_t size, struct snd_dma_buffer *dmab,
                             size_t *res_size)
 {
        struct snd_sg_buf *sgbuf;
-       unsigned int i, pages;
+       unsigned int i, pages, chunk, maxpages;
        struct snd_dma_buffer tmpb;
+       struct snd_sg_page *table;
+       struct page **pgtable;
 
        dmab->area = NULL;
        dmab->addr = 0;
        sgbuf->dev = device;
        pages = snd_sgbuf_aligned_pages(size);
        sgbuf->tblsize = sgbuf_align_table(pages);
-       sgbuf->table = kcalloc(sgbuf->tblsize, sizeof(*sgbuf->table), GFP_KERNEL);
-       if (! sgbuf->table)
+       table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
+       if (!table)
                goto _failed;
-       sgbuf->page_table = kcalloc(sgbuf->tblsize, sizeof(*sgbuf->page_table), GFP_KERNEL);
-       if (! sgbuf->page_table)
+       sgbuf->table = table;
+       pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
+       if (!pgtable)
                goto _failed;
+       sgbuf->page_table = pgtable;
 
-       /* allocate each page */
-       for (i = 0; i < pages; i++) {
-               if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, device, PAGE_SIZE, &tmpb) < 0) {
-                       if (res_size == NULL)
+       /* allocate pages */
+       maxpages = MAX_ALLOC_PAGES;
+       while (pages > 0) {
+               chunk = pages;
+               /* don't be too eager to take a huge chunk */
+               if (chunk > maxpages)
+                       chunk = maxpages;
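+               /* convert the page count into a byte size for the allocator */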
+               chunk <<= PAGE_SHIFT;
+               if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, device,
+                                                chunk, &tmpb) < 0) {
+                       if (!sgbuf->pages)
+                               goto _failed; /* don't leak the tables */
+                       if (!res_size)
                                goto _failed;
-                       *res_size = size = sgbuf->pages * PAGE_SIZE;
+                       size = sgbuf->pages * PAGE_SIZE;
                        break;
                }
-               sgbuf->table[i].buf = tmpb.area;
-               sgbuf->table[i].addr = tmpb.addr;
-               sgbuf->page_table[i] = virt_to_page(tmpb.area);
-               sgbuf->pages++;
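+               /* the fallback allocator may return less than requested */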
+               chunk = tmpb.bytes >> PAGE_SHIFT;
+               for (i = 0; i < chunk; i++) {
+                       table->buf = tmpb.area;
+                       table->addr = tmpb.addr;
+                       if (!i)
+                               table->addr |= chunk; /* mark head with size */
+                       table++;
+                       *pgtable++ = virt_to_page(tmpb.area);
+                       tmpb.area += PAGE_SIZE;
+                       tmpb.addr += PAGE_SIZE;
+               }
+               sgbuf->pages += chunk;
+               pages -= chunk;
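+               /* a short result hints at fragmentation: lower the cap */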
+               if (chunk < maxpages)
+                       maxpages = chunk;
        }
 
        sgbuf->size = size;
        dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, PAGE_KERNEL);
        if (! dmab->area)
                goto _failed;
+       if (res_size)
+               *res_size = sgbuf->size;
        return dmab->area;
 
  _failed: