+ /*
+  * Lazily build a flat page array for this buffer from the exporter's
+  * existing sg_table, so it can be duplicated per-attachment below.
+  * NOTE(review): assumes buffer->pages is freed with the buffer elsewhere;
+  * it is NOT released on the error paths below — confirm no leak.
+  */
+ if (!buffer->pages) {
+ int j, k = 0;
+
+ buffer->pages = vmalloc(sizeof(struct page *) * nr_pages);
+ if (!buffer->pages) {
+ ret = -ENOMEM;
+ goto err_free_sgt;
+ }
+
+ /* Flatten each scatterlist segment into individual pages. */
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
+
+ /*
+  * NOTE(review): integer division drops any tail that is not a
+  * whole page (and a segment < PAGE_SIZE contributes nothing) —
+  * presumably all segments here are page-aligned multiples of
+  * PAGE_SIZE; verify against how `table` is built.
+  */
+ for (j = 0; j < sg->length / PAGE_SIZE; j++)
+ buffer->pages[k++] = page++;
+ }
+ }
+
+ /* Build a private sg_table for this attachment from the page array. */
+ ret = sg_alloc_table_from_pages(sgt, buffer->pages, nr_pages, 0,
+ nr_pages << PAGE_SHIFT, GFP_KERNEL);
+ if (ret)
+ goto err_free_sgt;
+
+ /*
+  * Pre-populate DMA address/length with the CPU physical address.
+  * NOTE(review): dma_map_sg() below normally rewrites sg_dma_address/
+  * sg_dma_len itself, which would make this loop redundant — confirm
+  * whether attachment->dev uses a no-op/identity mapping that relies
+  * on these values being set here.
+  */
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ sg_dma_address(sg) = sg_phys(sg);
+ sg_dma_len(sg) = sg->length;
+ }
+
+ /* dma_map_sg() returns 0 on failure (number of mapped entries otherwise). */
+ if (!dma_map_sg(attachment->dev, sgt->sgl,
+ sgt->nents, direction)) {
+ ret = -ENOMEM;
+ goto err_free_sg_table;
+ }
+
+ /* Ownership of sgt transfers to the caller (unmapped via unmap_dma_buf). */
+ return sgt;
+
+err_free_sg_table:
+ sg_free_table(sgt);
+err_free_sgt:
+ /* sgt itself is kmalloc'd earlier in this function (outside this hunk). */
+ kfree(sgt);
+ return ERR_PTR(ret);