/*
 * Copyright (C) 2013 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
15 #include <linux/dma-buf.h>
16 #include <linux/highmem.h>
17 #include <linux/memblock.h>
18 #include <linux/slab.h>
20 struct adf_memblock_pdata {
24 static struct sg_table *adf_memblock_map(struct dma_buf_attachment *attach,
25 enum dma_data_direction direction)
27 struct adf_memblock_pdata *pdata = attach->dmabuf->priv;
28 unsigned long pfn = PFN_DOWN(pdata->base);
29 struct page *page = pfn_to_page(pfn);
30 struct sg_table *table;
33 table = kzalloc(sizeof(*table), GFP_KERNEL);
35 return ERR_PTR(-ENOMEM);
37 ret = sg_alloc_table(table, 1, GFP_KERNEL);
41 sg_set_page(table->sgl, page, attach->dmabuf->size, 0);
49 static void adf_memblock_unmap(struct dma_buf_attachment *attach,
50 struct sg_table *table, enum dma_data_direction direction)
55 static void __init_memblock adf_memblock_release(struct dma_buf *buf)
57 struct adf_memblock_pdata *pdata = buf->priv;
58 int err = memblock_free(pdata->base, buf->size);
61 pr_warn("%s: freeing memblock failed: %d\n", __func__, err);
65 static void *adf_memblock_do_kmap(struct dma_buf *buf, unsigned long pgoffset,
68 struct adf_memblock_pdata *pdata = buf->priv;
69 unsigned long pfn = PFN_DOWN(pdata->base) + pgoffset;
70 struct page *page = pfn_to_page(pfn);
73 return kmap_atomic(page);
78 static void *adf_memblock_kmap_atomic(struct dma_buf *buf,
79 unsigned long pgoffset)
81 return adf_memblock_do_kmap(buf, pgoffset, true);
/* dma-buf kunmap_atomic callback: undo adf_memblock_kmap_atomic(). */
static void adf_memblock_kunmap_atomic(struct dma_buf *buf,
		unsigned long pgoffset, void *vaddr)
{
	kunmap_atomic(vaddr);
}
90 static void *adf_memblock_kmap(struct dma_buf *buf, unsigned long pgoffset)
92 return adf_memblock_do_kmap(buf, pgoffset, false);
95 static void adf_memblock_kunmap(struct dma_buf *buf, unsigned long pgoffset,
101 static int adf_memblock_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
103 struct adf_memblock_pdata *pdata = buf->priv;
105 return remap_pfn_range(vma, vma->vm_start, PFN_DOWN(pdata->base),
106 vma->vm_end - vma->vm_start, vma->vm_page_prot);
109 struct dma_buf_ops adf_memblock_ops = {
110 .map_dma_buf = adf_memblock_map,
111 .unmap_dma_buf = adf_memblock_unmap,
112 .release = adf_memblock_release,
113 .kmap_atomic = adf_memblock_kmap_atomic,
114 .kunmap_atomic = adf_memblock_kunmap_atomic,
115 .kmap = adf_memblock_kmap,
116 .kunmap = adf_memblock_kunmap,
117 .mmap = adf_memblock_mmap,
121 * adf_memblock_export - export a memblock reserved area as a dma-buf
123 * @base: base physical address
124 * @size: memblock size
125 * @flags: mode flags for the dma-buf's file
127 * @base and @size must be page-aligned.
129 * Returns a dma-buf on success or ERR_PTR(-errno) on failure.
131 struct dma_buf *adf_memblock_export(phys_addr_t base, size_t size, int flags)
133 struct adf_memblock_pdata *pdata;
136 if (PAGE_ALIGN(base) != base || PAGE_ALIGN(size) != size)
137 return ERR_PTR(-EINVAL);
139 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
141 return ERR_PTR(-ENOMEM);
144 buf = dma_buf_export(pdata, &adf_memblock_ops, size, flags);