/* rockchip_drm_buf.c
 *
 * Copyright (C) ROCKCHIP, Inc.
 * Author: yzq <yzq@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <drm/drmP.h>
#include <drm/rockchip_drm.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_buf.h"
#include "rockchip_drm_iommu.h"

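/*
 * lowlevel_buffer_allocate - back a GEM buffer object with DMA memory.
 *
 * Translates the ROCKCHIP_BO_* flags into DMA attributes, allocates
 * buf->size bytes through dma_alloc_attrs() (with or without IOMMU
 * support) and builds the page array and scatter/gather table that
 * describe the allocation.
 *
 * Returns 0 on success or a negative error code.
 */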
static int lowlevel_buffer_allocate(struct drm_device *dev,
		unsigned int flags, struct rockchip_drm_gem_buf *buf)
{
	int ret = 0;
	enum dma_attr attr;
	unsigned int nr_pages;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (buf->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	init_dma_attrs(&buf->dma_attrs);

	/*
	 * If ROCKCHIP_BO_CONTIG is requested (i.e. ROCKCHIP_BO_NONCONTIG is
	 * not set), a fully physically contiguous region is allocated;
	 * otherwise the memory only needs to be as contiguous as possible.
	 */
	if (!(flags & ROCKCHIP_BO_NONCONTIG))
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);

	/*
	 * Use a write-combined mapping for ROCKCHIP_BO_WC or non-cachable
	 * buffers, otherwise a cachable mapping.
	 */
	if (flags & ROCKCHIP_BO_WC || !(flags & ROCKCHIP_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	dma_set_attr(attr, &buf->dma_attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);

	nr_pages = buf->size >> PAGE_SHIFT;

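	/*
	 * Without an IOMMU the buffer must be physically contiguous: allocate
	 * it with a single dma_alloc_attrs() call and derive the page array
	 * from the returned bus address (assumed here to match the physical
	 * address, hence phys_to_page()).  With an IOMMU, dma_alloc_attrs()
	 * combined with DMA_ATTR_NO_KERNEL_MAPPING hands back the page array
	 * itself instead of a kernel mapping.
	 */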
	if (!is_drm_iommu_supported(dev)) {
		dma_addr_t start_addr;
		unsigned int i = 0;

		buf->pages = kcalloc(nr_pages, sizeof(*buf->pages),
					GFP_KERNEL);
		if (!buf->pages) {
			DRM_ERROR("failed to allocate pages.\n");
			return -ENOMEM;
		}

		buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->kvaddr) {
			DRM_ERROR("failed to allocate buffer.\n");
			kfree(buf->pages);
			return -ENOMEM;
		}

		start_addr = buf->dma_addr;
		while (i < nr_pages) {
			buf->pages[i] = phys_to_page(start_addr);
			start_addr += PAGE_SIZE;
			i++;
		}
	} else {
		buf->pages = dma_alloc_attrs(dev->dev, buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->pages) {
			DRM_ERROR("failed to allocate buffer.\n");
			return -ENOMEM;
		}
	}

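	/*
	 * Wrap the allocated pages in a scatter/gather table for dma-buf /
	 * PRIME use; drm_prime_pages_to_sg() reports failure as an ERR_PTR
	 * on this kernel, hence the IS_ERR_OR_NULL() check below.
	 */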
	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
	if (IS_ERR_OR_NULL(buf->sgt)) {
		DRM_ERROR("failed to get sg table.\n");
		buf->sgt = NULL;
		ret = -ENOMEM;
		goto err_free_attrs;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)buf->dma_addr,
			buf->size);

	return ret;

err_free_attrs:
	if (!is_drm_iommu_supported(dev)) {
		dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
		kfree(buf->pages);
	} else {
		dma_free_attrs(dev->dev, buf->size, buf->pages,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
	}
	buf->dma_addr = (dma_addr_t)NULL;

	return ret;
}

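/*
 * lowlevel_buffer_deallocate - release the DMA memory behind a GEM buffer.
 *
 * Frees the scatter/gather table and returns the memory obtained by
 * lowlevel_buffer_allocate(), using the same IOMMU/non-IOMMU split as the
 * allocation path.
 */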
static void lowlevel_buffer_deallocate(struct drm_device *dev,
		unsigned int flags, struct rockchip_drm_gem_buf *buf)
{
	DRM_DEBUG_KMS("%s.\n", __FILE__);

	if (!buf->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)buf->dma_addr,
			buf->size);

	sg_free_table(buf->sgt);

	kfree(buf->sgt);
	buf->sgt = NULL;

	if (!is_drm_iommu_supported(dev)) {
		dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
		kfree(buf->pages);
	} else {
		dma_free_attrs(dev->dev, buf->size, buf->pages,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
	}

	buf->dma_addr = (dma_addr_t)NULL;
}

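/*
 * rockchip_drm_init_buf - allocate and initialize a buffer descriptor.
 *
 * Only the rockchip_drm_gem_buf bookkeeping structure is created here;
 * the backing memory is allocated separately with rockchip_drm_alloc_buf().
 */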
struct rockchip_drm_gem_buf *rockchip_drm_init_buf(struct drm_device *dev,
						unsigned int size)
{
	struct rockchip_drm_gem_buf *buffer;

	DRM_DEBUG_KMS("%s.\n", __FILE__);
	DRM_DEBUG_KMS("desired size = 0x%x\n", size);

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate rockchip_drm_gem_buf.\n");
		return NULL;
	}

	buffer->size = size;
	return buffer;
}

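/*
 * rockchip_drm_fini_buf - free a buffer descriptor created by
 * rockchip_drm_init_buf().  The backing memory is expected to have been
 * released beforehand with rockchip_drm_free_buf().
 */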
void rockchip_drm_fini_buf(struct drm_device *dev,
				struct rockchip_drm_gem_buf *buffer)
{
	DRM_DEBUG_KMS("%s.\n", __FILE__);

	if (!buffer) {
		DRM_DEBUG_KMS("buffer is null.\n");
		return;
	}

	kfree(buffer);
	buffer = NULL;
}

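/*
 * rockchip_drm_alloc_buf - allocate the backing memory for @buf according
 * to the ROCKCHIP_BO_* @flags; a thin wrapper around
 * lowlevel_buffer_allocate().
 */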
int rockchip_drm_alloc_buf(struct drm_device *dev,
		struct rockchip_drm_gem_buf *buf, unsigned int flags)
{
	/*
	 * Allocate the memory region and store the resulting kvaddr/pages
	 * and dma_addr in the buffer object.
	 */
	if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
		return -ENOMEM;

	return 0;
}

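/*
 * rockchip_drm_free_buf - release the backing memory of @buffer; a thin
 * wrapper around lowlevel_buffer_deallocate().
 */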
void rockchip_drm_free_buf(struct drm_device *dev,
		unsigned int flags, struct rockchip_drm_gem_buf *buffer)
{
	lowlevel_buffer_deallocate(dev, flags, buffer);
}