drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
/**************************************************************************
 *
 * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_driver.h>

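/*
 * Size, in bytes, of a guest page frame number (PPN) as written into the
 * FIFO by the GMR2 remap command below. It tracks the size of an unsigned
 * long, so 64-bit kernels emit 64-bit PPN entries and 32-bit kernels emit
 * 32-bit entries.
 */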
#define VMW_PPN_SIZE sizeof(unsigned long)

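/*
 * vmw_gmr2_bind - Bind a GMR using the GMR2 FIFO interface.
 *
 * Reserves FIFO space for an SVGA_CMD_DEFINE_GMR2 command followed by a
 * single SVGA_CMD_REMAP_GMR2 command covering all of the pages. The extra
 * four bytes in define_size and remap_size account for the 32-bit command
 * id that precedes each command body.
 */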
static int vmw_gmr2_bind(struct vmw_private *dev_priv,
                         struct page *pages[],
                         unsigned long num_pages,
                         int gmr_id)
{
        SVGAFifoCmdDefineGMR2 define_cmd;
        SVGAFifoCmdRemapGMR2 remap_cmd;
        uint32_t define_size = sizeof(define_cmd) + 4;
        uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4;
        uint32_t *cmd;
        uint32_t *cmd_orig;
        uint32_t i;

        cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        define_cmd.gmrId = gmr_id;
        define_cmd.numPages = num_pages;

        remap_cmd.gmrId = gmr_id;
        remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
                SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
        remap_cmd.offsetPages = 0;
        remap_cmd.numPages = num_pages;

        *cmd++ = SVGA_CMD_DEFINE_GMR2;
        memcpy(cmd, &define_cmd, sizeof(define_cmd));
        cmd += sizeof(define_cmd) / sizeof(uint32);

        *cmd++ = SVGA_CMD_REMAP_GMR2;
        memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
        cmd += sizeof(remap_cmd) / sizeof(uint32);

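        /*
         * Write one PPN per page; the entry width (32 or 64 bits) follows
         * VMW_PPN_SIZE and matches the remap flag chosen above.
         */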
        for (i = 0; i < num_pages; ++i) {
                if (VMW_PPN_SIZE <= 4)
                        *cmd = page_to_pfn(*pages++);
                else
                        *((uint64_t *)cmd) = page_to_pfn(*pages++);

                cmd += VMW_PPN_SIZE / sizeof(*cmd);
        }

        vmw_fifo_commit(dev_priv, define_size + remap_size);

        return 0;
}

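/*
 * vmw_gmr2_unbind - Unbind a GMR using the GMR2 FIFO interface.
 *
 * Issues an SVGA_CMD_DEFINE_GMR2 command with numPages set to zero,
 * redefining the GMR id as empty.
 */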
static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
                            int gmr_id)
{
        SVGAFifoCmdDefineGMR2 define_cmd;
        uint32_t define_size = sizeof(define_cmd) + 4;
        uint32_t *cmd;

        cmd = vmw_fifo_reserve(dev_priv, define_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("GMR2 unbind failed.\n");
                return;
        }
        define_cmd.gmrId = gmr_id;
        define_cmd.numPages = 0;

        *cmd++ = SVGA_CMD_DEFINE_GMR2;
        memcpy(cmd, &define_cmd, sizeof(define_cmd));

        vmw_fifo_commit(dev_priv, define_size);
}

/**
 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
 * the number of used descriptors.
 */

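/*
 * vmw_gmr_build_descriptors - Build the legacy GMR descriptor list.
 *
 * Allocates highmem pages and fills them with svga_guest_mem_descriptor
 * entries, coalescing runs of physically contiguous guest pages into a
 * single descriptor. Each descriptor page ends with a terminating
 * descriptor (num_pages == 0) whose ppn is patched to point at the next
 * descriptor page, forming a chain. The descriptor pages are linked on
 * desc_pages via their lru list heads so they can later be freed with
 * vmw_gmr_free_descriptors().
 */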
static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
                                     struct page *pages[],
                                     unsigned long num_pages)
{
        struct page *page, *next;
        struct svga_guest_mem_descriptor *page_virtual = NULL;
        struct svga_guest_mem_descriptor *desc_virtual = NULL;
        unsigned int desc_per_page;
        unsigned long prev_pfn;
        unsigned long pfn;
        int ret;

        desc_per_page = PAGE_SIZE /
            sizeof(struct svga_guest_mem_descriptor) - 1;

        while (likely(num_pages != 0)) {
                page = alloc_page(__GFP_HIGHMEM);
                if (unlikely(page == NULL)) {
                        ret = -ENOMEM;
                        goto out_err;
                }

                list_add_tail(&page->lru, desc_pages);

                /*
                 * Point previous page terminating descriptor to this
                 * page before unmapping it.
                 */

                if (likely(page_virtual != NULL)) {
                        desc_virtual->ppn = page_to_pfn(page);
                        kunmap_atomic(page_virtual);
                }

                page_virtual = kmap_atomic(page);
                desc_virtual = page_virtual - 1;
                prev_pfn = ~(0UL);

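                /*
                 * Fill this descriptor page: physically contiguous guest
                 * pages are folded into the current descriptor; a new
                 * descriptor is started otherwise. One slot is kept free
                 * for the terminating descriptor.
                 */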
                while (likely(num_pages != 0)) {
                        pfn = page_to_pfn(*pages);

                        if (pfn != prev_pfn + 1) {

                                if (desc_virtual - page_virtual ==
                                    desc_per_page - 1)
                                        break;

                                (++desc_virtual)->ppn = cpu_to_le32(pfn);
                                desc_virtual->num_pages = cpu_to_le32(1);
                        } else {
                                uint32_t tmp =
                                    le32_to_cpu(desc_virtual->num_pages);
                                desc_virtual->num_pages = cpu_to_le32(tmp + 1);
                        }
                        prev_pfn = pfn;
                        --num_pages;
                        ++pages;
                }

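                /*
                 * Terminate this descriptor page. If more pages remain,
                 * the terminator's ppn is rewritten at the top of the
                 * outer loop to chain to the next descriptor page.
                 */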
                (++desc_virtual)->ppn = cpu_to_le32(0);
                desc_virtual->num_pages = cpu_to_le32(0);
        }

        if (likely(page_virtual != NULL))
                kunmap_atomic(page_virtual);

        return 0;
out_err:
        list_for_each_entry_safe(page, next, desc_pages, lru) {
                list_del_init(&page->lru);
                __free_page(page);
        }
        return ret;
}

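/*
 * vmw_gmr_free_descriptors - Free all descriptor pages built by
 * vmw_gmr_build_descriptors().
 */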
static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
{
        struct page *page, *next;

        list_for_each_entry_safe(page, next, desc_pages, lru) {
                list_del_init(&page->lru);
                __free_page(page);
        }
}

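/*
 * vmw_gmr_fire_descriptors - Hand a descriptor list to the device.
 *
 * Selects the GMR id and writes the page frame number of the first
 * descriptor page to SVGA_REG_GMR_DESCRIPTOR, with memory barriers to
 * keep the two register writes ordered.
 */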
static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
                                     int gmr_id, struct list_head *desc_pages)
{
        struct page *page;

        if (unlikely(list_empty(desc_pages)))
                return;

        page = list_entry(desc_pages->next, struct page, lru);

        mutex_lock(&dev_priv->hw_mutex);

        vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
        wmb();
        vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
        mb();

        mutex_unlock(&dev_priv->hw_mutex);

}

/**
 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
 * the number of used descriptors.
 */

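/*
 * vmw_gmr_count_descriptors - Count the descriptors a page array needs.
 *
 * Each run of physically contiguous pages collapses into one descriptor,
 * so the count equals the number of such runs. The result is compared
 * against dev_priv->max_gmr_descriptors in vmw_gmr_bind() before the
 * descriptor list is built.
 */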
static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
                                        unsigned long num_pages)
{
        unsigned long prev_pfn = ~(0UL);
        unsigned long pfn;
        unsigned long descriptors = 0;

        while (num_pages--) {
                pfn = page_to_pfn(*pages++);
                if (prev_pfn + 1 != pfn)
                        ++descriptors;
                prev_pfn = pfn;
        }

        return descriptors;
}

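/*
 * vmw_gmr_bind - Bind a page array to a GMR id.
 *
 * Uses the GMR2 FIFO commands when the device advertises SVGA_CAP_GMR2,
 * and otherwise falls back to the legacy descriptor-list mechanism:
 * build the descriptors, point the device at them, then free them.
 */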
int vmw_gmr_bind(struct vmw_private *dev_priv,
                 struct page *pages[],
                 unsigned long num_pages,
                 int gmr_id)
{
        struct list_head desc_pages;
        int ret;

        if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
                return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id);

        if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
                return -EINVAL;

        if (vmw_gmr_count_descriptors(pages, num_pages) >
            dev_priv->max_gmr_descriptors)
                return -EINVAL;

        INIT_LIST_HEAD(&desc_pages);

        ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages);
        if (unlikely(ret != 0))
                return ret;

        vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages);
        vmw_gmr_free_descriptors(&desc_pages);

        return 0;
}


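/*
 * vmw_gmr_unbind - Unbind a GMR id.
 *
 * Uses the GMR2 path when available; otherwise clears the legacy
 * descriptor pointer for the GMR id by writing zero to
 * SVGA_REG_GMR_DESCRIPTOR.
 */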
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
        if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
                vmw_gmr2_unbind(dev_priv, gmr_id);
                return;
        }

        mutex_lock(&dev_priv->hw_mutex);
        vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
        wmb();
        vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
        mb();
        mutex_unlock(&dev_priv->hw_mutex);
}