/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"


int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);

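/**
 * amdgpu_get_vis_part_size - compute the CPU-visible part of a placement
 *
 * @adev: amdgpu device the buffer belongs to
 * @mem: TTM memory placement to examine
 *
 * Returns how many bytes of @mem fall inside the CPU-visible part of
 * VRAM, or 0 if the placement starts beyond it.
 */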
static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
                                    struct ttm_mem_reg *mem)
{
        u64 ret = 0;

        if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) {
                ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) >
                           adev->mc.visible_vram_size ?
                           adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
                           mem->size;
        }
        return ret;
}

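/**
 * amdgpu_update_memory_usage - track per-domain memory usage
 *
 * @adev: amdgpu device the buffer belongs to
 * @old_mem: placement being vacated, or NULL
 * @new_mem: placement being occupied, or NULL
 *
 * Adds @new_mem to and subtracts @old_mem from the GTT, VRAM and
 * visible-VRAM usage counters.
 */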
static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
                                       struct ttm_mem_reg *old_mem,
                                       struct ttm_mem_reg *new_mem)
{
        u64 vis_size;

        if (!adev)
                return;

        if (new_mem) {
                switch (new_mem->mem_type) {
                case TTM_PL_TT:
                        atomic64_add(new_mem->size, &adev->gtt_usage);
                        break;
                case TTM_PL_VRAM:
                        atomic64_add(new_mem->size, &adev->vram_usage);
                        vis_size = amdgpu_get_vis_part_size(adev, new_mem);
                        atomic64_add(vis_size, &adev->vram_vis_usage);
                        break;
                }
        }

        if (old_mem) {
                switch (old_mem->mem_type) {
                case TTM_PL_TT:
                        atomic64_sub(old_mem->size, &adev->gtt_usage);
                        break;
                case TTM_PL_VRAM:
                        atomic64_sub(old_mem->size, &adev->vram_usage);
                        vis_size = amdgpu_get_vis_part_size(adev, old_mem);
                        atomic64_sub(vis_size, &adev->vram_vis_usage);
                        break;
                }
        }
}

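/**
 * amdgpu_ttm_bo_destroy - TTM destroy callback for amdgpu BOs
 *
 * @tbo: TTM buffer object being destroyed
 *
 * Drops the memory accounting for the BO, unlinks it from the GEM
 * object list and frees the metadata and the BO itself.
 */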
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct amdgpu_bo *bo;

        bo = container_of(tbo, struct amdgpu_bo, tbo);

        amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);

        mutex_lock(&bo->adev->gem.mutex);
        list_del_init(&bo->list);
        mutex_unlock(&bo->adev->gem.mutex);
        drm_gem_object_release(&bo->gem_base);
        kfree(bo->metadata);
        kfree(bo);
}

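/**
 * amdgpu_ttm_bo_is_amdgpu_bo - check if a BO belongs to this driver
 *
 * @bo: TTM buffer object to check
 *
 * Returns true if @bo was created by amdgpu, identified by its
 * destroy callback.
 */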
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
        return bo->destroy == &amdgpu_ttm_bo_destroy;
}

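/**
 * amdgpu_ttm_placement_init - build a TTM placement list
 *
 * @adev: amdgpu device the buffer belongs to
 * @placement: placement structure to fill in
 * @placements: array backing @placement
 * @domain: mask of AMDGPU_GEM_DOMAIN_* to allow
 * @flags: AMDGPU_GEM_CREATE_* flags of the buffer
 *
 * Translates a domain mask and creation flags into TTM placements,
 * falling back to cached system memory if no domain bit is set, and
 * limits CPU-accessible VRAM placements to the visible range.
 */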
static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
                                      struct ttm_placement *placement,
                                      struct ttm_place *placements,
                                      u32 domain, u64 flags)
{
        u32 c = 0, i;

        placement->placement = placements;
        placement->busy_placement = placements;

        if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
                if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
                    adev->mc.visible_vram_size < adev->mc.real_vram_size) {
                        placements[c].fpfn =
                                adev->mc.visible_vram_size >> PAGE_SHIFT;
                        placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
                }
                placements[c].fpfn = 0;
                placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                        TTM_PL_FLAG_VRAM;
                if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED))
                        placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GTT) {
                if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
                        placements[c].fpfn = 0;
                        placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
                                TTM_PL_FLAG_UNCACHED;
                } else {
                        placements[c].fpfn = 0;
                        placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
                }
        }

        if (domain & AMDGPU_GEM_DOMAIN_CPU) {
                if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
                        placements[c].fpfn = 0;
                        placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM |
                                TTM_PL_FLAG_UNCACHED;
                } else {
                        placements[c].fpfn = 0;
                        placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
                }
        }

        if (domain & AMDGPU_GEM_DOMAIN_GDS) {
                placements[c].fpfn = 0;
                placements[c++].flags = TTM_PL_FLAG_UNCACHED |
                        AMDGPU_PL_FLAG_GDS;
        }
        if (domain & AMDGPU_GEM_DOMAIN_GWS) {
                placements[c].fpfn = 0;
                placements[c++].flags = TTM_PL_FLAG_UNCACHED |
                        AMDGPU_PL_FLAG_GWS;
        }
        if (domain & AMDGPU_GEM_DOMAIN_OA) {
                placements[c].fpfn = 0;
                placements[c++].flags = TTM_PL_FLAG_UNCACHED |
                        AMDGPU_PL_FLAG_OA;
        }

        if (!c) {
                placements[c].fpfn = 0;
                placements[c++].flags = TTM_PL_MASK_CACHING |
                        TTM_PL_FLAG_SYSTEM;
        }
        placement->num_placement = c;
        placement->num_busy_placement = c;

        for (i = 0; i < c; i++) {
                if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
                    (placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    !placements[i].fpfn)
                        placements[i].lpfn =
                                adev->mc.visible_vram_size >> PAGE_SHIFT;
                else
                        placements[i].lpfn = 0;
        }
}

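/**
 * amdgpu_ttm_placement_from_domain - set a BO's placement from a domain mask
 *
 * @rbo: BO to update
 * @domain: mask of AMDGPU_GEM_DOMAIN_* to allow
 */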
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
{
        amdgpu_ttm_placement_init(rbo->adev, &rbo->placement,
                                  rbo->placements, domain, rbo->flags);
}

static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
                                        struct ttm_placement *placement)
{
        BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));

        memcpy(bo->placements, placement->placement,
               placement->num_placement * sizeof(struct ttm_place));
        bo->placement.num_placement = placement->num_placement;
        bo->placement.num_busy_placement = placement->num_busy_placement;
        bo->placement.placement = bo->placements;
        bo->placement.busy_placement = bo->placements;
}

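/**
 * amdgpu_bo_create_restricted - create a BO with a caller-supplied placement
 *
 * @adev: amdgpu device to create the BO on
 * @size: size in bytes, rounded up to page granularity
 * @byte_align: requested alignment in bytes
 * @kernel: true for an uninterruptible kernel allocation
 * @domain: mask of AMDGPU_GEM_DOMAIN_* to allow
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @sg: optional scatter/gather table to back the BO
 * @placement: TTM placement to use instead of deriving one from @domain
 * @resv: optional reservation object to share
 * @bo_ptr: where to return the new BO
 *
 * Returns 0 on success, negative error code on failure.
 */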
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
                                unsigned long size, int byte_align,
                                bool kernel, u32 domain, u64 flags,
                                struct sg_table *sg,
                                struct ttm_placement *placement,
                                struct reservation_object *resv,
                                struct amdgpu_bo **bo_ptr)
{
        struct amdgpu_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align;
        size_t acc_size;
        int r;

        page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        size = ALIGN(size, PAGE_SIZE);

        if (kernel)
                type = ttm_bo_type_kernel;
        else if (sg)
                type = ttm_bo_type_sg;
        else
                type = ttm_bo_type_device;

        *bo_ptr = NULL;

        acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
                                       sizeof(struct amdgpu_bo));

        bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        bo->adev = adev;
        INIT_LIST_HEAD(&bo->list);
        INIT_LIST_HEAD(&bo->va);
        bo->initial_domain = domain & (AMDGPU_GEM_DOMAIN_VRAM |
                                       AMDGPU_GEM_DOMAIN_GTT |
                                       AMDGPU_GEM_DOMAIN_CPU |
                                       AMDGPU_GEM_DOMAIN_GDS |
                                       AMDGPU_GEM_DOMAIN_GWS |
                                       AMDGPU_GEM_DOMAIN_OA);

        bo->flags = flags;
        amdgpu_fill_placement_to_bo(bo, placement);
        /* Kernel allocations are uninterruptible. On failure ttm_bo_init()
         * invokes the destroy callback, which frees the BO, so no kfree()
         * is needed here.
         */
        r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, page_align, !kernel, NULL,
                        acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
        if (unlikely(r != 0))
                return r;

        *bo_ptr = bo;

        trace_amdgpu_bo_create(bo);

        return 0;
}

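/**
 * amdgpu_bo_create - create a BO from a domain mask
 *
 * Takes the same parameters as amdgpu_bo_create_restricted(), except
 * that the TTM placement is derived from @domain and @flags instead
 * of being passed in.
 *
 * A minimal usage sketch (illustrative only; error handling beyond
 * the call elided):
 *
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	r = amdgpu_bo_create(adev, PAGE_SIZE, PAGE_SIZE, true,
 *			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, NULL, &bo);
 *	if (r)
 *		return r;
 *
 * Returns 0 on success, negative error code on failure.
 */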
int amdgpu_bo_create(struct amdgpu_device *adev,
                     unsigned long size, int byte_align,
                     bool kernel, u32 domain, u64 flags,
                     struct sg_table *sg,
                     struct reservation_object *resv,
                     struct amdgpu_bo **bo_ptr)
{
        struct ttm_placement placement = {0};
        struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];

        memset(&placements, 0,
               (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

        amdgpu_ttm_placement_init(adev, &placement,
                                  placements, domain, flags);

        return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
                                           domain, flags, sg, &placement,
                                           resv, bo_ptr);
}

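/**
 * amdgpu_bo_kmap - map a BO into kernel address space
 *
 * @bo: BO to map; must not have AMDGPU_GEM_CREATE_NO_CPU_ACCESS set
 * @ptr: where to store the kernel virtual address, may be NULL
 *
 * The mapping is cached in @bo and reused by later calls. Returns 0
 * on success, negative error code on failure.
 */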
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
        bool is_iomem;
        int r;

        if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                return -EPERM;

        if (bo->kptr) {
                if (ptr)
                        *ptr = bo->kptr;
                return 0;
        }
        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r)
                return r;

        bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
        if (ptr)
                *ptr = bo->kptr;

        return 0;
}

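/**
 * amdgpu_bo_kunmap - tear down the kernel mapping of a BO
 *
 * @bo: BO to unmap; a no-op if the BO is not currently mapped
 */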
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
        if (bo->kptr == NULL)
                return;
        bo->kptr = NULL;
        ttm_bo_kunmap(&bo->kmap);
}

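/**
 * amdgpu_bo_ref - take a reference on a BO
 *
 * @bo: BO to reference, may be NULL
 *
 * Returns @bo.
 */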
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
        if (bo == NULL)
                return NULL;

        ttm_bo_reference(&bo->tbo);
        return bo;
}

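/**
 * amdgpu_bo_unref - drop a reference on a BO
 *
 * @bo: pointer to the BO; set to NULL once the reference is dropped
 */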
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
        struct ttm_buffer_object *tbo;

        if ((*bo) == NULL)
                return;

        tbo = &((*bo)->tbo);
        ttm_bo_unref(&tbo);
        if (tbo == NULL)
                *bo = NULL;
}

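/**
 * amdgpu_bo_pin_restricted - pin a BO within a given address range
 *
 * @bo: BO to pin
 * @domain: AMDGPU_GEM_DOMAIN_* to pin into
 * @min_offset: lowest acceptable offset within the domain
 * @max_offset: highest acceptable offset within the domain, 0 for no limit
 * @gpu_addr: where to return the GPU address, may be NULL
 *
 * An already pinned BO only has its pin count incremented. Returns 0
 * on success, negative error code on failure.
 */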
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                             u64 min_offset, u64 max_offset,
                             u64 *gpu_addr)
{
        int r, i;
        unsigned fpfn, lpfn;

        if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm))
                return -EPERM;

        if (WARN_ON_ONCE(min_offset > max_offset))
                return -EINVAL;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = amdgpu_bo_gpu_offset(bo);

                if (max_offset != 0) {
                        u64 domain_start;

                        if (domain == AMDGPU_GEM_DOMAIN_VRAM)
                                domain_start = bo->adev->mc.vram_start;
                        else
                                domain_start = bo->adev->mc.gtt_start;
                        WARN_ON_ONCE(max_offset <
                                     (amdgpu_bo_gpu_offset(bo) - domain_start));
                }

                return 0;
        }
        amdgpu_ttm_placement_from_domain(bo, domain);
        for (i = 0; i < bo->placement.num_placement; i++) {
                /* force to pin into visible video ram */
                if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
                    (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) {
                        if (WARN_ON_ONCE(min_offset >
                                         bo->adev->mc.visible_vram_size))
                                return -EINVAL;
                        fpfn = min_offset >> PAGE_SHIFT;
                        lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
                } else {
                        fpfn = min_offset >> PAGE_SHIFT;
                        lpfn = max_offset >> PAGE_SHIFT;
                }
                if (fpfn > bo->placements[i].fpfn)
                        bo->placements[i].fpfn = fpfn;
                if (lpfn && lpfn < bo->placements[i].lpfn)
                        bo->placements[i].lpfn = lpfn;
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        }

        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
                        *gpu_addr = amdgpu_bo_gpu_offset(bo);
                if (domain == AMDGPU_GEM_DOMAIN_VRAM)
                        bo->adev->vram_pin_size += amdgpu_bo_size(bo);
                else
                        bo->adev->gart_pin_size += amdgpu_bo_size(bo);
        } else {
                dev_err(bo->adev->dev, "%p pin failed\n", bo);
        }
        return r;
}

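/**
 * amdgpu_bo_pin - pin a BO without an offset restriction
 *
 * @bo: BO to pin
 * @domain: AMDGPU_GEM_DOMAIN_* to pin into
 * @gpu_addr: where to return the GPU address, may be NULL
 */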
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
        return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}

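/**
 * amdgpu_bo_unpin - drop a pin reference on a BO
 *
 * @bo: BO to unpin
 *
 * Once the pin count reaches zero the BO becomes evictable again.
 * Returns 0 on success, negative error code on failure.
 */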
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
        int r, i;

        if (!bo->pin_count) {
                dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++) {
                bo->placements[i].lpfn = 0;
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
        }
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
                if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
                        bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
                else
                        bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
        } else {
                dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
        }
        return r;
}

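/**
 * amdgpu_bo_evict_vram - evict all buffers from VRAM
 *
 * @adev: amdgpu device to evict on
 *
 * Used on suspend/hibernate to get buffer contents out of VRAM.
 */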
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
        if (0 && (adev->flags & AMD_IS_APU)) {
                /* Useless to evict on IGP chips */
                return 0;
        }
        return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

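/**
 * amdgpu_bo_force_delete - force-free leaked GEM objects
 *
 * @adev: amdgpu device to clean up
 *
 * Called at teardown to release objects userspace failed to free.
 */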
void amdgpu_bo_force_delete(struct amdgpu_device *adev)
{
        struct amdgpu_bo *bo, *n;

        if (list_empty(&adev->gem.objects))
                return;

        dev_err(adev->dev, "Userspace still has active objects!\n");
        list_for_each_entry_safe(bo, n, &adev->gem.objects, list) {
                dev_err(adev->dev, "%p %p %lu %lu force free\n",
                        &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
                        *((unsigned long *)&bo->gem_base.refcount));
                mutex_lock(&bo->adev->gem.mutex);
                list_del_init(&bo->list);
                mutex_unlock(&bo->adev->gem.mutex);
                /* this should unref the ttm bo */
                drm_gem_object_unreference_unlocked(&bo->gem_base);
        }
}

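/**
 * amdgpu_bo_init - initialize buffer object handling
 *
 * @adev: amdgpu device to initialize
 *
 * Sets up write-combining for the VRAM aperture and initializes TTM.
 * Returns 0 on success, negative error code on failure.
 */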
int amdgpu_bo_init(struct amdgpu_device *adev)
{
        /* Add an MTRR for the VRAM */
        adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
                                              adev->mc.aper_size);
        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
                 adev->mc.mc_vram_size >> 20,
                 (unsigned long long)adev->mc.aper_size >> 20);
        DRM_INFO("RAM width %dbits DDR\n",
                 adev->mc.vram_width);
        return amdgpu_ttm_init(adev);
}

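/**
 * amdgpu_bo_fini - tear down buffer object handling
 *
 * @adev: amdgpu device to tear down
 */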
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
        amdgpu_ttm_fini(adev);
        arch_phys_wc_del(adev->mc.vram_mtrr);
}

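/**
 * amdgpu_bo_fbdev_mmap - mmap a BO for the fbdev emulation
 *
 * @bo: BO backing the framebuffer
 * @vma: vma from the fbdev mmap callback
 */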
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
                         struct vm_area_struct *vma)
{
        return ttm_fbdev_mmap(vma, &bo->tbo);
}

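/**
 * amdgpu_bo_set_tiling_flags - set the tiling flags of a BO
 *
 * @bo: BO to update
 * @tiling_flags: new AMDGPU_TILING_* flags
 *
 * Returns -EINVAL if the TILE_SPLIT value is out of range.
 */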
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
        if (AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
                return -EINVAL;

        bo->tiling_flags = tiling_flags;
        return 0;
}

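/**
 * amdgpu_bo_get_tiling_flags - query the tiling flags of a BO
 *
 * @bo: BO to query; its reservation lock must be held
 * @tiling_flags: where to store the flags, may be NULL
 */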
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
        lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
}

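/**
 * amdgpu_bo_set_metadata - attach an opaque metadata blob to a BO
 *
 * @bo: BO to update
 * @metadata: metadata to copy, may be NULL if @metadata_size is 0
 * @metadata_size: size of the metadata in bytes; 0 frees any existing blob
 * @flags: metadata flags stored alongside the blob
 *
 * Returns 0 on success, negative error code on failure.
 */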
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
                           uint32_t metadata_size, uint64_t flags)
{
        void *buffer;

        if (!metadata_size) {
                if (bo->metadata_size) {
                        kfree(bo->metadata);
                        /* clear the stale pointer to avoid a double free on
                         * a later amdgpu_bo_set_metadata() call
                         */
                        bo->metadata = NULL;
                        bo->metadata_size = 0;
                }
                return 0;
        }

        if (metadata == NULL)
                return -EINVAL;

        buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
        if (buffer == NULL)
                return -ENOMEM;

        kfree(bo->metadata);
        bo->metadata_flags = flags;
        bo->metadata = buffer;
        bo->metadata_size = metadata_size;

        return 0;
}

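/**
 * amdgpu_bo_get_metadata - read back a BO's metadata blob
 *
 * @bo: BO to query
 * @buffer: where to copy the metadata, may be NULL
 * @buffer_size: size of @buffer in bytes
 * @metadata_size: where to store the metadata size, may be NULL
 * @flags: where to store the metadata flags, may be NULL
 *
 * Returns -EINVAL if @buffer is too small or no output is requested.
 */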
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
                           size_t buffer_size, uint32_t *metadata_size,
                           uint64_t *flags)
{
        if (!buffer && !metadata_size)
                return -EINVAL;

        if (buffer) {
                if (buffer_size < bo->metadata_size)
                        return -EINVAL;

                if (bo->metadata_size)
                        memcpy(buffer, bo->metadata, bo->metadata_size);
        }

        if (metadata_size)
                *metadata_size = bo->metadata_size;
        if (flags)
                *flags = bo->metadata_flags;

        return 0;
}

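/**
 * amdgpu_bo_move_notify - TTM notification that a BO is moving
 *
 * @bo: TTM buffer object being moved
 * @new_mem: placement it is moving to, or NULL
 *
 * Invalidates the VM mappings of the BO and updates the memory
 * usage statistics; called before the move actually happens.
 */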
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *new_mem)
{
        struct amdgpu_bo *rbo;

        if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
                return;

        rbo = container_of(bo, struct amdgpu_bo, tbo);
        amdgpu_vm_bo_invalidate(rbo->adev, rbo);

        /* update statistics */
        if (!new_mem)
                return;

        /* move_notify is called before move happens */
        amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
}

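/**
 * amdgpu_bo_fault_reserve_notify - TTM callback for CPU page faults
 *
 * @bo: TTM buffer object being faulted
 *
 * If the BO lies outside the CPU-visible part of VRAM, tries to move
 * it into the visible range, falling back to GTT on -ENOMEM. Returns
 * 0 on success, negative error code on failure.
 */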
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct amdgpu_device *adev;
        struct amdgpu_bo *abo;
        unsigned long offset, size, lpfn;
        int i, r;

        if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
                return 0;

        abo = container_of(bo, struct amdgpu_bo, tbo);
        adev = abo->adev;
        if (bo->mem.mem_type != TTM_PL_VRAM)
                return 0;

        size = bo->mem.num_pages << PAGE_SHIFT;
        offset = bo->mem.start << PAGE_SHIFT;
        if ((offset + size) <= adev->mc.visible_vram_size)
                return 0;

        /* hurrah, the memory is not visible! */
        amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
        lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
        for (i = 0; i < abo->placement.num_placement; i++) {
                /* Force into visible VRAM */
                if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    (!abo->placements[i].lpfn || abo->placements[i].lpfn > lpfn))
                        abo->placements[i].lpfn = lpfn;
        }
        r = ttm_bo_validate(bo, &abo->placement, false, false);
        if (unlikely(r == -ENOMEM)) {
                amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
                return ttm_bo_validate(bo, &abo->placement, false, false);
        } else if (unlikely(r != 0)) {
                return r;
        }

        offset = bo->mem.start << PAGE_SHIFT;
        /* this should never happen */
        if ((offset + size) > adev->mc.visible_vram_size)
                return -EINVAL;

        return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
                     bool shared)
{
        struct reservation_object *resv = bo->tbo.resv;

        if (shared)
                reservation_object_add_shared_fence(resv, fence);
        else
                reservation_object_add_excl_fence(resv, fence);
}