/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

int radeon_gem_object_init(struct drm_gem_object *obj)
{
	/* radeon bos are created through radeon_gem_object_create(), so the
	 * generic gem init path should never be hit */
	BUG();

	return 0;
}

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		radeon_bo_unref(&robj);
	}
}

int radeon_gem_object_create(struct radeon_device *rdev, int size,
			     int alignment, int initial_domain,
			     bool discardable, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* maximum bo size is the minimum of visible vram and gtt size */
	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
	if (size > max_size) {
		printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
		       __func__, __LINE__, size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
	if (r) {
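		/* a VRAM allocation that fails for any reason other than an
		 * interrupting signal is retried with GTT added to the
		 * domain, so exhausting visible VRAM degrades gracefully
		 * instead of failing outright */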
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
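
	/* keep the bo on a device-wide list so radeon_gem_fini() can find
	 * and force-delete anything still alive at teardown */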
	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to: the write domain wins,
	 * then we fall back to the read domain */
	domain = wdomain;
	if (!domain)
		domain = rdomain;
	if (!domain) {
		/* nothing to do without a target domain */
		printk(KERN_WARNING "Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* asking for cpu access, wait for the object to be idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			printk(KERN_ERR "Failed to wait for object!\n");
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which is used by both the gem
 * create and the gem open ioctls.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

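	/* per-file GPU virtual address spaces only exist on cayman and
	 * newer; older parts have nothing to set up per handle */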
	if (rdev->family < CHIP_CAYMAN) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r)
		return r;

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va)
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	else
		++bo_va->ref_count;
	radeon_bo_unreserve(rbo);

	return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}
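
/* A -EDEADLK from the wait paths signals a GPU lockup: attempt a reset,
 * and if it succeeds return -EAGAIN so the caller retries the ioctl.
 * Every other error code is passed straight through. */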
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;
	unsigned i;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
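
	/* report the VRAM userspace can actually use: the CPU-visible
	 * aperture minus buffers that stay pinned for the driver's lifetime
	 * (stolen VGA memory and the fbdev framebuffer) */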
	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	if (rdev->stollen_vga_memory)
		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
	args->vram_visible -= radeon_fbdev_total_size(rdev);
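	/* likewise for GTT: a page and the indirect-buffer pool are reserved
	 * up front, and each ring buffer lives in GTT as well */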
	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		args->gart_size -= rdev->ring[i].ring_size;
	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

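	/* normal operations take the reset lock shared; a GPU reset takes it
	 * exclusively, so the allocation cannot race with a reset */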
	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, false,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now, if someone requests the CPU domain,
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}
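
/* Look up the bo's mmap offset (the ttm "fake offset"): userspace mmaps
 * this offset on the drm fd, and the fault handler sets up the actual
 * mapping later. */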
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
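	/* the wait above is non-blocking (no_wait == true), so -EBUSY means
	 * the bo is still referenced by the GPU; also translate the bo's
	 * current TTM placement back into a GEM domain for userspace */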
	switch (cur_placement) {
	case TTM_PL_VRAM:
		args->domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	case TTM_PL_TT:
		args->domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case TTM_PL_SYSTEM:
		args->domain = RADEON_GEM_DOMAIN_CPU;
		break;
	default:
		break;
	}
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, NULL, false);
	/* call back into a hw-specific wait function, if the asic has one */
	if (rdev->asic->ioctl_wait_idle)
		rdev->asic->ioctl_wait_idle(rdev, robj);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	uint32_t invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value, so that
	 * we can start using the field later without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove: we need to enforce that userspace sets the snooped
	 * flag, otherwise we will end up with broken userspace and we won't
	 * be able to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}
	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
		dev_err(&dev->pdev->dev, "only snooped mappings are supported for now\n");
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}
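
	/* a bo_va exists for every handle opened against this vm (see
	 * radeon_gem_object_open()); map assigns it a virtual address,
	 * unmap clears it again */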
	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->soffset) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->soffset;
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r) {
		args->operation = RADEON_VA_RESULT_OK;
	} else {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	radeon_bo_unreserve(rbo);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

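	/* the aligned pitch comes back in pixels; (args->bpp + 1) / 8
	 * converts the bit depth to whole bytes, and the final buffer size
	 * is rounded up to a full page */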
	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM,
				     false, ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

int radeon_mode_dumb_destroy(struct drm_file *file_priv,
			     struct drm_device *dev,
			     uint32_t handle)
{
	return drm_gem_handle_delete(file_priv, handle);
}