/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;

	idr_remove(res->idr, res->id);
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}

static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     void (*res_free) (struct vmw_resource *res))
{
	int ret;

	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;

	do {
		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}

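/*
 * Editor's note: the loop above is the classic idr allocation pattern of
 * this kernel generation. idr_pre_get() preallocates memory outside the
 * lock, and idr_get_new_above() is retried whenever it returns -EAGAIN
 * because the preallocated node was consumed by a concurrent allocator.
 * Ids start at 1 so that id 0 can never name a valid resource.
 */
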
/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */

static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}

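/*
 * Illustrative sketch (editor's addition, not part of the driver): a
 * typical caller pairs the lookup with an unreference, e.g.
 *
 *	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, cid);
 *	if (unlikely(res == NULL))
 *		return -EINVAL;
 *	... use the resource ...
 *	vmw_resource_unreference(&res);
 *
 * The lookup succeeds only for resources that vmw_resource_activate()
 * has marked available, and it returns with an extra reference held.
 */
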
/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}

static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(res);
		else
			res_free(res);
		return ret;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);
	return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);

	kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

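/*
 * Editor's note on the two-level reference scheme: user-space handles pin
 * the ttm_base_object, which in effect holds one reference on the embedded
 * vmw_resource (the reference taken via vmw_resource_reference() in
 * vmw_context_define_ioctl() below). When the last user-space reference
 * disappears, TTM calls this release function, the resource reference is
 * dropped, and vmw_resource_release() may then emit the FIFO destroy
 * command through the hw_destroy callback.
 */
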
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(ctx == NULL))
		return -ENOMEM;

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		struct vmw_user_context *ctx =
		    container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}

/**
 * Surface management.
 */

static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroySurface body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.sid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}

void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}

int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineSurface body;
	} *cmd;
	SVGA3dSize *cmd_size;
	struct vmw_resource *res = &srf->res;
	struct drm_vmw_size *src_size;
	size_t submit_size;
	uint32_t cmd_len;
	int i;

	BUG_ON(res_free == NULL);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, res_free);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed for create surface.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
	cmd->header.size = cpu_to_le32(cmd_len);
	cmd->body.sid = cpu_to_le32(res->id);
	cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		cmd->body.face[i].numMipLevels =
		    cpu_to_le32(srf->mip_levels[i]);
	}

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = cpu_to_le32(src_size->width);
		cmd_size->height = cpu_to_le32(src_size->height);
		cmd_size->depth = cpu_to_le32(src_size->depth);
	}

	vmw_fifo_commit(dev_priv, submit_size);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return 0;
}

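/*
 * Editor's note: the SURFACE_DEFINE command submitted above is
 * variable-sized; the fixed header/body pair is followed directly in the
 * FIFO by one SVGA3dSize per mip level, e.g. for a surface with two mip
 * levels:
 *
 *	SVGA3dCmdHeader        (id = SVGA_3D_CMD_SURFACE_DEFINE,
 *	                        size = sizeof(body) + 2 * sizeof(SVGA3dSize))
 *	SVGA3dCmdDefineSurface (sid, flags, format, per-face mip counts)
 *	SVGA3dSize             (mip 0: width, height, depth)
 *	SVGA3dSize             (mip 1: width, height, depth)
 *
 * which is why both submit_size and cmd_len are extended by
 * srf->num_sizes * sizeof(SVGA3dSize).
 */
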
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
}

int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   uint32_t handle, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;
	res = &srf->res;

	read_lock(&dev_priv->resource_lock);

	if (!res->avail || res->res_free != &vmw_user_surface_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*out = srf;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf =
	    kmalloc(sizeof(*user_srf), GFP_KERNEL);
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i;

	if (unlikely(user_srf == NULL))
		return -ENOMEM;

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		srf->num_sizes += srf->mip_levels[i];

	if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS) {
		ret = -EINVAL;
		goto out_err0;
	}

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_err1;
	}

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		return ret;
	}

	if (srf->flags & (1 << 9) &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
		/* clear the image */
		if (srf->snooper.image)
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
		else
			DRM_ERROR("Failed to allocate cursor_image\n");

	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	rep->sid = user_srf->base.hash.key;
	if (rep->sid == SVGA3D_INVALID_ID)
		DRM_ERROR("Created bad Surface ID.\n");

	vmw_resource_unreference(&res);
	return 0;
out_err1:
	kfree(srf->sizes);
out_err0:
	kfree(user_srf);
	return ret;
}

int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t handle, int *id)
{
	struct ttm_base_object *base;
	struct vmw_user_surface *user_srf;

	int ret = -EPERM;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_surface;

	user_srf = container_of(base, struct vmw_user_surface, base);
	*id = user_srf->srf.res.id;
	ret = 0;

out_bad_surface:
	/**
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */

	ttm_base_object_unref(&base);
	return ret;
}

/**
 * Buffer management.
 */

static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}

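/*
 * Worked example (editor's addition): with 4096-byte pages, a 64 KiB
 * buffer has num_pages = 16, so the page array needs
 * 16 * sizeof(void *) = 128 bytes, which "(x + PAGE_SIZE - 1) & PAGE_MASK"
 * rounds up to one full page (4096 bytes). The accounted size is then
 * that page plus the fixed per-bo overhead computed once from
 * glob->ttm_bo_extra_size and the potsized vmw_dma_buffer struct.
 */
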
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;
	struct vmw_private *dev_priv =
		container_of(bo->bdev, struct vmw_private, bdev);

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	if (vmw_bo->gmr_bound) {
		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
		spin_unlock(&glob->lru_lock);
	}
	kfree(vmw_bo);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size =
	    vmw_dmabuf_acc_size(bdev->glob,
				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/* we must free the bo here as
		 * ttm_buffer_object_init does so as well */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->gmr_lru);
	INIT_LIST_HEAD(&vmw_bo->validate_list);
	vmw_bo->gmr_id = 0;
	vmw_bo->gmr_bound = false;

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma;
	struct ttm_bo_global *glob = bo->glob;
	struct vmw_private *dev_priv =
		container_of(bo->bdev, struct vmw_private, bdev);

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	if (vmw_bo->gmr_bound) {
		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
		spin_unlock(&glob->lru_lock);
	}
	kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	rep->handle = vmw_user_bo->base.hash.key;
	rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
	rep->cur_gmr_id = vmw_user_bo->base.hash.key;
	rep->cur_gmr_offset = 0;

	ttm_bo_unref(&tmp);

out_no_base_object:
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}

uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo;

	if (bo->mem.mem_type == TTM_PL_VRAM)
		return SVGA_GMR_FRAMEBUFFER;

	vmw_bo = vmw_dma_buffer(bo);

	return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
}

void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->gmr_bound = true;
	vmw_bo->gmr_id = id;
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

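/*
 * Illustrative sketch (editor's addition): callers resolve a user-space
 * handle to a buffer object roughly as follows, and must balance the
 * reference taken above with ttm_bo_unref():
 *
 *	struct vmw_dma_buffer *vmw_bo;
 *	struct ttm_buffer_object *bo;
 *
 *	ret = vmw_user_dmabuf_lookup(tfile, handle, &vmw_bo);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	bo = &vmw_bo->base;
 *	... validate or map the buffer ...
 *	ttm_bo_unref(&bo);
 */
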
/**
 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
 * when we're out of ids, causing GMR space to be allocated
 * out of VRAM.
 */

int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
{
	struct ttm_bo_global *glob = dev_priv->bdev.glob;
	int id;
	int ret;

	do {
		if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
			return -ENOMEM;

		spin_lock(&glob->lru_lock);
		ret = ida_get_new(&dev_priv->gmr_ida, &id);
		spin_unlock(&glob->lru_lock);
	} while (ret == -EAGAIN);

	if (unlikely(ret != 0))
		return ret;

	if (unlikely(id >= dev_priv->max_gmr_ids)) {
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, id);
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	*p_id = (uint32_t) id;
	return 0;
}

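/*
 * Illustrative sketch (editor's addition; the binding step is an
 * assumption, handled by the separate GMR code rather than this file):
 * a caller that wants to back a buffer with a GMR would allocate an id
 * here and record it on the buffer, e.g.
 *
 *	uint32_t id;
 *
 *	ret = vmw_gmr_id_alloc(dev_priv, &id);
 *	if (unlikely(ret != 0))
 *		return ret;	(no eviction yet, see TODO above)
 *	vmw_dmabuf_set_gmr(bo, id);
 *
 * vmw_gmr_unbind() above is the visible counterpart of that code in
 * this file.
 */
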
/**
 * Stream management.
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);

	kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(stream == NULL))
		return -ENOMEM;

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}