@@ … @@ int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
void *ptr;
INIT_LIST_HEAD(&vgdev->free_vbufs);
+ spin_lock_init(&vgdev->free_vbufs_lock);
count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
count += virtqueue_get_vring_size(vgdev->cursorq.vq);
size = count * VBUFFER_SIZE;
@@ … @@ void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
count += virtqueue_get_vring_size(vgdev->cursorq.vq);
+ spin_lock(&vgdev->free_vbufs_lock);
for (i = 0; i < count; i++) {
if (WARN_ON(list_empty(&vgdev->free_vbufs)))
return;
vbuf = list_first_entry(&vgdev->free_vbufs,
struct virtio_gpu_vbuffer, list);
list_del(&vbuf->list);
}
+ spin_unlock(&vgdev->free_vbufs_lock);
kfree(vgdev->vbufs);
}
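With this hunk applied, the WARN_ON() early return inside the loop above now returns while free_vbufs_lock is still held. A minimal sketch of a variant that drops the lock before bailing out, shown only for illustration and not part of the patch:

	spin_lock(&vgdev->free_vbufs_lock);
	for (i = 0; i < count; i++) {
		if (WARN_ON(list_empty(&vgdev->free_vbufs))) {
			/* drop the lock before the early return */
			spin_unlock(&vgdev->free_vbufs_lock);
			return;
		}
		vbuf = list_first_entry(&vgdev->free_vbufs,
					struct virtio_gpu_vbuffer, list);
		list_del(&vbuf->list);
	}
	spin_unlock(&vgdev->free_vbufs_lock);
	kfree(vgdev->vbufs);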
@@ … @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev, ...)
{
struct virtio_gpu_vbuffer *vbuf;
+ spin_lock(&vgdev->free_vbufs_lock);
BUG_ON(list_empty(&vgdev->free_vbufs));
vbuf = list_first_entry(&vgdev->free_vbufs,
struct virtio_gpu_vbuffer, list);
list_del(&vbuf->list);
+ spin_unlock(&vgdev->free_vbufs_lock);
memset(vbuf, 0, VBUFFER_SIZE);
BUG_ON(size > MAX_INLINE_CMD_SIZE);
@@ … @@ static void free_vbuf(struct virtio_gpu_device *vgdev, ...)
if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
kfree(vbuf->resp_buf);
kfree(vbuf->data_buf);
+ spin_lock(&vgdev->free_vbufs_lock);
list_add(&vbuf->list, &vgdev->free_vbufs);
+ spin_unlock(&vgdev->free_vbufs_lock);
}
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
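For reference, the list and lock used throughout these hunks live in the per-device structure. A minimal sketch of the fields implied by this excerpt (names taken from the code above; exact types, placement, and the companion virtgpu_drv.h hunk are assumed, not shown here):

	struct list_head free_vbufs;	/* pool of preallocated vbuffers */
	spinlock_t free_vbufs_lock;	/* protects free_vbufs */
	void *vbufs;			/* backing allocation, released in virtio_gpu_free_vbufs() */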