struct device *mmu_dev;
struct vcodec_iommu_info *iommu_info;
+ struct work_struct set_work;
};
struct vpu_service_info {
struct wake_lock wake_lock;
struct delayed_work power_off_work;
+ struct wake_lock set_wake_lock;
+ struct workqueue_struct *set_workq;
ktime_t last; /* record previous power-on time */
/* vpu service structure global lock */
struct mutex lock;
struct vpu_service_info *pservice = data->pservice;
struct vpu_subdev_data *subdata, *n;
- if (pservice->subcnt < 2)
+ if (pservice->subcnt < 2) {
+ if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
+ set_bit(MMU_ACTIVATED, &data->state);
+
+ if (atomic_read(&pservice->enabled)) {
+ if (vcodec_iommu_attach(data->iommu_info))
+ dev_err(data->dev,
+ "vcodec service attach failed\n"
+ );
+ else
+ BUG_ON(
+ !atomic_read(&pservice->enabled)
+ );
+ }
+ }
return;
+ }
if (pservice->curr_mode == data->mode)
return;
if (data != subdata && subdata->mmu_dev &&
test_bit(MMU_ACTIVATED, &subdata->state)) {
clear_bit(MMU_ACTIVATED, &subdata->state);
+ vcodec_iommu_detach(subdata->iommu_info);
}
}
bits = 1 << pservice->mode_bit;
#endif
if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
set_bit(MMU_ACTIVATED, &data->state);
- if (!atomic_read(&pservice->enabled))
+ if (atomic_read(&pservice->enabled))
+ vcodec_iommu_attach(data->iommu_info);
+ else
/* FIXME BUG_ON should not be used in mass produce */
BUG_ON(!atomic_read(&pservice->enabled));
}
if (atomic_read(&pservice->enabled)) {
/* Need to reset iommu */
vcodec_iommu_detach(data->iommu_info);
- vcodec_iommu_attach(data->iommu_info);
} else {
/* FIXME BUG_ON should not be used in mass produce */
BUG_ON(!atomic_read(&pservice->enabled));
pservice->last = now;
}
ret = atomic_add_unless(&pservice->enabled, 1, 1);
- if (!ret) {
- if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
- set_bit(MMU_ACTIVATED, &data->state);
- vcodec_iommu_attach(data->iommu_info);
- }
+ if (!ret)
return;
- }
dev_dbg(pservice->dev, "power on\n");
#endif
pm_runtime_get_sync(pservice->dev);
- if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
- set_bit(MMU_ACTIVATED, &data->state);
- if (atomic_read(&pservice->enabled))
- vcodec_iommu_attach(data->iommu_info);
- else
- /*
- * FIXME BUG_ON should not be used in mass
- * produce.
- */
- BUG_ON(!atomic_read(&pservice->enabled));
- }
-
udelay(5);
atomic_add(1, &pservice->power_on_cnt);
wake_lock(&pservice->wake_lock);
if (pps_info_count) {
u8 *pps;
- mutex_lock(&pservice->lock);
-
pps = vcodec_iommu_map_kernel
(data->iommu_info, session, hdl);
vcodec_iommu_unmap_kernel
(data->iommu_info, session, hdl);
- mutex_unlock(&pservice->lock);
}
}
struct vpu_reg *reg = list_entry(pservice->waiting.next,
struct vpu_reg, status_link);
+ vpu_service_power_on(data, pservice);
+
if (change_able || !reset_request) {
switch (reg->type) {
case VPU_ENC: {
vpu_debug_leave();
}
+/*
+ * vpu_set_register_work - deferred register-programming handler.
+ *
+ * Runs on the driver's single-threaded "vcodec" workqueue. The ioctl
+ * (VPU_IOC_SET_REG / COMPAT_VPU_IOC_SET_REG) and IRQ-done paths queue
+ * data->set_work instead of calling try_set_reg() inline, so the actual
+ * hardware register setup happens in process context here, serialized
+ * under pservice->lock.
+ *
+ * @work_s: embedded &struct vpu_subdev_data.set_work, used to recover
+ *          the owning subdev via container_of().
+ */
+static void vpu_set_register_work(struct work_struct *work_s)
+{
+ struct vpu_subdev_data *data = container_of(work_s,
+ struct vpu_subdev_data,
+ set_work);
+ struct vpu_service_info *pservice = data->pservice;
+
+ mutex_lock(&pservice->lock);
+ try_set_reg(data);
+ mutex_unlock(&pservice->lock);
+}
+
static int return_reg(struct vpu_subdev_data *data,
struct vpu_reg *reg, u32 __user *dst)
{
struct vpu_request req;
struct vpu_reg *reg;
- vpu_service_power_on(data, pservice);
-
vpu_debug(DEBUG_IOCTL, "pid %d set reg type %d\n",
session->pid, session->type);
if (copy_from_user(&req, (void __user *)arg,
if (NULL == reg) {
return -EFAULT;
} else {
- mutex_lock(&pservice->lock);
- try_set_reg(data);
- mutex_unlock(&pservice->lock);
+ queue_work(pservice->set_workq, &data->set_work);
}
} break;
case VPU_IOC_GET_REG: {
struct vpu_reg *reg;
int ret;
- vpu_service_power_on(data, pservice);
-
vpu_debug(DEBUG_IOCTL, "pid %d get reg type %d\n",
session->pid, session->type);
if (copy_from_user(&req, (void __user *)arg,
}
vpu_service_session_clear(data, session);
mutex_unlock(&pservice->lock);
+
return ret;
}
-
mutex_lock(&pservice->lock);
reg = list_entry(session->done.next,
struct vpu_reg, session_link);
struct compat_vpu_request req;
struct vpu_reg *reg;
- vpu_service_power_on(data, pservice);
-
vpu_debug(DEBUG_IOCTL, "compat set reg type %d\n",
session->type);
if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
if (NULL == reg) {
return -EFAULT;
} else {
- mutex_lock(&pservice->lock);
- try_set_reg(data);
- mutex_unlock(&pservice->lock);
+ queue_work(pservice->set_workq, &data->set_work);
}
} break;
case COMPAT_VPU_IOC_GET_REG: {
struct vpu_reg *reg;
int ret;
- vpu_service_power_on(data, pservice);
-
vpu_debug(DEBUG_IOCTL, "compat get reg type %d\n",
session->type);
if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
}
wake_up(&session->wait);
- vpu_service_power_on(data, pservice);
mutex_lock(&pservice->lock);
/* remove this filp from the asynchronusly notified filp's */
list_del_init(&session->list_session);
data->pservice = pservice;
data->dev = dev;
+
+ INIT_WORK(&data->set_work, vpu_set_register_work);
of_property_read_u32(np, "dev_mode", (u32 *)&data->mode);
if (pservice->reg_base == 0) {
clear_bit(MMU_ACTIVATED, &data->state);
vpu_service_power_on(data, pservice);
+ of_property_read_u32(np, "allocator", (u32 *)&pservice->alloc_type);
+ data->iommu_info = vcodec_iommu_info_create(dev, data->mmu_dev,
+ pservice->alloc_type);
+ dev_info(dev, "allocator is %s\n", pservice->alloc_type == 1 ? "drm" :
+ (pservice->alloc_type == 2 ? "ion" : "null"));
vcodec_enter_mode(data);
ret = vpu_service_check_hw(data);
if (ret < 0) {
vpu_err("error: hw info check faild\n");
goto err;
}
+ vcodec_exit_mode(data);
hw_info = data->hw_info;
regs = (u8 *)data->regs;
atomic_set(&data->enc_dev.irq_count_codec, 0);
atomic_set(&data->enc_dev.irq_count_pp, 0);
- of_property_read_u32(np, "allocator", (u32 *)&pservice->alloc_type);
- data->iommu_info = vcodec_iommu_info_create(dev, data->mmu_dev,
- pservice->alloc_type);
- dev_info(dev, "allocator is %s\n", pservice->alloc_type == 1 ? "drm" :
- (pservice->alloc_type == 2 ? "ion" : "null"));
get_hw_info(data);
pservice->auto_freq = true;
- vcodec_exit_mode(data);
/* create device node */
ret = alloc_chrdev_region(&data->dev_t, 0, 1, name);
if (ret) {
return -ENOMEM;
pservice->dev = dev;
+ pservice->set_workq = create_singlethread_workqueue("vcodec");
+ if (!pservice->set_workq) {
+ dev_err(dev, "failed to create workqueue\n");
+ return -ENOMEM;
+ }
+
driver_data = vcodec_get_drv_data(pdev);
if (!driver_data)
return -EINVAL;
err:
dev_info(dev, "init failed\n");
vpu_service_power_off(pservice);
+ destroy_workqueue(pservice->set_workq);
wake_lock_destroy(&pservice->wake_lock);
return ret;
dev_err(&pdev->dev, "wait total running time out\n");
vcodec_exit_mode(data);
-
vpu_service_clear(data);
if (of_property_read_bool(np, "subcnt")) {
for (i = 0; i < pservice->subcnt; i++) {
sub_np = of_parse_phandle(np, "rockchip,sub", i);
sub_pdev = of_find_device_by_node(sub_np);
-
vcodec_subdev_remove(platform_get_drvdata(sub_pdev));
}
else
reg_from_run_to_done(data, pservice->reg_pproc);
}
- try_set_reg(data);
+
+ queue_work(pservice->set_workq, &data->set_work);
mutex_unlock(&pservice->lock);
return IRQ_HANDLED;
}
else
reg_from_run_to_done(data, pservice->reg_codec);
}
- try_set_reg(data);
+ queue_work(pservice->set_workq, &data->set_work);
mutex_unlock(&pservice->lock);
+
return IRQ_HANDLED;
}