struct device *mmu_dev;
struct vcodec_iommu_info *iommu_info;
+ struct work_struct set_work;
};
struct vpu_service_info {
struct wake_lock wake_lock;
struct delayed_work power_off_work;
+ struct wake_lock set_wake_lock;
+ struct workqueue_struct *set_workq;
ktime_t last; /* record previous power-on time */
/* vpu service structure global lock */
struct mutex lock;
struct vpu_service_info *pservice = data->pservice;
struct vpu_subdev_data *subdata, *n;
- if (pservice->subcnt < 2)
+ if (pservice->subcnt < 2) {
+ if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
+ set_bit(MMU_ACTIVATED, &data->state);
+
+ if (atomic_read(&pservice->enabled)) {
+ if (vcodec_iommu_attach(data->iommu_info))
+ dev_err(data->dev,
+ "vcodec service attach failed\n"
+ );
+ else
+ BUG_ON(
+ !atomic_read(&pservice->enabled)
+ );
+ }
+ }
return;
+ }
if (pservice->curr_mode == data->mode)
return;
if (data != subdata && subdata->mmu_dev &&
test_bit(MMU_ACTIVATED, &subdata->state)) {
clear_bit(MMU_ACTIVATED, &subdata->state);
+ vcodec_iommu_detach(subdata->iommu_info);
}
}
bits = 1 << pservice->mode_bit;
#endif
if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
set_bit(MMU_ACTIVATED, &data->state);
- if (!atomic_read(&pservice->enabled))
+ if (atomic_read(&pservice->enabled))
+ vcodec_iommu_attach(data->iommu_info);
+ else
/* FIXME BUG_ON should not be used in mass produce */
BUG_ON(!atomic_read(&pservice->enabled));
}
_vpu_reset(data);
if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
+ clear_bit(MMU_ACTIVATED, &data->state);
if (atomic_read(&pservice->enabled)) {
/* Need to reset iommu */
vcodec_iommu_detach(data->iommu_info);
- vcodec_iommu_attach(data->iommu_info);
} else {
/* FIXME BUG_ON should not be used in mass produce */
BUG_ON(!atomic_read(&pservice->enabled));
struct vpu_service_info *pservice = data->pservice;
list_for_each_entry_safe(reg, n, &pservice->waiting, status_link) {
- reg_deinit(data, reg);
+ reg_deinit(reg->data, reg);
}
/* wake up session wait event to prevent the timeout hw reset
pservice->last = now;
}
ret = atomic_add_unless(&pservice->enabled, 1, 1);
- if (!ret) {
- if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
- set_bit(MMU_ACTIVATED, &data->state);
- vcodec_iommu_attach(data->iommu_info);
- }
+ if (!ret)
return;
- }
dev_dbg(pservice->dev, "power on\n");
#endif
pm_runtime_get_sync(pservice->dev);
- if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
- set_bit(MMU_ACTIVATED, &data->state);
- if (atomic_read(&pservice->enabled))
- vcodec_iommu_attach(data->iommu_info);
- else
- /*
- * FIXME BUG_ON should not be used in mass
- * produce.
- */
- BUG_ON(!atomic_read(&pservice->enabled));
- }
-
udelay(5);
atomic_add(1, &pservice->power_on_cnt);
wake_lock(&pservice->wake_lock);
if (pps_info_count) {
u8 *pps;
- mutex_lock(&pservice->lock);
-
pps = vcodec_iommu_map_kernel
(data->iommu_info, session, hdl);
vcodec_iommu_unmap_kernel
(data->iommu_info, session, hdl);
- mutex_unlock(&pservice->lock);
}
}
pservice->reg_codec = reg;
- vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
+ vpu_debug(DEBUG_TASK_INFO,
+ "reg: base %3d end %d en %2d mask: en %x gate %x\n",
base, end, reg_en, enable_mask, gating_mask);
VEPU_CLEAN_CACHE(dst);
pservice->reg_codec = reg;
- vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
+ vpu_debug(DEBUG_TASK_INFO,
+ "reg: base %3d end %d en %2d mask: en %x gate %x\n",
base, end, reg_en, enable_mask, gating_mask);
VDPU_CLEAN_CACHE(dst);
pservice->reg_pproc = reg;
- vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
+ vpu_debug(DEBUG_TASK_INFO,
+ "reg: base %3d end %d en %2d mask: en %x gate %x\n",
base, end, reg_en, enable_mask, gating_mask);
if (debug & DEBUG_SET_REG)
pservice->reg_codec = reg;
pservice->reg_pproc = reg;
- vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
+ vpu_debug(DEBUG_TASK_INFO,
+ "reg: base %3d end %d en %2d mask: en %x gate %x\n",
base, end, reg_en, enable_mask, gating_mask);
/* VDPU_SOFT_RESET(dst); */
struct vpu_reg *reg = list_entry(pservice->waiting.next,
struct vpu_reg, status_link);
+ vpu_service_power_on(data, pservice);
+
if (change_able || !reset_request) {
switch (reg->type) {
case VPU_ENC: {
vpu_debug_leave();
}
+/*
+ * Deferred register-programming handler, run on pservice->set_workq.
+ * Replaces the direct try_set_reg() calls formerly made from the ioctl
+ * and irq paths; callers now queue_work(&data->set_work) instead.
+ */
+static void vpu_set_register_work(struct work_struct *work_s)
+{
+	/* Recover the owning subdev from the embedded work_struct. */
+	struct vpu_subdev_data *data = container_of(work_s,
+					   struct vpu_subdev_data,
+					   set_work);
+	struct vpu_service_info *pservice = data->pservice;
+
+	/* pservice->lock serializes hardware register setup. */
+	mutex_lock(&pservice->lock);
+	try_set_reg(data);
+	mutex_unlock(&pservice->lock);
+}
+
static int return_reg(struct vpu_subdev_data *data,
struct vpu_reg *reg, u32 __user *dst)
{
struct vpu_request req;
struct vpu_reg *reg;
- vpu_service_power_on(data, pservice);
-
vpu_debug(DEBUG_IOCTL, "pid %d set reg type %d\n",
session->pid, session->type);
if (copy_from_user(&req, (void __user *)arg,
if (NULL == reg) {
return -EFAULT;
} else {
- mutex_lock(&pservice->lock);
- try_set_reg(data);
- mutex_unlock(&pservice->lock);
+ queue_work(pservice->set_workq, &data->set_work);
}
} break;
case VPU_IOC_GET_REG: {
struct vpu_reg *reg;
int ret;
- vpu_service_power_on(data, pservice);
-
vpu_debug(DEBUG_IOCTL, "pid %d get reg type %d\n",
session->pid, session->type);
if (copy_from_user(&req, (void __user *)arg,
&pservice->total_running);
dev_err(pservice->dev,
"%d task is running but not return, reset hardware...",
- task_running);
+ task_running);
vpu_reset(data);
dev_err(pservice->dev, "done\n");
}
vpu_service_session_clear(data, session);
mutex_unlock(&pservice->lock);
+
return ret;
}
-
mutex_lock(&pservice->lock);
reg = list_entry(session->done.next,
struct vpu_reg, session_link);
struct compat_vpu_request req;
struct vpu_reg *reg;
- vpu_service_power_on(data, pservice);
-
vpu_debug(DEBUG_IOCTL, "compat set reg type %d\n",
session->type);
if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
if (NULL == reg) {
return -EFAULT;
} else {
- mutex_lock(&pservice->lock);
- try_set_reg(data);
- mutex_unlock(&pservice->lock);
+ queue_work(pservice->set_workq, &data->set_work);
}
} break;
case COMPAT_VPU_IOC_GET_REG: {
struct vpu_reg *reg;
int ret;
- vpu_service_power_on(data, pservice);
-
vpu_debug(DEBUG_IOCTL, "compat get reg type %d\n",
session->type);
if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
}
wake_up(&session->wait);
- vpu_service_power_on(data, pservice);
mutex_lock(&pservice->lock);
/* remove this filp from the asynchronusly notified filp's */
list_del_init(&session->list_session);
data->pservice = pservice;
data->dev = dev;
+
+ INIT_WORK(&data->set_work, vpu_set_register_work);
of_property_read_u32(np, "dev_mode", (u32 *)&data->mode);
if (pservice->reg_base == 0) {
clear_bit(MMU_ACTIVATED, &data->state);
vpu_service_power_on(data, pservice);
+ of_property_read_u32(np, "allocator", (u32 *)&pservice->alloc_type);
+ data->iommu_info = vcodec_iommu_info_create(dev, data->mmu_dev,
+ pservice->alloc_type);
+ dev_info(dev, "allocator is %s\n", pservice->alloc_type == 1 ? "drm" :
+ (pservice->alloc_type == 2 ? "ion" : "null"));
+ vcodec_enter_mode(data);
ret = vpu_service_check_hw(data);
if (ret < 0) {
vpu_err("error: hw info check faild\n");
goto err;
}
+ vcodec_exit_mode(data);
hw_info = data->hw_info;
regs = (u8 *)data->regs;
atomic_set(&data->enc_dev.irq_count_codec, 0);
atomic_set(&data->enc_dev.irq_count_pp, 0);
- vcodec_enter_mode(data);
- of_property_read_u32(np, "allocator", (u32 *)&pservice->alloc_type);
- data->iommu_info = vcodec_iommu_info_create(dev, data->mmu_dev,
- pservice->alloc_type);
- dev_info(dev, "allocator is %s\n", pservice->alloc_type == 1 ? "drm" :
- (pservice->alloc_type == 2 ? "ion" : "null"));
get_hw_info(data);
pservice->auto_freq = true;
- vcodec_exit_mode(data);
/* create device node */
ret = alloc_chrdev_region(&data->dev_t, 0, 1, name);
if (ret) {
return -ENOMEM;
pservice->dev = dev;
+ pservice->set_workq = create_singlethread_workqueue("vcodec");
+ if (!pservice->set_workq) {
+ dev_err(dev, "failed to create workqueue\n");
+ return -ENOMEM;
+ }
+
driver_data = vcodec_get_drv_data(pdev);
if (!driver_data)
return -EINVAL;
pm_runtime_enable(dev);
if (of_property_read_bool(np, "subcnt")) {
+ struct vpu_subdev_data *data = NULL;
+
+ data = devm_kzalloc(dev, sizeof(struct vpu_subdev_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
for (i = 0; i < pservice->subcnt; i++) {
struct device_node *sub_np;
struct platform_device *sub_pdev;
vcodec_subdev_probe(sub_pdev, pservice);
}
+ data->pservice = pservice;
+ platform_set_drvdata(pdev, data);
} else {
vcodec_subdev_probe(pdev, pservice);
}
err:
dev_info(dev, "init failed\n");
vpu_service_power_off(pservice);
+ destroy_workqueue(pservice->set_workq);
wake_lock_destroy(&pservice->wake_lock);
return ret;
{
struct vpu_subdev_data *data = platform_get_drvdata(pdev);
struct vpu_service_info *pservice = data->pservice;
+ struct device_node *np = pdev->dev.of_node;
int val;
int ret;
+ int i;
dev_info(&pdev->dev, "vcodec shutdown");
dev_err(&pdev->dev, "wait total running time out\n");
vcodec_exit_mode(data);
-
- vpu_service_power_on(data, pservice);
vpu_service_clear(data);
- vcodec_subdev_remove(data);
+ if (of_property_read_bool(np, "subcnt")) {
+ for (i = 0; i < pservice->subcnt; i++) {
+ struct device_node *sub_np;
+ struct platform_device *sub_pdev;
+
+ sub_np = of_parse_phandle(np, "rockchip,sub", i);
+ sub_pdev = of_find_device_by_node(sub_np);
+ vcodec_subdev_remove(platform_get_drvdata(sub_pdev));
+ }
+
+ } else {
+ vcodec_subdev_remove(data);
+ }
pm_runtime_disable(&pdev->dev);
}
}
pservice->auto_freq = true;
- vpu_debug(DEBUG_EXTRA_INFO, "vpu_service set to auto frequency mode\n");
+ vpu_debug(DEBUG_EXTRA_INFO,
+ "vpu_service set to auto frequency mode\n");
atomic_set(&pservice->freq_status, VPU_FREQ_BUT);
pservice->bug_dec_addr = of_machine_is_compatible
raw_status = readl_relaxed(dev->regs + task->reg_irq);
dec_status = raw_status;
- vpu_debug(DEBUG_TASK_INFO, "vdpu_irq reg %d status %x mask: irq %x ready %x error %0x\n",
+ vpu_debug(DEBUG_TASK_INFO,
+ "vdpu_irq reg %d status %x mask: irq %x ready %x error %0x\n",
task->reg_irq, dec_status,
task->irq_mask, task->ready_mask, task->error_mask);
else
reg_from_run_to_done(data, pservice->reg_pproc);
}
- try_set_reg(data);
+
+ queue_work(pservice->set_workq, &data->set_work);
mutex_unlock(&pservice->lock);
return IRQ_HANDLED;
}
irq_status = readl_relaxed(dev->regs + task->reg_irq);
- vpu_debug(DEBUG_TASK_INFO, "vepu_irq reg %d status %x mask: irq %x ready %x error %0x\n",
+ vpu_debug(DEBUG_TASK_INFO,
+ "vepu_irq reg %d status %x mask: irq %x ready %x error %0x\n",
task->reg_irq, irq_status,
task->irq_mask, task->ready_mask, task->error_mask);
else
reg_from_run_to_done(data, pservice->reg_codec);
}
- try_set_reg(data);
+ queue_work(pservice->set_workq, &data->set_work);
mutex_unlock(&pservice->lock);
+
return IRQ_HANDLED;
}