clk_prepare_enable(rga2_drvdata->rga2);\r
clk_prepare_enable(rga2_drvdata->aclk_rga2);\r
clk_prepare_enable(rga2_drvdata->hclk_rga2);\r
- //clk_enable(rga2_drvdata->pd_rga2);\r
+ clk_prepare_enable(rga2_drvdata->pd_rga2);\r
wake_lock(&rga2_drvdata->wake_lock);\r
rga2_service.enable = true;\r
}\r
rga2_dump();\r
}\r
\r
- //clk_disable(rga2_drvdata->pd_rga2);\r
clk_disable_unprepare(rga2_drvdata->rga2);\r
- //clk_disable_unprepare(rga2_drvdata->pd_rga2);\r
+ clk_disable_unprepare(rga2_drvdata->pd_rga2);\r
clk_disable_unprepare(rga2_drvdata->aclk_rga2);\r
clk_disable_unprepare(rga2_drvdata->hclk_rga2);\r
wake_unlock(&rga2_drvdata->wake_lock);\r
RGA_MSG_2_RGA2_MSG_32(&req_rga, &req);\r
\r
if((atomic_read(&rga2_service.total_running) > 8))\r
- {\r
ret = rga2_blit_sync(session, &req);\r
- }\r
else\r
- {\r
ret = rga2_blit_async(session, &req);\r
- }\r
+\r
break;\r
case RGA2_BLIT_SYNC:\r
if (unlikely(copy_from_user(&req, compat_ptr((compat_uptr_t)arg), sizeof(struct rga2_req))))\r
}\r
\r
if((atomic_read(&rga2_service.total_running) > 16))\r
- {\r
ret = rga2_blit_sync(session, &req);\r
- }\r
else\r
- {\r
ret = rga2_blit_async(session, &req);\r
- }\r
+\r
break;\r
case RGA_FLUSH:\r
case RGA2_FLUSH:\r
iounmap((void __iomem *)(data->rga_base));\r
\r
//clk_put(data->pd_rga2);\r
- devm_clk_put(&pdev->dev, data->rga2);\r
+ devm_clk_put(&pdev->dev, data->rga2);\r
+ devm_clk_put(&pdev->dev, data->pd_rga2);\r
devm_clk_put(&pdev->dev, data->aclk_rga2);\r
devm_clk_put(&pdev->dev, data->hclk_rga2);\r
\r
}\r
\r
#endif\r
/*
 * Register the driver at fs_initcall time instead of module_init:
 * fs_initcall runs earlier in the boot sequence, so the RGA2 block is
 * initialized before consumers (e.g. display/graphics stack) probe.
 * NOTE(review): this was a -module_init/+fs_initcall diff pair in the
 * pasted patch fragment; resolved to the post-patch state.
 */
fs_initcall(rga2_init);
module_exit(rga2_exit);
\r
/* Module information */\r