u32 *tb;
int owns_tb = 1;
- KBASE_LOG(1, kctx->kbdev->dev, "in %s\n", __func__);
+ dev_dbg(kctx->kbdev->dev, "in %s\n", __func__);
size = (vma->vm_end - vma->vm_start);
nr_pages = size >> PAGE_SHIFT;
vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC);
/* the rest of the flags is added by the cpu_mmap handler */
- KBASE_LOG(1, kctx->kbdev->dev, "%s done\n", __func__);
+ dev_dbg(kctx->kbdev->dev, "%s done\n", __func__);
return 0;
out_no_va_region:
size_t size;
int err = 0;
- KBASE_LOG(1, kctx->kbdev->dev, "in kbase_mmu_dump_mmap\n");
+ dev_dbg(kctx->kbdev->dev, "in kbase_mmu_dump_mmap\n");
size = (vma->vm_end - vma->vm_start);
nr_pages = size >> PAGE_SHIFT;
*kmap_addr = kaddr;
*reg = new_reg;
- KBASE_LOG(1, kctx->kbdev->dev, "kbase_mmu_dump_mmap done\n");
+ dev_dbg(kctx->kbdev->dev, "kbase_mmu_dump_mmap done\n");
return 0;
out_no_alloc:
int free_on_close = 0;
struct device *dev = kctx->kbdev->dev;
- KBASE_LOG(1, dev, "kbase_mmap\n");
+ dev_dbg(dev, "kbase_mmap\n");
nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
/* strip away corresponding VM_MAY% flags to the VM_% flags requested */
err = kbase_trace_buffer_mmap(kctx, vma, &reg, &kaddr);
if (0 != err)
goto out_unlock;
- KBASE_LOG(1, dev, "kbase_trace_buffer_mmap ok\n");
+ dev_dbg(dev, "kbase_trace_buffer_mmap ok\n");
/* free the region on munmap */
free_on_close = 1;
goto map;