allocation via dma_map_sg. The implicit contract here is that
memory coming from the heaps is ready for DMA, i.e. if it has a
cached mapping that mapping has been invalidated */
- for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
+ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
sg_dma_address(sg) = sg_phys(sg);
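+		/*
+		 * On architectures with CONFIG_NEED_SG_DMA_LENGTH the DMA
+		 * length is a separate scatterlist field; since dma_map_sg()
+		 * is bypassed here, fill it in by hand.
+		 */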
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg_dma_len(sg) = sg->length;
+#endif
+ }
mutex_lock(&dev->buffer_lock);
ion_buffer_add(dev, buffer);
mutex_unlock(&dev->buffer_lock);
while (n) {
struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
+
if (buffer < entry->buffer)
n = n->rb_left;
else if (buffer > entry->buffer)
mutex_lock(&buffer->lock);
- if (ION_IS_CACHED(buffer->flags)) {
- pr_err("%s: Cannot map iommu as cached.\n", __func__);
- ret = -EINVAL;
- goto out;
- }
-
if (!handle->buffer->heap->ops->map_iommu) {
pr_err("%s: map_iommu is not implemented by this heap.\n",
__func__);
goto out;
}
- kref_put(&iommu_map->ref, ion_iommu_release);
-
buffer->iommu_map_cnt--;
trace_ion_iommu_unmap(client->display_name, (void*)buffer, buffer->size,
dev_name(iommu_dev), iommu_map->iova_addr,
iommu_map->mapped_size, buffer->iommu_map_cnt);
+
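+	/*
+	 * Drop the reference only after the tracepoint has fired:
+	 * ion_iommu_release() may free iommu_map, whose fields the
+	 * trace call above still reads.
+	 */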
+ kref_put(&iommu_map->ref, ion_iommu_release);
out:
mutex_unlock(&buffer->lock);
mutex_unlock(&client->lock);
{
int serial = -1;
struct rb_node *node;
+
for (node = rb_first(root); node; node = rb_next(node)) {
struct ion_client *client = rb_entry(node, struct ion_client,
node);
+
if (strcmp(client->name, name))
continue;
serial = max(serial, client->display_serial);
pr_err("%s: failure mapping buffer to userspace\n",
__func__);
- trace_ion_buffer_mmap("", (unsigned int)buffer, buffer->size,
+	trace_ion_buffer_mmap("", (void *)buffer, buffer->size,
vma->vm_start, vma->vm_end);
return ret;
{
struct ion_buffer *buffer = dmabuf->priv;
- trace_ion_buffer_munmap("", (unsigned int)buffer, buffer->size,
+	trace_ion_buffer_munmap("", (void *)buffer, buffer->size,
vma->vm_start, vma->vm_end);
return 0;
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
struct ion_buffer *buffer = dmabuf->priv;
+
ion_buffer_put(buffer);
}
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
struct ion_buffer *buffer = dmabuf->priv;
+
return buffer->vaddr + offset * PAGE_SIZE;
}
mutex_unlock(&client->lock);
goto end;
}
- mutex_unlock(&client->lock);
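+	/*
+	 * Keep client->lock held across handle creation and ion_handle_add()
+	 * so a concurrent import of the same dma-buf cannot race between
+	 * lookup and insertion.
+	 */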
handle = ion_handle_create(client, buffer);
- if (IS_ERR(handle))
+ if (IS_ERR(handle)) {
+ mutex_unlock(&client->lock);
goto end;
+ }
- mutex_lock(&client->lock);
ret = ion_handle_add(client, handle);
mutex_unlock(&client->lock);
if (ret) {
case ION_IOC_IMPORT:
{
struct ion_handle *handle;
+
handle = ion_import_dma_buf(client, data.fd.fd);
if (IS_ERR(handle))
ret = PTR_ERR(handle);
struct ion_client *client = rb_entry(n, struct ion_client,
node);
size_t size = ion_debug_heap_total(client, heap->id);
+
if (!size)
continue;
if (client->task) {
continue;
total_size += buffer->size;
if (!buffer->handle_count) {
- seq_printf(s, "%16.s %16u %16zu %d %d\n",
+			seq_printf(s, "%16s %16u %16zu 0x%p %d %d\n",
buffer->task_comm, buffer->pid,
- buffer->size, buffer->kmap_cnt,
+ buffer->size, buffer,
+ buffer->kmap_cnt,
atomic_read(&buffer->ref.refcount));
total_orphaned_size += buffer->size;
}
};
#endif
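+/*
+ * debugfs knob: "debug" exposes ion_trace_lvl so the ION trace
+ * verbosity can be inspected and changed at runtime.
+ */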
+static ssize_t
+rockchip_ion_debug_write(struct file *filp, const char __user *ubuf,
+			 size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+
+	/* Clamp before copying so the NUL terminator below stays in bounds. */
+	if (cnt > sizeof(buf) - 1)
+		cnt = sizeof(buf) - 1;
+
+	if (copy_from_user(buf, ubuf, cnt))
+		return -EFAULT;
+	buf[cnt] = '\0';
+
+	ion_trace_lvl = simple_strtol(buf, NULL, 10);
+	*ppos += cnt;
+
+	return cnt;
+}
+
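+/* Report the current ion_trace_lvl value back to userspace. */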
+static ssize_t
+rockchip_ion_debug_read(struct file *filp, char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ int r;
+ char buf[64];
+
+ if (*ppos)
+ return 0;
+
+	snprintf(buf, sizeof(buf), "%d\n", ion_trace_lvl);
+ r = simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
+
+ return r;
+}
+
+static const struct file_operations rockchip_ion_debug_fops = {
+ .read = rockchip_ion_debug_read,
+ .write = rockchip_ion_debug_write,
+};
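+
+/*
+ * Usage sketch, assuming debugfs is mounted at /sys/kernel/debug and
+ * ION's debugfs root is named "ion":
+ *   echo 2 > /sys/kernel/debug/ion/debug   # set the trace level
+ *   cat /sys/kernel/debug/ion/debug        # read it back
+ */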
+
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
struct dentry *debug_file;
if (!debug_file) {
char buf[256], *path;
+
path = dentry_path(dev->heaps_debug_root, buf, 256);
pr_err("Failed to create heap debugfs at %s/%s\n",
path, heap->name);
&debug_shrink_fops);
if (!debug_file) {
char buf[256], *path;
+
path = dentry_path(dev->heaps_debug_root, buf, 256);
pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
path, debug_name);
{
struct ion_device *idev;
int ret;
+	struct dentry *ion_debug;
idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
if (!idev)
rockchip_ion_snapshot_debugfs(idev->debug_root);
#endif
+	ion_debug = debugfs_create_file("debug", 0664, idev->debug_root,
+					NULL, &rockchip_ion_debug_fops);
+	if (!ion_debug) {
+		char buf[256], *path;
+
+		path = dentry_path(idev->debug_root, buf, 256);
+		pr_err("Failed to create debugfs at %s/%s\n",
+		       path, "debug");
+	}
+
+
debugfs_done:
idev->custom_ioctl = custom_ioctl;
data->heaps[i].base = PFN_PHYS(dev_get_cma_area(dev)->base_pfn);
} else if (data->heaps[i].base == 0) {
phys_addr_t paddr;
+
paddr = memblock_alloc_base(data->heaps[i].size,
data->heaps[i].align,
MEMBLOCK_ALLOC_ANYWHERE);