 * Copyright (c) 2014, STMicroelectronics International N.V.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License Version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-buf.h>
#include <linux/hugetlb.h>
#include <linux/sched.h>

#include "tee_core_priv.h"
#define INMSG() dev_dbg(_DEV(tee), "%s: >\n", __func__)
#define OUTMSG(val) dev_dbg(_DEV(tee), "%s: < %ld\n", __func__, (long)val)
#define OUTMSGX(val) dev_dbg(_DEV(tee), "%s: < %08x\n",\
			__func__, (unsigned int)(long)val)
#if (sizeof(TEEC_SharedMemory) != sizeof(tee_shm))
#error "sizeof(TEEC_SharedMemory) != sizeof(tee_shm)"
struct tee_shm_attach {
	struct sg_table sgt;
	enum dma_data_direction dir;
	bool is_mapped;
};
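/*
 * Allocate a temporary shared-memory buffer on behalf of an RPC request
 * from the secure world and track it on tee->list_rpc_shm (under
 * tee->lock, with the SHM statistics counter bumped). The buffer is
 * released with tee_shm_free_from_rpc().
 */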
struct tee_shm *tee_shm_alloc_from_rpc(struct tee *tee, size_t size)

	mutex_lock(&tee->lock);
	shm = tee_shm_alloc(tee, size, TEE_SHM_TEMP | TEE_SHM_FROM_RPC);
	if (IS_ERR_OR_NULL(shm)) {
		dev_err(_DEV(tee), "%s: buffer allocation failed (%ld)\n",
			__func__, PTR_ERR(shm));

	tee_inc_stats(&tee->stats[TEE_STATS_SHM_IDX]);
	list_add_tail(&shm->entry, &tee->list_rpc_shm);

	mutex_unlock(&tee->lock);
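/*
 * Release a buffer obtained from tee_shm_alloc_from_rpc(). If the buffer
 * is not bound to a context (shm->ctx == NULL), it is unlinked from the
 * RPC list and the SHM statistics counter is decremented, under tee->lock.
 */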
void tee_shm_free_from_rpc(struct tee_shm *shm)

	mutex_lock(&tee->lock);
	if (shm->ctx == NULL) {
		tee_dec_stats(&shm->tee->stats[TEE_STATS_SHM_IDX]);
		list_del(&shm->entry);

	mutex_unlock(&tee->lock);
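/*
 * Core allocator: ask the backend (tee->ops->alloc) for a buffer, then
 * build the scatter-gather table describing it from the PFN of its
 * physical address. Returns the new tee_shm, or an error pointer.
 */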
struct tee_shm *tee_shm_alloc(struct tee *tee, size_t size, uint32_t flags)

	unsigned int nr_pages;

	shm = tee->ops->alloc(tee, size, flags);
	if (IS_ERR_OR_NULL(shm)) {
		dev_err(_DEV(tee),
			"%s: allocation failed (s=%d,flags=0x%08x) err=%ld\n",
			__func__, (int)size, flags, PTR_ERR(shm));

	dev_dbg(_DEV(tee), "%s: shm=%p, paddr=%p,s=%d/%d app=\"%s\" pid=%d\n",
		__func__, shm, (void *)shm->paddr, (int)shm->size_req,
		(int)shm->size_alloc, current->comm, current->pid);

	pfn = shm->paddr >> PAGE_SHIFT;
	page = pfn_to_page(pfn);
	if (IS_ERR_OR_NULL(page)) {
		dev_err(_DEV(tee), "%s: pfn_to_page(%lx) failed\n",
		return (struct tee_shm *)page;

	/* Only one page of contiguous physical memory */
	ret = sg_alloc_table_from_pages(&shm->sgt, &page,
		nr_pages, 0, nr_pages * PAGE_SIZE, GFP_KERNEL);
	if (IS_ERR_VALUE(ret)) {
		dev_err(_DEV(tee), "%s: sg_alloc_table_from_pages() failed\n",
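/*
 * Free a buffer returned by tee_shm_alloc(): release its sg table and
 * hand it back to the backend allocator. NULL/error pointers and buffers
 * without an associated struct tee are only warned about.
 */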
void tee_shm_free(struct tee_shm *shm)

	if (IS_ERR_OR_NULL(shm))

		pr_warn("invalid call to tee_shm_free(%p): NULL tee\n", shm);
	else if (shm->tee == NULL)
		dev_warn(_DEV(tee), "tee_shm_free(%p): NULL tee\n", shm);

	sg_free_table(&shm->sgt);
	shm->tee->ops->free(shm);
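/*
 * dma_buf attach callback: allocate a per-attachment tee_shm_attach
 * tracking structure (direction initialised to DMA_NONE) and store it in
 * attach->priv for later mapping/unmapping.
 */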
static int _tee_shm_attach_dma_buf(struct dma_buf *dmabuf,
				   struct dma_buf_attachment *attach)

	struct tee_shm_attach *tee_shm_attach;

	tee_shm_attach = devm_kzalloc(_DEV(tee),
			sizeof(*tee_shm_attach), GFP_KERNEL);
	if (!tee_shm_attach) {

	tee_shm_attach->dir = DMA_NONE;
	attach->priv = tee_shm_attach;

static void _tee_shm_detach_dma_buf(struct dma_buf *dmabuf,
				    struct dma_buf_attachment *attach)

	struct tee_shm_attach *tee_shm_attach = attach->priv;
	struct sg_table *sgt;

	if (!tee_shm_attach) {

	sgt = &tee_shm_attach->sgt;

	if (tee_shm_attach->dir != DMA_NONE)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
			     tee_shm_attach->dir);

	devm_kfree(_DEV(tee), tee_shm_attach);
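/*
 * dma_buf map callback: duplicate the buffer's scatter-gather list into
 * the per-attachment table and, unless dir is DMA_NONE, map it for DMA on
 * the attaching device. A table already mapped for the same direction is
 * returned as-is.
 */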
static struct sg_table *_tee_shm_dma_buf_map_dma_buf(
	struct dma_buf_attachment *attach, enum dma_data_direction dir)

	struct tee_shm_attach *tee_shm_attach = attach->priv;
	struct tee_shm *tee_shm = attach->dmabuf->priv;
	struct sg_table *sgt = NULL;
	struct scatterlist *rd, *wr;

	/* Just return the current sgt if it is already mapped for this direction. */
	if (tee_shm_attach->dir == dir && tee_shm_attach->is_mapped) {
		OUTMSGX(&tee_shm_attach->sgt);
		return &tee_shm_attach->sgt;

	sgt = &tee_shm_attach->sgt;

	ret = sg_alloc_table(sgt, tee_shm->sgt.orig_nents, GFP_KERNEL);
		dev_err(_DEV(tee), "failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);

	rd = tee_shm->sgt.sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);

	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
			dev_err(_DEV(tee), "failed to map sgl with iommu.\n");

	tee_shm_attach->is_mapped = true;
	tee_shm_attach->dir = dir;
	attach->priv = tee_shm_attach;
static void _tee_shm_dma_buf_unmap_dma_buf(struct dma_buf_attachment *attach,
					   struct sg_table *table,
					   enum dma_data_direction dir)

static void _tee_shm_dma_buf_release(struct dma_buf *dmabuf)

	struct tee_shm *shm = dmabuf->priv;
	struct tee_context *ctx;

	dev_dbg(_DEV(ctx->tee), "%s: shm=%p, paddr=%p,s=%d/%d app=\"%s\" pid=%d\n",
		__func__, shm, (void *)shm->paddr, (int)shm->size_req,
		(int)shm->size_alloc, current->comm, current->pid);

	tee_shm_free_io(shm);
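/*
 * dma_buf mmap callback: remap the buffer's physical pages into the user
 * vma, cached or uncached depending on TEE_SHM_CACHED, and remember the
 * tee_shm in vm_private_data.
 */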
static int _tee_shm_dma_buf_mmap(struct dma_buf *dmabuf,
				 struct vm_area_struct *vma)

	struct tee_shm *shm = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	pfn = shm->paddr >> PAGE_SHIFT;

	if (shm->flags & TEE_SHM_CACHED)
		prot = vma->vm_page_prot;
	else
		prot = pgprot_noncached(vma->vm_page_prot);

	remap_pfn_range(vma, vma->vm_start, pfn, size, prot);

	vma->vm_private_data = (void *)shm;

	dev_dbg(_DEV(shm->ctx->tee), "%s: map the shm (p@=%p,s=%dKiB) => %x\n",
		__func__, (void *)shm->paddr, (int)size / 1024,
		(unsigned int)vma->vm_start);
static void *_tee_shm_dma_buf_kmap_atomic(struct dma_buf *dmabuf,

static void *_tee_shm_dma_buf_kmap(struct dma_buf *db, unsigned long pgnum)

	struct tee_shm *shm = db->priv;

	dev_dbg(_DEV(shm->ctx->tee), "%s: kmap the shm (p@=%p, v@=%p, s=%zdKiB)\n",
		__func__, (void *)shm->paddr, (void *)shm->kaddr,
		shm->size_alloc / 1024);
	/*
	 * At this stage, a shm allocated by the tee
	 * must have a kernel address.
	 */

static void _tee_shm_dma_buf_kunmap(
	struct dma_buf *db, unsigned long pfn, void *kaddr)

	/* unmap is done at the de-init of the shm pool */
struct dma_buf_ops _tee_shm_dma_buf_ops = {
	.attach = _tee_shm_attach_dma_buf,
	.detach = _tee_shm_detach_dma_buf,
	.map_dma_buf = _tee_shm_dma_buf_map_dma_buf,
	.unmap_dma_buf = _tee_shm_dma_buf_unmap_dma_buf,
	.release = _tee_shm_dma_buf_release,
	.kmap_atomic = _tee_shm_dma_buf_kmap_atomic,
	.kmap = _tee_shm_dma_buf_kmap,
	.kunmap = _tee_shm_dma_buf_kunmap,
	.mmap = _tee_shm_dma_buf_mmap,
};

/******************************************************************************/
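/*
 * Export a tee_shm as a dma_buf and return a file descriptor for it in
 * *export. The DEFINE_DMA_BUF_EXPORT_INFO path covers kernels with the
 * newer dma_buf_export() signature, the other branch the older one.
 */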
static int export_buf(struct tee *tee, struct tee_shm *shm, int *export)

	struct dma_buf *dmabuf;

	/* Temporary fix to support both older and newer kernel versions. */
#if defined(DEFINE_DMA_BUF_EXPORT_INFO)
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &_tee_shm_dma_buf_ops;
	exp_info.size = shm->size_alloc;
	exp_info.flags = O_RDWR;

	dmabuf = dma_buf_export(&exp_info);
#else
	dmabuf = dma_buf_export(shm, &_tee_shm_dma_buf_ops, shm->size_alloc,

	if (IS_ERR_OR_NULL(dmabuf)) {
		dev_err(_DEV(tee), "%s: dmabuf: couldn't export buffer (%ld)\n",
			__func__, PTR_ERR(dmabuf));

	*export = dma_buf_fd(dmabuf, O_CLOEXEC);
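/*
 * Allocate a shared-memory buffer for the ioctl interface. For user-space
 * clients the buffer is exported as a dma_buf fd (shm_io->fd_shm); the
 * buffer is then accounted to the context and linked on ctx->list_shm.
 */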
int tee_shm_alloc_io(struct tee_context *ctx, struct tee_shm_io *shm_io)

	struct tee *tee = ctx->tee;

	mutex_lock(&tee->lock);
	shm = tee_shm_alloc(tee, shm_io->size, shm_io->flags);
	if (IS_ERR_OR_NULL(shm)) {
		dev_err(_DEV(tee), "%s: buffer allocation failed (%ld)\n",
			__func__, PTR_ERR(shm));

	if (ctx->usr_client) {
		ret = export_buf(tee, shm, &shm_io->fd_shm);

		shm->flags |= TEEC_MEM_DMABUF;

	shm->dev = get_device(_DEV(tee));
	BUG_ON(ret);	/* tee_core_get must not fail */
	tee_context_get(ctx);

	tee_inc_stats(&tee->stats[TEE_STATS_SHM_IDX]);
	list_add_tail(&shm->entry, &ctx->list_shm);

	mutex_unlock(&tee->lock);
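/*
 * Counterpart of tee_shm_alloc_io(): unlink the buffer from its context,
 * update the SHM statistics and drop the context and device references
 * taken at allocation time.
 */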
void tee_shm_free_io(struct tee_shm *shm)

	struct tee_context *ctx = shm->ctx;
	struct tee *tee = ctx->tee;
	struct device *dev = shm->dev;

	mutex_lock(&ctx->tee->lock);
	tee_dec_stats(&tee->stats[TEE_STATS_SHM_IDX]);
	list_del(&shm->entry);

	tee_context_put(ctx);

	mutex_unlock(&ctx->tee->lock);
/* Buffer allocated via RPC from the firmware and to be accessed by the user.
 * It does not need to be registered, as it was not allocated by the user. */
int tee_shm_fd_for_rpc(struct tee_context *ctx, struct tee_shm_io *shm_io)

	struct tee_shm *shm = NULL;
	struct tee *tee = ctx->tee;
	struct list_head *pshm;

	mutex_lock(&tee->lock);
	if (!list_empty(&tee->list_rpc_shm)) {
		list_for_each(pshm, &tee->list_rpc_shm) {
			shm = list_entry(pshm, struct tee_shm, entry);
			if ((void *)shm->paddr == shm_io->buffer)

		dev_err(_DEV(tee), "Can't find shm for %p\n", (void *)shm_io->buffer);

	ret = export_buf(tee, shm, &shm_io->fd_shm);

	list_move(&shm->entry, &ctx->list_shm);

	shm->dev = get_device(_DEV(tee));

	tee_context_get(ctx);

	BUG_ON(!tee->ops->shm_inc_ref(shm));

	mutex_unlock(&tee->lock);

/******************************************************************************/
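/*
 * Resolve a user-provided dma_buf fd into a physical address: attach to
 * the dma_buf, map the attachment (a single contiguous sg entry is
 * expected) and store the resulting address, plus the requested offset,
 * in shm->paddr.
 */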
static int tee_shm_db_get(struct tee *tee, struct tee_shm *shm, int fd,
			  unsigned int flags, size_t size, int offset)

	struct tee_shm_dma_buf *sdb;
	struct dma_buf *dma_buf;

	dev_dbg(_DEV(tee), "%s: > fd=%d flags=%08x\n", __func__, fd, flags);

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf)) {
		ret = PTR_ERR(dma_buf);

	sdb = kzalloc(sizeof(*sdb), GFP_KERNEL);
	if (IS_ERR_OR_NULL(sdb)) {
		dev_err(_DEV(tee), "can't alloc tee_shm_dma_buf\n");

	if (dma_buf->size < size + offset) {
		dev_err(_DEV(tee), "dma_buf too small %zd < %zd + %d\n",
			dma_buf->size, size, offset);

	sdb->attach = dma_buf_attach(dma_buf, _DEV(tee));
	if (IS_ERR_OR_NULL(sdb->attach)) {
		ret = PTR_ERR(sdb->attach);

	sdb->sgt = dma_buf_map_attachment(sdb->attach, DMA_NONE);
	if (IS_ERR_OR_NULL(sdb->sgt)) {
		ret = PTR_ERR(sdb->sgt);

	if (sg_nents(sdb->sgt->sgl) != 1) {

	shm->paddr = sg_phys(sdb->sgt->sgl) + offset;
	if (dma_buf->ops->attach == _tee_shm_attach_dma_buf)
		sdb->tee_allocated = true;
	else
		sdb->tee_allocated = false;

	shm->flags |= TEEC_MEM_DMABUF;

	dev_dbg(_DEV(tee), "fd=%d @p=%p is_tee=%d db=%p\n", fd,
		(void *)shm->paddr, sdb->tee_allocated, dma_buf);

	dma_buf_unmap_attachment(sdb->attach, sdb->sgt, DMA_NONE);

	dma_buf_detach(dma_buf, sdb->attach);

	dma_buf_put(dma_buf);

#ifdef VA_GET_ENABLED
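/*
 * Translate the kernel-space address @kla into a physical address by
 * walking the page tables of @mm (see __follow_pte() in mm/memory.c).
 */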
static unsigned int tee_shm_get_phy_from_kla(
		struct mm_struct *mm, unsigned int kla)

	/* Stolen from kernel 3.10, mm/memory.c: __follow_pte() */
	pgd = pgd_offset(mm, kla);
	if (pgd_none(*pgd) || pgd_bad(*pgd))

	pud = pud_offset(pgd, kla);
	if (pud_none(*pud) || pud_bad(*pud))

	pmd = pmd_offset(pud, kla);
	VM_BUG_ON(pmd_trans_huge(*pmd));
	if (pmd_none(*pmd) || pmd_bad(*pmd))

	/* We cannot handle huge page PFN maps.
	 * Luckily they don't exist. */

	ptep = pte_offset_map(pmd, kla);

	if (pte_present(pte))
		pa = __pa(page_address(pte_page(pte)));
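/*
 * Resolve a virtual address passed in a memory reference: if it falls in
 * a vma it is treated as a user address and translated with follow_pfn();
 * otherwise, for kernel-API callers only, the page tables are walked and
 * physical continuity of the whole range is checked.
 */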
static int tee_shm_va_get(struct tee_context *ctx, struct tee_shm *shm,
		void *buffer, unsigned int flags, size_t size, int offset)

	struct mm_struct *mm = current->mm;
	unsigned long va = (unsigned long)buffer;
	unsigned int virt_base = (va / PAGE_SIZE) * PAGE_SIZE;
	unsigned int offset_in_page = va - virt_base;
	unsigned int offset_total = offset_in_page + offset;
	struct vm_area_struct *vma;
	struct tee *tee = ctx->tee;

	dev_dbg(_DEV(tee), "%s: > %p\n", __func__, buffer);

	/* If the caller is the kernel API, active_mm is the mm to use */
		mm = current->active_mm;

	vma = find_vma(mm, virt_base);

		/* It's a VMA => consider it a user address */
		if (follow_pfn(vma, virt_base, &pfn)) {
			dev_err(_DEV(tee), "%s can't get pfn for %p\n",

		shm->paddr = PFN_PHYS(pfn) + offset_total;

		if (vma->vm_end - vma->vm_start - offset_total < size) {
			dev_err(_DEV(tee), "%s %p:%x not big enough: %lx - %d < %x\n",
				__func__, buffer, shm->paddr,
				vma->vm_end - vma->vm_start,

	} else if (!ctx->usr_client) {
		/*
		 * It's not a VMA => consider it a kernel address, and check
		 * whether it is a physical address known internally.
		 * Note: virt_to_phys() is not usable here since the address
		 * can be either a direct mapping or a remapped address.
		 */
		unsigned int phys_base;
		int nb_page = (PAGE_SIZE - 1 + size + offset_total) / PAGE_SIZE;

		spin_lock(&mm->page_table_lock);
		phys_base = tee_shm_get_phy_from_kla(mm, virt_base);

			spin_unlock(&mm->page_table_lock);
			dev_err(_DEV(tee), "%s can't get physical address for %p\n",

		/* Check that the whole size is physically contiguous */
		for (i = 1; i < nb_page; i++) {
			unsigned int pa = tee_shm_get_phy_from_kla(mm,
					virt_base + i*PAGE_SIZE);
			if (pa != phys_base + i*PAGE_SIZE) {
				spin_unlock(&mm->page_table_lock);
				dev_err(_DEV(tee), "%s %p:%x not big enough: %lx - %d < %x\n",
					__func__, buffer, phys_base,

		spin_unlock(&mm->page_table_lock);

		shm->paddr = phys_base + offset_total;

	dev_dbg(_DEV(tee), "%s: < %d shm=%p vaddr=%p paddr=%x\n",
		__func__, ret, (void *)shm, buffer, shm->paddr);
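/*
 * Turn a client TEEC_SharedMemory into a tee_shm memory reference. The
 * physical address is taken from the kernel-API tee_shm (TEEC_MEM_KAPI),
 * from a dma_buf fd, or from the buffer virtual address when
 * VA_GET_ENABLED is set; a NULL buffer is passed through as-is.
 */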
struct tee_shm *tee_shm_get(struct tee_context *ctx, TEEC_SharedMemory *c_shm,
			    size_t size, int offset)

	struct tee *tee = ctx->tee;

	dev_dbg(_DEV(tee), "%s: > fd=%d flags=%08x\n",
		__func__, c_shm->d.fd, c_shm->flags);

	mutex_lock(&tee->lock);
	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (IS_ERR_OR_NULL(shm)) {
		dev_err(_DEV(tee), "can't alloc tee_shm\n");

	shm->dev = _DEV(tee);
	shm->flags = c_shm->flags | TEE_SHM_MEMREF;
	shm->size_req = size;

	if (c_shm->flags & TEEC_MEM_KAPI) {
		struct tee_shm *kc_shm = (struct tee_shm *)c_shm->d.ptr;

			dev_err(_DEV(tee), "kapi fd null\n");

		shm->paddr = kc_shm->paddr;

		if (kc_shm->size_alloc < size + offset) {
			dev_err(_DEV(tee), "kapi buff too small %zd < %zd + %d\n",
				kc_shm->size_alloc, size, offset);

		dev_dbg(_DEV(tee), "fd=%d @p=%p\n",
			c_shm->d.fd, (void *)shm->paddr);
	} else if (c_shm->d.fd) {
		ret = tee_shm_db_get(tee, shm,
			c_shm->d.fd, c_shm->flags, size, offset);

	} else if (!c_shm->buffer) {
		dev_dbg(_DEV(tee), "null buffer, pass 'as is'\n");
	} else {
#ifdef VA_GET_ENABLED
		ret = tee_shm_va_get(ctx, shm,
			c_shm->buffer, c_shm->flags, size, offset);

	mutex_unlock(&tee->lock);

	mutex_unlock(&tee->lock);
	OUTMSGX(ERR_PTR(ret));
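/*
 * Drop a reference obtained with tee_shm_get(). For dma_buf backed
 * memory references, the attachment is unmapped and detached and the
 * dma_buf reference is released.
 */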
void tee_shm_put(struct tee_context *ctx, struct tee_shm *shm)

	struct tee *tee = ctx->tee;

	dev_dbg(_DEV(tee), "%s: > shm=%p flags=%08x paddr=%p\n",
		__func__, (void *)shm, shm->flags, (void *)shm->paddr);

	BUG_ON(!(shm->flags & TEE_SHM_MEMREF));

	mutex_lock(&tee->lock);
	if (shm->flags & TEEC_MEM_DMABUF) {
		struct tee_shm_dma_buf *sdb;
		struct dma_buf *dma_buf;

		dma_buf = sdb->attach->dmabuf;

		dev_dbg(_DEV(tee), "%s: db=%p\n", __func__, (void *)dma_buf);

		dma_buf_unmap_attachment(sdb->attach, sdb->sgt, DMA_NONE);
		dma_buf_detach(dma_buf, sdb->attach);
		dma_buf_put(dma_buf);

	mutex_unlock(&tee->lock);