OP-TEE: update optee_linuxdriver to match updated optee_os & optee_client
[firefly-linux-kernel-4.4.55.git] security/optee_linuxdriver/core/tee_shm.c
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License Version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-buf.h>
#include <linux/hugetlb.h>

#include <linux/sched.h>
#include <linux/mm.h>

#include "tee_core_priv.h"
#include "tee_shm.h"

#define INMSG() dev_dbg(_DEV(tee), "%s: >\n", __func__)
#define OUTMSG(val) dev_dbg(_DEV(tee), "%s: < %ld\n", __func__, (long)val)
#define OUTMSGX(val) dev_dbg(_DEV(tee), "%s: < %08x\n",\
                __func__, (unsigned int)(long)val)

/* TODO
#if (sizeof(TEEC_SharedMemory) != sizeof(tee_shm))
#error "sizeof(TEEC_SharedMemory) != sizeof(tee_shm)"
#endif
*/
struct tee_shm_attach {
        struct sg_table sgt;
        enum dma_data_direction dir;
        bool is_mapped;
};

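/*
 * tee_shm_alloc_from_rpc() - allocate a temporary shared memory buffer on
 * behalf of an RPC request from the secure world. The buffer is accounted
 * in the TEE statistics and queued on tee->list_rpc_shm; it has no owning
 * context (shm->ctx == NULL) until a client claims it.
 */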
struct tee_shm *tee_shm_alloc_from_rpc(struct tee *tee, size_t size)
{
        struct tee_shm *shm;

        INMSG();

        mutex_lock(&tee->lock);
        shm = tee_shm_alloc(tee, size, TEE_SHM_TEMP | TEE_SHM_FROM_RPC);
        if (IS_ERR_OR_NULL(shm)) {
                dev_err(_DEV(tee), "%s: buffer allocation failed (%ld)\n",
                        __func__, PTR_ERR(shm));
                goto out;
        }

        tee_inc_stats(&tee->stats[TEE_STATS_SHM_IDX]);
        list_add_tail(&shm->entry, &tee->list_rpc_shm);

        shm->ctx = NULL;

out:
        mutex_unlock(&tee->lock);
        OUTMSGX(shm);
        return shm;
}

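/*
 * tee_shm_free_from_rpc() - release a buffer obtained with
 * tee_shm_alloc_from_rpc(). If the buffer was never bound to a context it
 * is also removed from tee->list_rpc_shm and the statistics are updated.
 */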
void tee_shm_free_from_rpc(struct tee_shm *shm)
{
        struct tee *tee;

        if (shm == NULL)
                return;
        tee = shm->tee;
        mutex_lock(&tee->lock);
        if (shm->ctx == NULL) {
                tee_dec_stats(&shm->tee->stats[TEE_STATS_SHM_IDX]);
                list_del(&shm->entry);
        }

        tee_shm_free(shm);
        mutex_unlock(&tee->lock);
}

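/*
 * tee_shm_alloc() - allocate a shared memory buffer through the backend
 * allocator (tee->ops->alloc) and build a single-entry scatter-gather table
 * covering the physically contiguous page that backs it. Returns an ERR_PTR
 * on failure.
 */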
struct tee_shm *tee_shm_alloc(struct tee *tee, size_t size, uint32_t flags)
{
        struct tee_shm *shm;
        unsigned long pfn;
        unsigned int nr_pages;
        struct page *page;
        int ret;

        INMSG();

        shm = tee->ops->alloc(tee, size, flags);
        if (IS_ERR_OR_NULL(shm)) {
                dev_err(_DEV(tee),
                        "%s: allocation failed (s=%d,flags=0x%08x) err=%ld\n",
                        __func__, (int)size, flags, PTR_ERR(shm));
                goto exit;
        }

        shm->tee = tee;

        dev_dbg(_DEV(tee), "%s: shm=%p, paddr=%p,s=%d/%d app=\"%s\" pid=%d\n",
                 __func__, shm, (void *)shm->paddr, (int)shm->size_req,
                 (int)shm->size_alloc, current->comm, current->pid);

        pfn = shm->paddr >> PAGE_SHIFT;
        page = pfn_to_page(pfn);
        if (IS_ERR_OR_NULL(page)) {
                dev_err(_DEV(tee), "%s: pfn_to_page(%lx) failed\n",
                                __func__, pfn);
                tee->ops->free(shm);
                return (struct tee_shm *)page;
        }

        /* Only one page of contiguous physical memory */
        nr_pages = 1;

        ret = sg_alloc_table_from_pages(&shm->sgt, &page,
                        nr_pages, 0, nr_pages * PAGE_SIZE, GFP_KERNEL);
        if (IS_ERR_VALUE(ret)) {
                dev_err(_DEV(tee), "%s: sg_alloc_table_from_pages() failed\n",
                                __func__);
                tee->ops->free(shm);
                shm = ERR_PTR(ret);
        }
exit:
        OUTMSGX(shm);
        return shm;
}

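/*
 * tee_shm_free() - release the scatter-gather table and hand the buffer
 * back to the backend allocator. Error pointers and NULL are ignored.
 */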
void tee_shm_free(struct tee_shm *shm)
{
        struct tee *tee;

        if (IS_ERR_OR_NULL(shm))
                return;
        tee = shm->tee;
        if (tee == NULL)
                pr_warn("invalid call to tee_shm_free(%p): NULL tee\n", shm);
        else if (shm->tee == NULL)
                dev_warn(_DEV(tee), "tee_shm_free(%p): NULL tee\n", shm);
        else {
                sg_free_table(&shm->sgt);
                shm->tee->ops->free(shm);
        }
}

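/*
 * dma-buf callbacks: buffers allocated above are exported to user space as
 * dma-buf file descriptors (see export_buf() below). The callbacks keep a
 * per-attachment copy of the buffer's sg_table and map it for DMA on demand.
 */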
static int _tee_shm_attach_dma_buf(struct dma_buf *dmabuf,
                                        struct device *dev,
                                        struct dma_buf_attachment *attach)
{
        struct tee_shm_attach *tee_shm_attach;
        struct tee_shm *shm;
        struct tee *tee;

        shm = dmabuf->priv;
        tee = shm->tee;

        INMSG();

        tee_shm_attach = devm_kzalloc(_DEV(tee),
                        sizeof(*tee_shm_attach), GFP_KERNEL);
        if (!tee_shm_attach) {
                OUTMSG(-ENOMEM);
                return -ENOMEM;
        }

        tee_shm_attach->dir = DMA_NONE;
        attach->priv = tee_shm_attach;

        OUTMSG(0);
        return 0;
}

static void _tee_shm_detach_dma_buf(struct dma_buf *dmabuf,
                                        struct dma_buf_attachment *attach)
{
        struct tee_shm_attach *tee_shm_attach = attach->priv;
        struct sg_table *sgt;
        struct tee_shm *shm;
        struct tee *tee;

        shm = dmabuf->priv;
        tee = shm->tee;

        INMSG();

        if (!tee_shm_attach) {
                OUTMSG(0);
                return;
        }

        sgt = &tee_shm_attach->sgt;

        if (tee_shm_attach->dir != DMA_NONE)
                dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
                        tee_shm_attach->dir);

        sg_free_table(sgt);
        devm_kfree(_DEV(tee), tee_shm_attach);
        attach->priv = NULL;
        OUTMSG(0);
}

static struct sg_table *_tee_shm_dma_buf_map_dma_buf(
                struct dma_buf_attachment *attach, enum dma_data_direction dir)
{
        struct tee_shm_attach *tee_shm_attach = attach->priv;
        struct tee_shm *tee_shm = attach->dmabuf->priv;
        struct sg_table *sgt = NULL;
        struct scatterlist *rd, *wr;
        unsigned int i;
        int nents, ret;
        struct tee *tee;

        tee = tee_shm->tee;

        INMSG();

        /* just return current sgt if already requested. */
        if (tee_shm_attach->dir == dir && tee_shm_attach->is_mapped) {
                OUTMSGX(&tee_shm_attach->sgt);
                return &tee_shm_attach->sgt;
        }

        sgt = &tee_shm_attach->sgt;

        ret = sg_alloc_table(sgt, tee_shm->sgt.orig_nents, GFP_KERNEL);
        if (ret) {
                dev_err(_DEV(tee), "failed to alloc sgt.\n");
                return ERR_PTR(-ENOMEM);
        }

        rd = tee_shm->sgt.sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        if (dir != DMA_NONE) {
                nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
                if (!nents) {
                        dev_err(_DEV(tee), "failed to map sgl with iommu.\n");
                        sg_free_table(sgt);
                        sgt = ERR_PTR(-EIO);
                        goto err_unlock;
                }
        }

        tee_shm_attach->is_mapped = true;
        tee_shm_attach->dir = dir;
        attach->priv = tee_shm_attach;

err_unlock:
        OUTMSGX(sgt);
        return sgt;
}

static void _tee_shm_dma_buf_unmap_dma_buf(struct dma_buf_attachment *attach,
                                          struct sg_table *table,
                                          enum dma_data_direction dir)
{
        return;
}

static void _tee_shm_dma_buf_release(struct dma_buf *dmabuf)
{
        struct tee_shm *shm = dmabuf->priv;
        struct tee_context *ctx;
        struct tee *tee;

        tee = shm->ctx->tee;

        INMSG();

        ctx = shm->ctx;
        dev_dbg(_DEV(ctx->tee), "%s: shm=%p, paddr=%p,s=%d/%d app=\"%s\" pid=%d\n",
                 __func__, shm, (void *)shm->paddr, (int)shm->size_req,
                 (int)shm->size_alloc, current->comm, current->pid);

        tee_shm_free_io(shm);

        OUTMSG(0);
}

static int _tee_shm_dma_buf_mmap(struct dma_buf *dmabuf,
                                struct vm_area_struct *vma)
{
        struct tee_shm *shm = dmabuf->priv;
        size_t size = vma->vm_end - vma->vm_start;
        struct tee *tee;
        int ret;
        pgprot_t prot;
        unsigned long pfn;

        tee = shm->ctx->tee;

        pfn = shm->paddr >> PAGE_SHIFT;

        INMSG();

        if (shm->flags & TEE_SHM_CACHED)
                prot = vma->vm_page_prot;
        else
                prot = pgprot_noncached(vma->vm_page_prot);

        ret = remap_pfn_range(vma, vma->vm_start, pfn, size, prot);
        if (!ret)
                vma->vm_private_data = (void *)shm;

        dev_dbg(_DEV(shm->ctx->tee), "%s: map the shm (p@=%p,s=%dKiB) => %x\n",
                __func__, (void *)shm->paddr, (int)size / 1024,
                (unsigned int)vma->vm_start);

        OUTMSG(ret);
        return ret;
}

static void *_tee_shm_dma_buf_kmap_atomic(struct dma_buf *dmabuf,
                                         unsigned long pgnum)
{
        return NULL;
}

static void *_tee_shm_dma_buf_kmap(struct dma_buf *db, unsigned long pgnum)
{
        struct tee_shm *shm = db->priv;

        dev_dbg(_DEV(shm->ctx->tee), "%s: kmap the shm (p@=%p, v@=%p, s=%zdKiB)\n",
                __func__, (void *)shm->paddr, (void *)shm->kaddr,
                shm->size_alloc / 1024);
        /*
         * At this stage, a shm allocated by the tee
         * must have a kernel address
         */
        return shm->kaddr;
}

static void _tee_shm_dma_buf_kunmap(
                struct dma_buf *db, unsigned long pfn, void *kaddr)
{
        /* unmap is done at the de-init of the shm pool */
}

struct dma_buf_ops _tee_shm_dma_buf_ops = {
        .attach = _tee_shm_attach_dma_buf,
        .detach = _tee_shm_detach_dma_buf,
        .map_dma_buf = _tee_shm_dma_buf_map_dma_buf,
        .unmap_dma_buf = _tee_shm_dma_buf_unmap_dma_buf,
        .release = _tee_shm_dma_buf_release,
        .kmap_atomic = _tee_shm_dma_buf_kmap_atomic,
        .kmap = _tee_shm_dma_buf_kmap,
        .kunmap = _tee_shm_dma_buf_kunmap,
        .mmap = _tee_shm_dma_buf_mmap,
};

/******************************************************************************/

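/*
 * export_buf() - wrap a tee_shm in a dma-buf and return a file descriptor
 * for it in *export. The DEFINE_DMA_BUF_EXPORT_INFO branch covers kernels
 * where dma_buf_export() takes a struct dma_buf_export_info; the fallback
 * uses the older four-argument form.
 */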
static int export_buf(struct tee *tee, struct tee_shm *shm, int *export)
{
        struct dma_buf *dmabuf;
        int ret = 0;
        /* Temporary fix to support both older and newer kernel versions. */
#if defined(DEFINE_DMA_BUF_EXPORT_INFO)
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.priv = shm;
        exp_info.ops = &_tee_shm_dma_buf_ops;
        exp_info.size = shm->size_alloc;
        exp_info.flags = O_RDWR;

        dmabuf = dma_buf_export(&exp_info);
#else
        dmabuf = dma_buf_export(shm, &_tee_shm_dma_buf_ops, shm->size_alloc,
                                O_RDWR);
#endif
        if (IS_ERR_OR_NULL(dmabuf)) {
                dev_err(_DEV(tee), "%s: dmabuf: couldn't export buffer (%ld)\n",
                        __func__, PTR_ERR(dmabuf));
                ret = -EINVAL;
                goto out;
        }

        *export = dma_buf_fd(dmabuf, O_CLOEXEC);
out:
        OUTMSG(ret);
        return ret;
}

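/*
 * tee_shm_alloc_io() - allocate a shared memory buffer for the ioctl
 * interface. For user space clients the buffer is also exported as a
 * dma-buf file descriptor (shm_io->fd_shm). The buffer is bound to the
 * context, which takes references on the device, the tee and the context.
 */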
int tee_shm_alloc_io(struct tee_context *ctx, struct tee_shm_io *shm_io)
{
        struct tee_shm *shm;
        struct tee *tee = ctx->tee;
        int ret;

        INMSG();

        if (ctx->usr_client)
                shm_io->fd_shm = 0;

        mutex_lock(&tee->lock);
        shm = tee_shm_alloc(tee, shm_io->size, shm_io->flags);
        if (IS_ERR_OR_NULL(shm)) {
                dev_err(_DEV(tee), "%s: buffer allocation failed (%ld)\n",
                        __func__, PTR_ERR(shm));
                ret = PTR_ERR(shm);
                goto out;
        }

        if (ctx->usr_client) {
                ret = export_buf(tee, shm, &shm_io->fd_shm);
                if (ret) {
                        tee_shm_free(shm);
                        ret = -ENOMEM;
                        goto out;
                }

                shm->flags |= TEEC_MEM_DMABUF;
        }

        shm->ctx = ctx;
        shm->dev = get_device(_DEV(tee));
        ret = tee_get(tee);
        BUG_ON(ret);            /* tee_get() must not fail here */
        tee_context_get(ctx);

        tee_inc_stats(&tee->stats[TEE_STATS_SHM_IDX]);
        list_add_tail(&shm->entry, &ctx->list_shm);
out:
        mutex_unlock(&tee->lock);
        OUTMSG(ret);
        return ret;
}

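/*
 * tee_shm_free_io() - counterpart of tee_shm_alloc_io(): unlink the buffer
 * from its context, free it and drop the device, tee and context references
 * taken at allocation time.
 */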
void tee_shm_free_io(struct tee_shm *shm)
{
        struct tee_context *ctx = shm->ctx;
        struct tee *tee = ctx->tee;
        struct device *dev = shm->dev;

        mutex_lock(&ctx->tee->lock);
        tee_dec_stats(&tee->stats[TEE_STATS_SHM_IDX]);
        list_del(&shm->entry);

        tee_shm_free(shm);
        tee_put(ctx->tee);
        tee_context_put(ctx);
        if (dev)
                put_device(dev);
        mutex_unlock(&ctx->tee->lock);
}

/* Buffer allocated by RPC from the firmware, to be accessed by the user.
 * No need to register it as it is not allocated by the user. */
int tee_shm_fd_for_rpc(struct tee_context *ctx, struct tee_shm_io *shm_io)
{
        struct tee_shm *shm = NULL;
        struct tee *tee = ctx->tee;
        int ret;
        struct list_head *pshm;

        INMSG();

        shm_io->fd_shm = 0;

        mutex_lock(&tee->lock);
        if (!list_empty(&tee->list_rpc_shm)) {
                list_for_each(pshm, &tee->list_rpc_shm) {
                        shm = list_entry(pshm, struct tee_shm, entry);
                        if ((void *)shm->paddr == shm_io->buffer)
                                goto found;
                }
        }

        dev_err(_DEV(tee), "Can't find shm for %p\n", (void *)shm_io->buffer);
        ret = -ENOMEM;
        goto out;

found:
        ret = export_buf(tee, shm, &shm_io->fd_shm);
        if (ret) {
                ret = -ENOMEM;
                goto out;
        }

        shm->ctx = ctx;
        list_move(&shm->entry, &ctx->list_shm);

        shm->dev = get_device(_DEV(tee));
        ret = tee_get(tee);
        BUG_ON(ret);
        tee_context_get(ctx);

        BUG_ON(!tee->ops->shm_inc_ref(shm));
out:
        mutex_unlock(&tee->lock);
        OUTMSG(ret);
        return ret;
}

/******************************************************************************/

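/*
 * tee_shm_db_get() - resolve a client-provided dma-buf file descriptor into
 * a physical address. The dma-buf is attached and mapped; only buffers that
 * map to a single scatterlist entry (physically contiguous) are accepted.
 */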
static int tee_shm_db_get(struct tee *tee, struct tee_shm *shm, int fd,
                unsigned int flags, size_t size, int offset)
{
        struct tee_shm_dma_buf *sdb;
        struct dma_buf *dma_buf;
        int ret = 0;

        dev_dbg(_DEV(tee), "%s: > fd=%d flags=%08x\n", __func__, fd, flags);

        dma_buf = dma_buf_get(fd);
        if (IS_ERR(dma_buf)) {
                ret = PTR_ERR(dma_buf);
                goto exit;
        }

        sdb = kzalloc(sizeof(*sdb), GFP_KERNEL);
        if (!sdb) {
                dev_err(_DEV(tee), "can't alloc tee_shm_dma_buf\n");
                ret = -ENOMEM;
                goto buf_put;
        }
        shm->sdb = sdb;

        if (dma_buf->size < size + offset) {
                dev_err(_DEV(tee), "dma_buf too small %zd < %zd + %d\n",
                        dma_buf->size, size, offset);
                ret = -EINVAL;
                goto free_sdb;
        }

        sdb->attach = dma_buf_attach(dma_buf, _DEV(tee));
        if (IS_ERR_OR_NULL(sdb->attach)) {
                ret = PTR_ERR(sdb->attach);
                goto free_sdb;
        }

        sdb->sgt = dma_buf_map_attachment(sdb->attach, DMA_NONE);
        if (IS_ERR_OR_NULL(sdb->sgt)) {
                ret = PTR_ERR(sdb->sgt);
                goto buf_detach;
        }

        if (sg_nents(sdb->sgt->sgl) != 1) {
                ret = -EINVAL;
                goto buf_unmap;
        }

        shm->paddr = sg_phys(sdb->sgt->sgl) + offset;
        if (dma_buf->ops->attach == _tee_shm_attach_dma_buf)
                sdb->tee_allocated = true;
        else
                sdb->tee_allocated = false;

        shm->flags |= TEEC_MEM_DMABUF;

        dev_dbg(_DEV(tee), "fd=%d @p=%p is_tee=%d db=%p\n", fd,
                        (void *)shm->paddr, sdb->tee_allocated, dma_buf);
        goto exit;

buf_unmap:
        dma_buf_unmap_attachment(sdb->attach, sdb->sgt, DMA_NONE);
buf_detach:
        dma_buf_detach(dma_buf, sdb->attach);
free_sdb:
        kfree(sdb);
buf_put:
        dma_buf_put(dma_buf);
exit:
        OUTMSG(ret);
        return ret;
}

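/*
 * Optional support for resolving raw virtual addresses (VA_GET_ENABLED):
 * tee_shm_get_phy_from_kla() walks the page tables of the given mm to
 * translate a kernel linear address into a physical address, returning 0
 * if the address is not mapped or is backed by a huge page.
 */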
#ifdef VA_GET_ENABLED
static unsigned int tee_shm_get_phy_from_kla(
                struct mm_struct *mm, unsigned int kla)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;
        unsigned int pa = 0;

        /* stolen from kernel3.10:mm/memory.c:__follow_pte */

        pgd = pgd_offset(mm, kla);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return 0;

        pud = pud_offset(pgd, kla);
        if (pud_none(*pud) || pud_bad(*pud))
                return 0;

        pmd = pmd_offset(pud, kla);
        VM_BUG_ON(pmd_trans_huge(*pmd));
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return 0;

        /* We cannot handle huge page PFN maps.
         * Luckily they don't exist. */
        if (pmd_huge(*pmd))
                return 0;

        ptep = pte_offset_map(pmd, kla);
        if (!ptep)
                return 0;

        pte = *ptep;
        pte_unmap(ptep);

        if (pte_present(pte))
                pa = __pa(page_address(pte_page(pte)));

        return pa;
}

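/*
 * tee_shm_va_get() - resolve a raw virtual address passed by the client.
 * User addresses are translated through their VMA with follow_pfn(); for
 * kernel API callers the address is walked page by page with
 * tee_shm_get_phy_from_kla() and checked for physical contiguity.
 */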
static int tee_shm_va_get(struct tee_context *ctx, struct tee_shm *shm,
                void *buffer, unsigned int flags, size_t size, int offset)
{
        int ret = 0;
        struct mm_struct *mm = current->mm;
        unsigned long va = (unsigned long)buffer;
        unsigned int virt_base = (va / PAGE_SIZE) * PAGE_SIZE;
        unsigned int offset_in_page = va - virt_base;
        unsigned int offset_total = offset_in_page + offset;
        struct vm_area_struct *vma;
        struct tee *tee = ctx->tee;

        dev_dbg(_DEV(tee), "%s: > %p\n", __func__, buffer);
        /* if the caller is the kernel api, active_mm is mm */
        if (!mm)
                mm = current->active_mm;

        BUG_ON(!mm);

        vma = find_vma(mm, virt_base);

        if (vma) {
                unsigned long pfn;
                /* It's a VMA => consider it a user address */

                if (follow_pfn(vma, virt_base, &pfn)) {
                        dev_err(_DEV(tee), "%s can't get pfn for %p\n",
                                __func__, buffer);
                        ret = -EINVAL;
                        goto out;
                }

                shm->paddr = PFN_PHYS(pfn) + offset_total;

                if (vma->vm_end - vma->vm_start - offset_total < size) {
                        dev_err(_DEV(tee), "%s %p:%x not big enough: %lx - %d < %x\n",
                                        __func__, buffer, shm->paddr,
                                        vma->vm_end - vma->vm_start,
                                        offset_total, size);
                        shm->paddr = 0;
                        ret = -EINVAL;
                        goto out;
                }
        } else if (!ctx->usr_client) {
                /* It's not a VMA => consider it a kernel address,
                 * and check whether it is a known physical address.
                 * Note: virt_to_phys is not usable since it can be a direct
                 * map or a remapped address.
                 */
                unsigned int phys_base;
                int nb_page = (PAGE_SIZE - 1 + size + offset_total) / PAGE_SIZE;
                int i;

                spin_lock(&mm->page_table_lock);
                phys_base = tee_shm_get_phy_from_kla(mm, virt_base);

                if (!phys_base) {
                        spin_unlock(&mm->page_table_lock);
                        dev_err(_DEV(tee), "%s can't get physical address for %p\n",
                                        __func__, buffer);
                        goto err;
                }

                /* Check physical continuity over the whole size */
                for (i = 1; i < nb_page; i++) {
                        unsigned int pa = tee_shm_get_phy_from_kla(mm,
                                        virt_base + i*PAGE_SIZE);
                        if (pa != phys_base + i*PAGE_SIZE) {
                                spin_unlock(&mm->page_table_lock);
                                dev_err(_DEV(tee), "%s %p:%x not big enough: %lx - %d < %x\n",
                                                __func__, buffer, phys_base,
                                                i*PAGE_SIZE,
                                                offset_total, size);
                                goto err;
                        }
                }
                spin_unlock(&mm->page_table_lock);

                shm->paddr = phys_base + offset_total;
                goto out;
err:
                ret = -EINVAL;
        }

out:
        dev_dbg(_DEV(tee), "%s: < %d shm=%p vaddr=%p paddr=%x\n",
                        __func__, ret, (void *)shm, buffer, shm->paddr);
        return ret;
}
#endif

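/*
 * tee_shm_get() - build a tee_shm memory reference from a client-supplied
 * TEEC_SharedMemory descriptor. Depending on the descriptor this resolves a
 * kernel API shm (TEEC_MEM_KAPI), a dma-buf file descriptor, a raw virtual
 * address (only when VA_GET_ENABLED is defined), or a NULL buffer passed
 * "as is". The reference must be released with tee_shm_put().
 *
 * Minimal usage sketch (illustrative only; assumes a valid tee_context ctx
 * and a TEEC_SharedMemory descriptor c_shm already filled in by the caller):
 *
 *      shm = tee_shm_get(ctx, &c_shm, size, 0);
 *      if (IS_ERR(shm))
 *              return PTR_ERR(shm);
 *      (pass shm->paddr to the secure side, then release the reference)
 *      tee_shm_put(ctx, shm);
 */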
struct tee_shm *tee_shm_get(struct tee_context *ctx, TEEC_SharedMemory *c_shm,
                size_t size, int offset)
{
        struct tee_shm *shm;
        struct tee *tee = ctx->tee;
        int ret;

        dev_dbg(_DEV(tee), "%s: > fd=%d flags=%08x\n",
                        __func__, c_shm->d.fd, c_shm->flags);

        mutex_lock(&tee->lock);
        shm = kzalloc(sizeof(*shm), GFP_KERNEL);
        if (IS_ERR_OR_NULL(shm)) {
                dev_err(_DEV(tee), "can't alloc tee_shm\n");
                ret = -ENOMEM;
                goto err;
        }

        shm->ctx = ctx;
        shm->tee = tee;
        shm->dev = _DEV(tee);
        shm->flags = c_shm->flags | TEE_SHM_MEMREF;
        shm->size_req = size;
        shm->size_alloc = 0;

        if (c_shm->flags & TEEC_MEM_KAPI) {
                struct tee_shm *kc_shm = (struct tee_shm *)c_shm->d.ptr;

                if (!kc_shm) {
                        dev_err(_DEV(tee), "kapi fd null\n");
                        ret = -EINVAL;
                        goto err;
                }
                shm->paddr = kc_shm->paddr;

                if (kc_shm->size_alloc < size + offset) {
                        dev_err(_DEV(tee), "kapi buff too small %zd < %zd + %d\n",
                                kc_shm->size_alloc, size, offset);
                        ret = -EINVAL;
                        goto err;
                }

                dev_dbg(_DEV(tee), "fd=%d @p=%p\n",
                                c_shm->d.fd, (void *)shm->paddr);
        } else if (c_shm->d.fd) {
                ret = tee_shm_db_get(tee, shm,
                                c_shm->d.fd, c_shm->flags, size, offset);
                if (ret)
                        goto err;
        } else if (!c_shm->buffer) {
                dev_dbg(_DEV(tee), "null buffer, pass 'as is'\n");
        } else {
#ifdef VA_GET_ENABLED
                ret = tee_shm_va_get(ctx, shm,
                                c_shm->buffer, c_shm->flags, size, offset);
                if (ret)
                        goto err;
#else
                ret = -EINVAL;
                goto err;
#endif
        }

        mutex_unlock(&tee->lock);
        OUTMSGX(shm);
        return shm;

err:
        kfree(shm);
        mutex_unlock(&tee->lock);
        OUTMSGX(ERR_PTR(ret));
        return ERR_PTR(ret);
}

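/*
 * tee_shm_put() - release a memory reference obtained with tee_shm_get().
 * For dma-buf backed references the attachment is unmapped and detached and
 * the dma-buf reference is dropped before the tee_shm itself is freed.
 */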
void tee_shm_put(struct tee_context *ctx, struct tee_shm *shm)
{
        struct tee *tee = ctx->tee;

        BUG_ON(!shm);
        BUG_ON(!(shm->flags & TEE_SHM_MEMREF));

        dev_dbg(_DEV(tee), "%s: > shm=%p flags=%08x paddr=%p\n",
                        __func__, (void *)shm, shm->flags, (void *)shm->paddr);

        mutex_lock(&tee->lock);
        if (shm->flags & TEEC_MEM_DMABUF) {
                struct tee_shm_dma_buf *sdb;
                struct dma_buf *dma_buf;

                sdb = shm->sdb;
                dma_buf = sdb->attach->dmabuf;

                dev_dbg(_DEV(tee), "%s: db=%p\n", __func__, (void *)dma_buf);

                dma_buf_unmap_attachment(sdb->attach, sdb->sgt, DMA_NONE);
                dma_buf_detach(dma_buf, sdb->attach);
                dma_buf_put(dma_buf);

                kfree(sdb);
                sdb = NULL;
        }

        kfree(shm);
        mutex_unlock(&tee->lock);
        OUTMSG(0);
}