* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
+#define NV_DEBUG 0
+
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/bitmap.h>
#include <linux/rbtree.h>
#include <linux/proc_fs.h>
#include <linux/ctype.h>
+#include <linux/nvmap.h>
#include <asm/tlbflush.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
#include <mach/iovmm.h>
-#include "linux/nvmem_ioctl.h"
#include "nvcommon.h"
#include "nvrm_memmgr.h"
#include "nvbootargs.h"
-#include <linux/dma-mapping.h>
-#include "asm/cacheflush.h"
-
-#define NVMAP_BASE (VMALLOC_END + SZ_2M)
-#define NVMAP_SIZE SZ_2M
+#ifndef NVMAP_BASE
+#define NVMAP_BASE 0xFEE00000
+#define NVMAP_SIZE SZ_2M
+#endif
#define L_PTE_MT_INNER_WB (0x05 << 2) /* 0101 (armv6, armv7) */
#define pgprot_inner_writeback(prot) \
- __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_INNER_WB)
-
+ __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_INNER_WB)
static void smp_dma_clean_range(const void *start, const void *end)
{
dmac_map_area(start, end - start, DMA_TO_DEVICE);
dmac_flush_range(start, end);
}
-int nvmap_add_carveout_heap(unsigned long base, size_t size,
- const char *name, unsigned int bitmask);
-
-
-/*#define IOVMM_FIRST*/ /* enable to force most allocations from iovmm */
-
static void nvmap_vma_open(struct vm_area_struct *vma);
static void nvmap_vma_close(struct vm_area_struct *vma);
static int nvmap_ioctl_rw_handle(struct file *filp, int is_read,
void __user* arg);
-extern void NvRmPrivMemIncrRef(NvRmMemHandle hmem);
-
static struct backing_dev_info nvmap_bdi = {
.ra_pages = 0,
.capabilities = (BDI_CAP_NO_ACCT_AND_WRITEBACK |
BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP),
};
-
#define NVMAP_PTE_OFFSET(x) (((unsigned long)(x) - NVMAP_BASE) >> PAGE_SHIFT)
#define NVMAP_PTE_INDEX(x) (((unsigned long)(x) - NVMAP_BASE)>>PGDIR_SHIFT)
#define NUM_NVMAP_PTES (NVMAP_SIZE >> PGDIR_SHIFT)
#define NVMAP_END (NVMAP_BASE + NVMAP_SIZE)
#define NVMAP_PAGES (NVMAP_SIZE >> PAGE_SHIFT)
+/* private nvmap_handle flag for pinning duplicate detection */
+#define NVMEM_HANDLE_VISITED (0x1ul << 31)
+
+/* Heaps to use for kernel allocs when no heap list supplied */
+#define NVMAP_KERNEL_DEFAULT_HEAPS (NVMEM_HEAP_SYSMEM | NVMEM_HEAP_CARVEOUT_GENERIC)
+
+/* Heaps for which secure allocations are allowed */
+#define NVMAP_SECURE_HEAPS (NVMEM_HEAP_CARVEOUT_IRAM | NVMEM_HEAP_IOVMM)
+
static pte_t *nvmap_pte[NUM_NVMAP_PTES];
static unsigned long nvmap_ptebits[NVMAP_PAGES/BITS_PER_LONG];
static struct tegra_iovmm_client *nvmap_vm_client = NULL;
+/* default heap order policy */
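+/* Multi-page requests try the carveout heaps first, then contiguous sysmem,
+ * then IOVMM; single-page requests try sysmem before carveout, since a
+ * single page is always physically contiguous. */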
+static unsigned int _nvmap_heap_policy(unsigned int heaps, int numpages)
+{
+ static const unsigned int multipage_order[] = {
+ NVMEM_HEAP_CARVEOUT_MASK,
+ NVMEM_HEAP_SYSMEM,
+ NVMEM_HEAP_IOVMM,
+ 0
+ };
+ static const unsigned int singlepage_order[] = {
+ NVMEM_HEAP_SYSMEM,
+ NVMEM_HEAP_CARVEOUT_MASK,
+ NVMEM_HEAP_IOVMM,
+ 0
+ };
+ const unsigned int* order;
+
+ if (numpages == 1)
+ order = singlepage_order;
+ else
+ order = multipage_order;
+
+ while (*order) {
+ unsigned int h = (*order & heaps);
+ if (h) return h;
+ order++;
+ }
+ return 0;
+}
+
/* first-fit linear allocator carveout heap manager */
struct nvmap_mem_block {
unsigned long base;
val = max_t(unsigned long, val, co->blocks[idx].size);
idx = co->blocks[idx].next_free;
break;
- }
+ }
}
spin_unlock(&co->lock);
static int _nvmap_init_carveout(struct nvmap_carveout *co,
const char *name, unsigned long base_address, size_t len)
{
- const unsigned int min_blocks = 16;
+ unsigned int num_blocks;
struct nvmap_mem_block *blocks = NULL;
int i;
- blocks = kzalloc(sizeof(*blocks)*min_blocks, GFP_KERNEL);
+ num_blocks = min_t(unsigned int, len/1024, 1024);
+ blocks = vmalloc(sizeof(*blocks)*num_blocks);
if (!blocks) goto fail;
co->name = kstrdup(name, GFP_KERNEL);
if (!co->name) goto fail;
- for (i=1; i<min_blocks; i++) {
+ for (i=1; i<num_blocks; i++) {
blocks[i].next = i+1;
blocks[i].prev = i-1;
blocks[i].next_free = -1;
blocks[0].base = base_address;
blocks[0].size = len;
co->blocks = blocks;
- co->num_blocks = min_blocks;
+ co->num_blocks = num_blocks;
spin_lock_init(&co->lock);
co->block_index = 0;
co->spare_index = 1;
return -ENOMEM;
}
-static int nvmap_grow_blocks(struct nvmap_carveout *co)
-{
- struct nvmap_mem_block *blocks;
- unsigned int i;
-
- if (co->num_blocks >= 1<<(8*sizeof(co->free_index)-1)) return -ENOMEM;
- blocks = kzalloc(sizeof(*blocks)*(co->num_blocks*2), GFP_ATOMIC);
- if (!blocks) {
- printk("NV: %s alloc failed\n", __func__);
- return -ENOMEM;
- }
-
- memcpy(blocks, co->blocks, sizeof(*blocks)*(co->num_blocks));
- kfree(co->blocks);
- co->blocks = blocks;
- for (i=co->num_blocks; i<co->num_blocks*2; i++) {
- blocks[i].next = i+1;
- blocks[i].prev = i-1;
- blocks[i].next_free = -1;
- blocks[i].prev_free = -1;
- }
- blocks[co->num_blocks].prev = -1;
- blocks[i-1].next = -1;
- co->spare_index = co->num_blocks;
- co->num_blocks *= 2;
- return 0;
-}
-
static int nvmap_get_spare(struct nvmap_carveout *co) {
int idx;
if (co->spare_index == -1)
- if (nvmap_grow_blocks(co))
- return -1;
+ return -1;
- BUG_ON(co->spare_index == -1);
idx = co->spare_index;
co->spare_index = co->blocks[idx].next;
co->blocks[idx].next = -1;
#endif
}
+/* nvmap_mru_vma_lock should be acquired by the caller before calling this */
static inline void _nvmap_insert_mru_vma(struct nvmap_handle *h)
{
#ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
- spin_lock(&nvmap_mru_vma_lock);
list_add(&h->pgalloc.mru_list, _nvmap_list(h->pgalloc.area->iovm_length));
- spin_unlock(&nvmap_mru_vma_lock);
#endif
}
struct tegra_iovmm_area *vm = NULL;
unsigned int i, idx;
- spin_lock(&nvmap_mru_vma_lock);
-
if (h->pgalloc.area) {
+ spin_lock(&nvmap_mru_vma_lock);
BUG_ON(list_empty(&h->pgalloc.mru_list));
list_del(&h->pgalloc.mru_list);
INIT_LIST_HEAD(&h->pgalloc.mru_list);
if (vm) {
INIT_LIST_HEAD(&h->pgalloc.mru_list);
- spin_unlock(&nvmap_mru_vma_lock);
return vm;
}
/* attempt to re-use the most recently unpinned IOVMM area in the
* evict handles (starting from the current bin) until an allocation
* succeeds or no more areas can be evicted */
+ spin_lock(&nvmap_mru_vma_lock);
mru = _nvmap_list(h->size);
if (!list_empty(mru))
evict = list_first_entry(mru, struct nvmap_handle,
BUG_ON(!evict->pgalloc.area);
list_del(&evict->pgalloc.mru_list);
INIT_LIST_HEAD(&evict->pgalloc.mru_list);
+ spin_unlock(&nvmap_mru_vma_lock);
tegra_iovmm_free_vm(evict->pgalloc.area);
evict->pgalloc.area = NULL;
vm = tegra_iovmm_create_vm(nvmap_vm_client,
NULL, h->size,
_nvmap_flag_to_pgprot(h->flags, pgprot_kernel));
+ spin_lock(&nvmap_mru_vma_lock);
}
}
-
spin_unlock(&nvmap_mru_vma_lock);
return vm;
#endif
#define nvmap_gfp (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
-static int _nvmap_alloc_do_coalloc(struct nvmap_handle *h,
- struct nvmap_carveout *co, size_t align)
-{
- int idx;
-
- idx = nvmap_carveout_alloc(co, align, h->size);
- if (idx != -1) {
- h->alloc = true;
- h->heap_pgalloc = false;
- h->carveout.co_heap = co;
- h->carveout.block_idx = idx;
- spin_lock(&co->lock);
- h->carveout.base = co->blocks[idx].base;
- spin_unlock(&co->lock);
- }
-
- return (idx==-1) ? -ENOMEM : 0;
-}
-
/* map the backing pages for a heap_pgalloc handle into its IOVMM area */
static void _nvmap_handle_iovmm_map(struct nvmap_handle *h)
{
h->pgalloc.dirty = false;
}
-static int _nvmap_alloc_do_pgalloc(struct nvmap_handle *h,
- bool contiguous, bool secure)
+static int nvmap_pagealloc(struct nvmap_handle *h, bool contiguous)
{
unsigned int i = 0, cnt = (h->size + PAGE_SIZE - 1) >> PAGE_SHIFT;
struct page **pages;
if (!pages) return -ENOMEM;
- if (cnt==1 && !secure) contiguous = true;
-
- /* secure surfaces should only be allocated in discontiguous (IOVM-
- * managed) space, so that the mapping can be zapped after it is
- * unpinned */
- WARN_ON(secure && contiguous);
-
if (contiguous) {
size_t order = get_order(h->size);
struct page *compound_page;
for (i=0; i<cnt; i++) {
pages[i] = alloc_page(nvmap_gfp);
if (!pages[i]) {
- pr_err("failed to allocate %u pages after %u entries\n",
- cnt, i);
- goto fail;
- }
+ pr_err("failed to allocate %u pages after %u entries\n",
+ cnt, i);
+ goto fail;
+ }
}
}
void *km;
SetPageReserved(pages[i]);
km = kmap(pages[i]);
- if (km) flush_dcache_page(pages[i]);
+ if (km) __cpuc_flush_dcache_area(km, PAGE_SIZE);
outer_flush_range(page_to_phys(pages[i]),
page_to_phys(pages[i])+PAGE_SIZE);
kunmap(pages[i]);
h->size = cnt<<PAGE_SHIFT;
h->pgalloc.pages = pages;
- h->heap_pgalloc = true;
h->pgalloc.contig = contiguous;
INIT_LIST_HEAD(&h->pgalloc.mru_list);
- h->alloc = true;
return 0;
fail:
struct nvmap_file_priv *priv, unsigned long ref)
{
struct rb_node *n = priv->handle_refs.rb_node;
+ struct nvmap_handle *h = (struct nvmap_handle *)ref;
+
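+	/* reject stale or forged references: a valid handle always carries
+	 * the NVDA_POISON marker */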
+ if (unlikely(h->poison != NVDA_POISON)) {
+ pr_err("%s: handle is poisoned\n", __func__);
+ return NULL;
+ }
while (n) {
struct nvmap_handle_ref *r;
}
BUG_ON(!h->alloc);
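+	/* when IOVMM reclaim is enabled, hold the MRU lock across the unpin
+	 * so the IOVMM area can be moved onto the eviction list atomically
+	 * with the pin count dropping to zero */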
+#ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
+ spin_lock(&nvmap_mru_vma_lock);
+#endif
if (!atomic_dec_return(&h->pin)) {
if (h->heap_pgalloc && h->pgalloc.area) {
/* if a secure handle is clean (i.e., mapped into
ret=1;
}
}
+#ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
+ spin_unlock(&nvmap_mru_vma_lock);
+#endif
_nvmap_handle_put(h);
return ret;
}
spin_lock(&priv->ref_lock);
for (i=0; i<nr && !ret; i++) {
r = _nvmap_ref_lookup_locked(priv, refs[i]);
- if (!r && (!(priv->su || h[i]->global ||
- current->group_leader == h[i]->owner)))
- ret = -EPERM;
- else if (r) atomic_inc(&r->pin);
+ if (r) atomic_inc(&r->pin);
else {
- pr_err("%s: %s pinning %s's %uB handle without "
- "local context\n", __func__,
- current->group_leader->comm,
- h[i]->owner->comm, h[i]->orig_size);
- }
+ if ((h[i]->poison != NVDA_POISON) ||
+ (!(priv->su || h[i]->global ||
+ current->group_leader == h[i]->owner)))
+ ret = -EPERM;
+ else {
+ pr_err("%s: %s pinning %s's %uB handle without "
+ "local context\n", __func__,
+ current->group_leader->comm,
+ h[i]->owner->comm, h[i]->orig_size);
+ }
+ }
}
while (ret && i--) {
mutex_lock(&nvmap_pin_lock);
for (i=0; i<nr && !ret; i++) {
- ret = wait_event_interruptible_timeout(nvmap_pin_wait,
- !_nvmap_handle_pin_locked(h[i]), 5);
- if (ret >= 0) ret = !ret;
- BUG_ON(ret > 0);
-
-
+ ret = wait_event_interruptible(nvmap_pin_wait,
+ !_nvmap_handle_pin_locked(h[i]));
}
mutex_unlock(&nvmap_pin_lock);
return -EPERM;
}
-/* attempts to allocate from either contiguous system memory or IOVMM space */
-static int _nvmap_do_page_alloc(struct nvmap_file_priv *priv,
- struct nvmap_handle *h, unsigned int heap_mask,
- size_t align, bool secure)
-{
- int ret = -ENOMEM;
- size_t page_size = (h->size + PAGE_SIZE - 1) & ~(PAGE_SIZE-1);
-#ifdef IOVMM_FIRST
- unsigned int fallback[] = { NVMEM_HEAP_IOVMM, NVMEM_HEAP_SYSMEM, 0 };
-#else
- unsigned int fallback[] = { NVMEM_HEAP_SYSMEM, NVMEM_HEAP_IOVMM, 0 };
-#endif
- unsigned int *m = fallback;
-
- /* secure allocations must not be performed from sysmem */
- if (secure) heap_mask &= ~NVMEM_HEAP_SYSMEM;
-
- if (align > PAGE_SIZE) return -EINVAL;
-
-
- while (*m && ret) {
- if (heap_mask & NVMEM_HEAP_SYSMEM & *m)
- ret = _nvmap_alloc_do_pgalloc(h, true, secure);
-
- else if (heap_mask & NVMEM_HEAP_IOVMM & *m) {
- /* increment the committed IOVM space prior to
- * allocation, to avoid race conditions with other
- * threads simultaneously allocating. this is
- * conservative, but guaranteed to work */
-
- int oc;
- oc = atomic_add_return(page_size, &priv->iovm_commit);
-
- if (oc <= priv->iovm_limit)
- ret = _nvmap_alloc_do_pgalloc(h, false, secure);
- else
- ret = -ENOMEM;
- /* on failure, or when do_pgalloc promotes a non-
- * contiguous request into a contiguous request,
- * release the commited iovm space */
- if (ret || h->pgalloc.contig)
- atomic_sub(page_size, &priv->iovm_commit);
- }
- m++;
- }
- return ret;
-}
-
-/* attempts to allocate from the carveout heaps */
-static int _nvmap_do_carveout_alloc(struct nvmap_handle *h,
- unsigned int heap_mask, size_t align)
-{
- int ret = -ENOMEM;
- struct nvmap_carveout_node *n;
-
- down_read(&nvmap_context.list_sem);
- list_for_each_entry(n, &nvmap_context.heaps, heap_list) {
- if (heap_mask & n->heap_bit)
- ret = _nvmap_alloc_do_coalloc(h, &n->carveout, align);
- if (!ret) break;
- }
- up_read(&nvmap_context.list_sem);
- return ret;
-}
-
static int _nvmap_do_alloc(struct nvmap_file_priv *priv,
unsigned long href, unsigned int heap_mask, size_t align,
- unsigned int flags, bool secure, bool carveout_first)
+ unsigned int flags)
{
- int ret = -ENOMEM;
struct nvmap_handle_ref *r;
struct nvmap_handle *h;
+ int numpages;
+
+ align = max_t(size_t, align, L1_CACHE_BYTES);
if (!href) return -EINVAL;
h = r->h;
if (h->alloc) return 0;
- h->flags = flags;
- align = max_t(size_t, align, L1_CACHE_BYTES);
+ numpages = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+ h->secure = (flags & NVMEM_HANDLE_SECURE);
+ h->flags = (flags & 0x3);
+
+ BUG_ON(!numpages);
+
+ /* secure allocations can only be served from secure heaps */
+ if (h->secure) {
+ heap_mask &= NVMAP_SECURE_HEAPS;
+ if (!heap_mask) return -EINVAL;
+ }
+ /* can't do greater than page size alignment with page alloc */
+ if (align > PAGE_SIZE)
+ heap_mask &= NVMEM_HEAP_CARVEOUT_MASK;
+
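+	/* walk the requested heaps in policy order, dropping each attempted
+	 * heap type from the mask, until one of them satisfies the request */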
+ while (heap_mask && !h->alloc) {
+ unsigned int heap_type = _nvmap_heap_policy(heap_mask, numpages);
+
+ if (heap_type & NVMEM_HEAP_CARVEOUT_MASK) {
+ struct nvmap_carveout_node *n;
+
+ down_read(&nvmap_context.list_sem);
+ list_for_each_entry(n, &nvmap_context.heaps, heap_list) {
+ if (heap_type & n->heap_bit) {
+ struct nvmap_carveout* co = &n->carveout;
+ int idx = nvmap_carveout_alloc(co, align, h->size);
+ if (idx != -1) {
+ h->carveout.co_heap = co;
+ h->carveout.block_idx = idx;
+ spin_lock(&co->lock);
+ h->carveout.base = co->blocks[idx].base;
+ spin_unlock(&co->lock);
+ h->heap_pgalloc = false;
+ h->alloc = true;
+ break;
+ }
+ }
+ }
+ up_read(&nvmap_context.list_sem);
+ }
+ else if (heap_type & NVMEM_HEAP_IOVMM) {
+ int ret;
- if (secure) heap_mask &= ~NVMEM_HEAP_CARVEOUT_MASK;
+ BUG_ON(align > PAGE_SIZE);
- if (carveout_first || (heap_mask & NVMEM_HEAP_CARVEOUT_IRAM)) {
- ret = _nvmap_do_carveout_alloc(h, heap_mask, align);
- if (ret) ret = _nvmap_do_page_alloc(priv, h,
- heap_mask, align, secure);
- } else {
- ret = _nvmap_do_page_alloc(priv, h, heap_mask, align, secure);
- if (ret) ret = _nvmap_do_carveout_alloc(h, heap_mask, align);
+ /* increment the committed IOVM space prior to
+ * allocation, to avoid race conditions with other
+ * threads simultaneously allocating. this is
+ * conservative, but guaranteed to work */
+ if (atomic_add_return(numpages << PAGE_SHIFT, &priv->iovm_commit)
+ < priv->iovm_limit) {
+ ret = nvmap_pagealloc(h, false);
+ }
+ else ret = -ENOMEM;
+
+ if (ret) {
+ atomic_sub(numpages << PAGE_SHIFT, &priv->iovm_commit);
+ }
+ else {
+ BUG_ON(h->pgalloc.contig);
+ h->heap_pgalloc = true;
+ h->alloc = true;
+ }
+ }
+ else if (heap_type & NVMEM_HEAP_SYSMEM) {
+ if (nvmap_pagealloc(h, true) == 0) {
+ BUG_ON(!h->pgalloc.contig);
+ h->heap_pgalloc = true;
+ h->alloc = true;
+ }
+ }
+ else break;
+
+ heap_mask &= ~heap_type;
}
- BUG_ON((!ret && !h->alloc) || (ret && h->alloc));
- return ret;
+ return (h->alloc ? 0 : -ENOMEM);
}
static int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
{
struct nvmem_alloc_handle op;
struct nvmap_file_priv *priv = filp->private_data;
- bool secure = false;
-#ifdef IOVMM_FIRST
- bool carveout_first = false;
-#else
- bool carveout_first = true;
-#endif
int err;
err = copy_from_user(&op, arg, sizeof(op));
* data leakage. */
op.align = max_t(size_t, op.align, PAGE_SIZE);
- if (op.flags & NVMEM_HANDLE_SECURE) secure = true;
-
- /* TODO: implement a way to specify carveout-first vs
- * carveout-second */
- return _nvmap_do_alloc(priv, op.handle, op.heap_mask,
- op.align, (op.flags & 0x3), secure, carveout_first);
+ return _nvmap_do_alloc(priv, op.handle, op.heap_mask, op.align, op.flags);
}
static int _nvmap_do_free(struct nvmap_file_priv *priv, unsigned long href)
__func__, current->comm,
(r->h->owner) ? r->h->owner->comm : "kernel",
(r->h->global) ? "global" : "private",
- (r->h->alloc && r->h->heap_pgalloc)?"page-alloc" :
+ (r->h->alloc && r->h->heap_pgalloc) ? "page-alloc" :
(r->h->alloc) ? "carveout" : "unallocated",
r->h->orig_size);
while (pins--) do_wake |= _nvmap_handle_unpin(r->h);
BUG_ON(!h);
r = kzalloc(sizeof(*r), GFP_KERNEL);
- spin_lock(&priv->ref_lock);
if (!r) {
- spin_unlock(&priv->ref_lock);
if (h) _nvmap_handle_put(h);
return -ENOMEM;
}
r->h = h;
atomic_set(&r->pin, 0);
+ spin_lock(&priv->ref_lock);
p = &priv->handle_refs.rb_node;
while (*p) {
struct nvmap_handle_ref *l;
else
outer_maint = NULL;
} else if (op == NVMEM_CACHE_OP_WB_INV) {
- inner_maint = smp_dma_flush_range;
+ inner_maint = dmac_flush_range;
if (h->flags == NVMEM_HANDLE_CACHEABLE)
outer_maint = outer_flush_range;
else
prot = _nvmap_flag_to_pgprot(h->flags, pgprot_kernel);
- /* for any write-back operation, it is safe to writeback the entire
- * cache rather than just the requested region. for large regions, it
- * is faster to do this than to iterate over every line.
- * only implemented for L1-only cacheable handles currently */
-#if 0
- if (h->flags == NVMEM_HANDLE_INNER_CACHEABLE &&
- end-start >= PAGE_SIZE*3 && op != NVMEM_CACHE_OP_INV) {
- flush_cache_all();
- goto out;
- }
-#endif
-
while (start < end) {
struct page *page = NULL;
unsigned long phys;
pr_err("%s: failed to create heap-%s device\n",
__func__, n->carveout.name);
return;
- }
+ }
if (sysfs_create_group(&n->dev.kobj, &nvmap_heap_defattr_group))
pr_err("%s: failed to create attribute group for heap-%s "
"device\n", __func__, n->carveout.name);
static void _nvmap_create_nvos_preserved(struct nvmap_carveout *co)
{
+#ifdef CONFIG_TEGRA_NVOS
unsigned int i, key;
NvBootArgsPreservedMemHandle mem;
static int was_created[NvBootArgKey_PreservedMemHandle_Num -
else
_nvmap_handle_put(h);
}
+#endif
}
int nvmap_add_carveout_heap(unsigned long base, size_t size,
spin_unlock(&co->lock);
if (_nvmap_init_carveout(&n->carveout,name, blkbase, blksize)) {
- nvmap_carveout_free(&i->carveout, idx);
+ nvmap_carveout_free(co, idx);
idx = -1;
} else {
spin_lock(&co->lock);
/* NvRmMemMgr APIs implemented on top of nvmap */
+#if defined(CONFIG_TEGRA_NVRM)
#include <linux/freezer.h>
NvU32 NvRmMemGetAddress(NvRmMemHandle hMem, NvU32 Offset)
NvError NvRmMemAlloc(NvRmMemHandle hMem, const NvRmHeap *Heaps,
NvU32 NumHeaps, NvU32 Alignment, NvOsMemAttribute Coherency)
{
- unsigned int heap_mask = 0;
unsigned int flags = pgprot_kernel;
- int err;
+ int err = -ENOMEM;
BUG_ON(Alignment & (Alignment-1));
else
flags = NVMEM_HANDLE_WRITE_COMBINE;
- if (!NumHeaps || !Heaps)
- heap_mask = (NVMEM_HEAP_SYSMEM | NVMEM_HEAP_CARVEOUT_GENERIC);
+ if (!NumHeaps || !Heaps) {
+ err = _nvmap_do_alloc(&nvmap_context.init_data,
+ (unsigned long)hMem, NVMAP_KERNEL_DEFAULT_HEAPS,
+ (size_t)Alignment, flags);
+ }
else {
unsigned int i;
- for (i=0; i<NumHeaps; i++) {
+ for (i = 0; i < NumHeaps; i++) {
+ unsigned int heap;
switch (Heaps[i]) {
case NvRmHeap_GART:
- heap_mask |= NVMEM_HEAP_IOVMM;
+ heap = NVMEM_HEAP_IOVMM;
break;
case NvRmHeap_External:
- heap_mask |= NVMEM_HEAP_SYSMEM;
+ heap = NVMEM_HEAP_SYSMEM;
break;
case NvRmHeap_ExternalCarveOut:
- heap_mask |= NVMEM_HEAP_CARVEOUT_GENERIC;
+ heap = NVMEM_HEAP_CARVEOUT_GENERIC;
break;
case NvRmHeap_IRam:
- heap_mask |= NVMEM_HEAP_CARVEOUT_IRAM;
+ heap = NVMEM_HEAP_CARVEOUT_IRAM;
break;
default:
+ heap = 0;
break;
}
+ if (heap) {
+ err = _nvmap_do_alloc(&nvmap_context.init_data,
+ (unsigned long)hMem, heap,
+ (size_t)Alignment, flags);
+ if (!err) break;
+ }
}
}
- if (!heap_mask) return NvError_InsufficientMemory;
-
- err = _nvmap_do_alloc(&nvmap_context.init_data, (unsigned long)hMem,
- heap_mask, (size_t)Alignment, flags, false, true);
- if (err) return NvError_InsufficientMemory;
- return NvSuccess;
+ return (err ? NvError_InsufficientMemory : NvSuccess);
}
void NvRmMemReadStrided(NvRmMemHandle hMem, NvU32 Offset, NvU32 SrcStride,
{
return NvError_NotSupported;
}
+
+#endif
+
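+/* returns the IO-visible address of an already-pinned handle; unpinned
+ * page-allocated handles have no stable address, so this warns and fails */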
+static u32 nvmap_get_physaddr(struct nvmap_handle *h)
+{
+ u32 addr;
+
+ if (unlikely(!atomic_add_return(0, &h->pin))) {
+ WARN_ON(1);
+ return ~0ul;
+ }
+ if (h->heap_pgalloc && h->pgalloc.contig) {
+ addr = page_to_phys(h->pgalloc.pages[0]);
+ } else if (h->heap_pgalloc) {
+ BUG_ON(!h->pgalloc.area);
+ addr = h->pgalloc.area->iovm_start;
+ } else {
+ addr = h->carveout.base;
+ }
+
+ return addr;
+}
+
+struct nvmap_handle *nvmap_alloc(
+ size_t size, size_t align,
+ unsigned int flags, void **map)
+{
+ struct nvmap_handle_ref *r = NULL;
+ struct nvmap_handle *h;
+ int err;
+
+ err = _nvmap_do_create(&nvmap_context.init_data,
+ NVMEM_IOC_CREATE, (unsigned long)size, true, &r);
+ if (err || !r)
+ return ERR_PTR(err);
+ h = r->h;
+
+ err = _nvmap_do_alloc(&nvmap_context.init_data,
+ (unsigned long)h, NVMAP_KERNEL_DEFAULT_HEAPS,
+ align, flags);
+ if (err) {
+ _nvmap_do_free(&nvmap_context.init_data, (unsigned long)h);
+ return ERR_PTR(err);
+ }
+
+ if (!map)
+ return h;
+
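+	/* page-allocated handles are mapped through vmalloc space; carveout
+	 * handles are ioremapped write-combined, preserving the sub-page
+	 * offset of the block */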
+ if (h->heap_pgalloc) {
+ *map = vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT, -1,
+ _nvmap_flag_to_pgprot(h->flags, pgprot_kernel));
+ } else {
+ size_t mapaddr = h->carveout.base;
+ size_t mapsize = h->size;
+
+ mapsize += (mapaddr & ~PAGE_MASK);
+ mapaddr &= PAGE_MASK;
+ mapsize = (mapsize + PAGE_SIZE - 1) & PAGE_MASK;
+
+ /* TODO: [ahatala 2010-06-21] honor coherency flag? */
+ *map = ioremap_wc(mapaddr, mapsize);
+ if (*map)
+ *map += (h->carveout.base - mapaddr);
+ }
+ if (!*map) {
+ _nvmap_do_free(&nvmap_context.init_data, (unsigned long)h);
+ return ERR_PTR(-ENOMEM);
+ }
+ /* TODO: [ahatala 2010-06-22] get rid of kern_map */
+ h->kern_map = *map;
+ return h;
+}
+
+void nvmap_free(struct nvmap_handle *h, void *map)
+{
+ if (map) {
+ BUG_ON(h->kern_map != map);
+
+ if (h->heap_pgalloc) {
+ vm_unmap_ram(map, h->size >> PAGE_SHIFT);
+ } else {
+ unsigned long addr = (unsigned long)map;
+			addr &= PAGE_MASK; /* recover the page-aligned ioremap_wc() return */
+ iounmap((void *)addr);
+ }
+ h->kern_map = NULL;
+ }
+ _nvmap_do_free(&nvmap_context.init_data, (unsigned long)h);
+}
+
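+/* pins a single handle, retrying until the pin succeeds, and returns its
+ * IO-visible address */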
+u32 nvmap_pin_single(struct nvmap_handle *h)
+{
+ int ret;
+ do {
+ ret = _nvmap_handle_pin_fast(1, &h);
+ if (ret) {
+ pr_err("%s: failed to pin handle\n", __func__);
+ dump_stack();
+ }
+ } while (ret);
+
+ return nvmap_get_physaddr(h);
+}
+
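+/* Pins each handle referenced by the pinarray exactly once (duplicates are
+ * detected with the NVMEM_HANDLE_VISITED flag), then patches every patch_mem
+ * handle at patch_offset with the pinned address of pin_mem plus pin_offset.
+ * The pinned handles are returned in unique_arr for later unpinning. */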
+int nvmap_pin_array(struct nvmap_pinarray_elem *arr, int num_elems,
+ struct nvmap_handle **unique_arr, int *num_unique, bool wait)
+{
+ struct nvmap_pinarray_elem *elem;
+ int i, unique_idx = 0;
+ unsigned long pfn = 0;
+ void *pteaddr = NULL;
+ int ret = 0;
+
+ mutex_lock(&nvmap_pin_lock);
+
+ /* find unique handles, pin them and collect into unpin array */
+ for (elem = arr, i = num_elems; i && !ret; i--, elem++) {
+ struct nvmap_handle *to_pin = elem->pin_mem;
+ if (!(to_pin->flags & NVMEM_HANDLE_VISITED)) {
+ if (wait)
+ ret = wait_event_interruptible(
+ nvmap_pin_wait,
+ !_nvmap_handle_pin_locked(to_pin));
+ else
+ ret = _nvmap_handle_pin_locked(to_pin);
+ if (!ret) {
+ to_pin->flags |= NVMEM_HANDLE_VISITED;
+ unique_arr[unique_idx++] = to_pin;
+ }
+ }
+ }
+ /* clear visited flags before releasing mutex */
+ i = unique_idx;
+ while (i--)
+ unique_arr[i]->flags &= ~NVMEM_HANDLE_VISITED;
+
+ mutex_unlock(&nvmap_pin_lock);
+
+ if (!ret)
+ ret = nvmap_map_pte(pfn, pgprot_kernel, &pteaddr);
+
+ if (unlikely(ret)) {
+ int do_wake = 0;
+ i = unique_idx;
+ while (i--)
+ do_wake |= _nvmap_handle_unpin(unique_arr[i]);
+ if (do_wake)
+ wake_up(&nvmap_pin_wait);
+ return -EINTR;
+ }
+
+ for (elem = arr, i = num_elems; i; i--, elem++) {
+ struct nvmap_handle *h_patch = elem->patch_mem;
+ struct nvmap_handle *h_pin = elem->pin_mem;
+ struct page *page = NULL;
+ u32* patch_addr;
+
+ /* commit iovmm mapping */
+ if (h_pin->heap_pgalloc && h_pin->pgalloc.dirty)
+ _nvmap_handle_iovmm_map(h_pin);
+
+ /* patch */
+ if (h_patch->kern_map) {
+ patch_addr = (u32*)((unsigned long)h_patch->kern_map +
+ elem->patch_offset);
+ } else {
+ unsigned long phys, new_pfn;
+ if (h_patch->heap_pgalloc) {
+ page = h_patch->pgalloc.pages[elem->patch_offset >> PAGE_SHIFT];
+ get_page(page);
+ phys = page_to_phys(page) + (elem->patch_offset & ~PAGE_MASK);
+ } else {
+ phys = h_patch->carveout.base + elem->patch_offset;
+ }
+ new_pfn = __phys_to_pfn(phys);
+ if (new_pfn != pfn) {
+ _nvmap_set_pte_at((unsigned long)pteaddr, new_pfn,
+ _nvmap_flag_to_pgprot(h_patch->flags, pgprot_kernel));
+ pfn = new_pfn;
+ }
+ patch_addr = (u32*)((unsigned long)pteaddr + (phys & ~PAGE_MASK));
+ }
+
+ *patch_addr = nvmap_get_physaddr(h_pin) + elem->pin_offset;
+
+ if (page)
+ put_page(page);
+ }
+ nvmap_unmap_pte(pteaddr);
+ *num_unique = unique_idx;
+ return 0;
+}
+
+void nvmap_unpin(struct nvmap_handle **h, int num_handles)
+{
+ int do_wake = 0;
+
+ while (num_handles--) {
+ BUG_ON(!*h);
+ do_wake |= _nvmap_handle_unpin(*h);
+ h++;
+ }
+
+ if (do_wake) wake_up(&nvmap_pin_wait);
+}
--- /dev/null
+/*
+ * include/linux/nvmap.h
+ *
+ * structure declarations for the nvmem user-space ioctls and the in-kernel
+ * nvmap API
+ *
+ * Copyright (c) 2009, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/ioctl.h>
+
+#if !defined(__KERNEL__)
+#define __user
+#endif
+
+#ifndef __NVMAP_H
+#define __NVMAP_H
+
+struct nvmem_create_handle {
+ union {
+ __u32 key; /* ClaimPreservedHandle */
+ __u32 id; /* FromId */
+ __u32 size; /* CreateHandle */
+ };
+ __u32 handle;
+};
+
+#define NVMEM_HEAP_SYSMEM (1ul<<31)
+#define NVMEM_HEAP_IOVMM (1ul<<30)
+
+/* common carveout heaps */
+#define NVMEM_HEAP_CARVEOUT_IRAM (1ul<<29)
+#define NVMEM_HEAP_CARVEOUT_GENERIC (1ul<<0)
+
+#define NVMEM_HEAP_CARVEOUT_MASK (NVMEM_HEAP_IOVMM - 1)
+
+#define NVMEM_HANDLE_UNCACHEABLE (0x0ul << 0)
+#define NVMEM_HANDLE_WRITE_COMBINE (0x1ul << 0)
+#define NVMEM_HANDLE_INNER_CACHEABLE (0x2ul << 0)
+#define NVMEM_HANDLE_CACHEABLE (0x3ul << 0)
+
+#define NVMEM_HANDLE_SECURE (0x1ul << 2)
+
+struct nvmem_alloc_handle {
+ __u32 handle;
+ __u32 heap_mask;
+ __u32 flags;
+ __u32 align;
+};
+
+struct nvmem_map_caller {
+ __u32 handle; /* hmem */
+ __u32 offset; /* offset into hmem; should be page-aligned */
+ __u32 length; /* number of bytes to map */
+ __u32 flags;
+ unsigned long addr; /* user pointer */
+};
+
+struct nvmem_rw_handle {
+ unsigned long addr; /* user pointer */
+ __u32 handle; /* hmem */
+ __u32 offset; /* offset into hmem */
+ __u32 elem_size; /* individual atom size */
+ __u32 hmem_stride; /* delta in bytes between atoms in hmem */
+ __u32 user_stride; /* delta in bytes between atoms in user */
+ __u32 count; /* number of atoms to copy */
+};
+
+struct nvmem_pin_handle {
+ unsigned long handles; /* array of handles to pin/unpin */
+ unsigned long addr; /* array of addresses to return */
+ __u32 count; /* number of entries in handles */
+};
+
+struct nvmem_handle_param {
+ __u32 handle;
+ __u32 param;
+ unsigned long result;
+};
+
+enum {
+ NVMEM_HANDLE_PARAM_SIZE = 1,
+ NVMEM_HANDLE_PARAM_ALIGNMENT,
+ NVMEM_HANDLE_PARAM_BASE,
+ NVMEM_HANDLE_PARAM_HEAP,
+};
+
+enum {
+ NVMEM_CACHE_OP_WB = 0,
+ NVMEM_CACHE_OP_INV,
+ NVMEM_CACHE_OP_WB_INV,
+};
+
+struct nvmem_cache_op {
+ unsigned long addr;
+ __u32 handle;
+ __u32 len;
+ __s32 op;
+};
+
+#define NVMEM_IOC_MAGIC 'N'
+
+/* Creates a new memory handle. On input, the argument is the size of the new
+ * handle; on return, the argument is the name of the new handle
+ */
+#define NVMEM_IOC_CREATE _IOWR(NVMEM_IOC_MAGIC, 0, struct nvmem_create_handle)
+#define NVMEM_IOC_CLAIM _IOWR(NVMEM_IOC_MAGIC, 1, struct nvmem_create_handle)
+#define NVMEM_IOC_FROM_ID _IOWR(NVMEM_IOC_MAGIC, 2, struct nvmem_create_handle)
+
+/* Actually allocates memory for the specified handle */
+#define NVMEM_IOC_ALLOC _IOW (NVMEM_IOC_MAGIC, 3, struct nvmem_alloc_handle)
+
+/* Frees a memory handle, unpinning any pinned pages and unmapping any mappings
+ */
+#define NVMEM_IOC_FREE _IO (NVMEM_IOC_MAGIC, 4)
+
+/* Maps the region of the specified handle into a user-provided virtual address
+ * that was previously created via an mmap syscall on this fd */
+#define NVMEM_IOC_MMAP _IOWR(NVMEM_IOC_MAGIC, 5, struct nvmem_map_caller)
+
+/* Reads/writes data (possibly strided) from a user-provided buffer into the
+ * hmem at the specified offset */
+#define NVMEM_IOC_WRITE _IOW (NVMEM_IOC_MAGIC, 6, struct nvmem_rw_handle)
+#define NVMEM_IOC_READ _IOW (NVMEM_IOC_MAGIC, 7, struct nvmem_rw_handle)
+
+#define NVMEM_IOC_PARAM _IOWR(NVMEM_IOC_MAGIC, 8, struct nvmem_handle_param)
+
+/* Pins a list of memory handles into IO-addressable memory (either IOVMM
+ * space or physical memory, depending on the allocation), and returns the
+ * address. Handles may be pinned recursively. */
+#define NVMEM_IOC_PIN_MULT _IOWR(NVMEM_IOC_MAGIC, 10, struct nvmem_pin_handle)
+#define NVMEM_IOC_UNPIN_MULT _IOW (NVMEM_IOC_MAGIC, 11, struct nvmem_pin_handle)
+
+#define NVMEM_IOC_CACHE _IOW (NVMEM_IOC_MAGIC, 12, struct nvmem_cache_op)
+
+/* Returns a global ID usable to allow a remote process to create a handle
+ * reference to the same handle */
+#define NVMEM_IOC_GET_ID _IOWR(NVMEM_IOC_MAGIC, 13, struct nvmem_create_handle)
+
+#define NVMEM_IOC_MAXNR (_IOC_NR(NVMEM_IOC_GET_ID))
+
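+/* Illustrative user-space sequence (a sketch only; the device node name and
+ * the argument convention for NVMEM_IOC_FREE are assumptions, not defined in
+ * this header):
+ *
+ *	struct nvmem_create_handle ch = { .size = 4096 };
+ *	struct nvmem_alloc_handle ah = { 0 };
+ *	int fd = open("/dev/nvmap", O_RDWR);
+ *
+ *	ioctl(fd, NVMEM_IOC_CREATE, &ch);	// ch.handle now names the handle
+ *	ah.handle = ch.handle;
+ *	ah.heap_mask = NVMEM_HEAP_CARVEOUT_GENERIC;
+ *	ah.flags = NVMEM_HANDLE_WRITE_COMBINE;
+ *	ah.align = 4096;
+ *	ioctl(fd, NVMEM_IOC_ALLOC, &ah);	// back the handle with memory
+ *	ioctl(fd, NVMEM_IOC_FREE, ch.handle);	// release the handle again
+ */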
+#if defined(__KERNEL__)
+
+struct nvmap_handle;
+
+struct nvmap_pinarray_elem {
+ struct nvmap_handle *patch_mem;
+ u32 patch_offset;
+ struct nvmap_handle *pin_mem;
+ u32 pin_offset;
+};
+
+struct nvmap_handle *nvmap_alloc(
+ size_t size, size_t align,
+ unsigned int flags, void **map);
+void nvmap_free(struct nvmap_handle *h, void *map);
+u32 nvmap_pin_single(struct nvmap_handle *h);
+int nvmap_pin_array(struct nvmap_pinarray_elem *arr, int num_elems,
+ struct nvmap_handle **unique_arr, int *num_unique, bool wait);
+void nvmap_unpin(struct nvmap_handle **h, int num_handles);
+
+int nvmap_add_carveout_heap(unsigned long base, size_t size,
+ const char *name, unsigned int bitmask);
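+/* Illustrative in-kernel usage (a sketch; error handling abbreviated):
+ *
+ *	void *va;
+ *	u32 pa;
+ *	struct nvmap_handle *h;
+ *
+ *	h = nvmap_alloc(SZ_4K, SZ_4K, NVMEM_HANDLE_WRITE_COMBINE, &va);
+ *	if (IS_ERR(h))
+ *		return PTR_ERR(h);
+ *	pa = nvmap_pin_single(h);	// address usable by the device
+ *	...
+ *	nvmap_unpin(&h, 1);
+ *	nvmap_free(h, va);
+ */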
+
+#endif
+
+#endif