/*
 * arch/arm/mach-tegra/iovmm-gart.c
 *
 * Tegra I/O VMM implementation for GART devices in Tegra and Tegra 2 series
 * systems-on-a-chip.
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
24 #include <linux/module.h>
25 #include <linux/platform_device.h>
26 #include <linux/spinlock.h>
27 #include <linux/slab.h>
28 #include <linux/vmalloc.h>
31 #include <asm/cacheflush.h>
33 #include <mach/iovmm.h>
#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
/* GART register offsets within the aperture remapped in gart_probe().
 * PTEs are accessed indirectly: write the IOVA to ENTRY_ADDR, then
 * read/write the PTE through ENTRY_DATA. */
#define GART_CONFIG 0x24	/* global GART enable (1 = on, 0 = off) */
#define GART_ENTRY_ADDR 0x28	/* selects which PTE ENTRY_DATA accesses */
#define GART_ENTRY_DATA 0x2c	/* read/write port for the selected PTE */

#define VMM_NAME "iovmm-gart"
#define DRIVER_NAME "tegra_gart"

/* GART translates in 4KB pages; gart_probe() verifies this matches the
 * CPU's PAGE_SHIFT. */
#define GART_PAGE_SHIFT (12)
#define GART_PAGE_MASK (~((1<<GART_PAGE_SHIFT)-1))
	u32 page_count; /* total remappable size */
	tegra_iovmm_addr_t iovmm_base; /* offset to apply to vmm_area */
	/* embedded iovmm device plus its single domain; gart_alloc_domain()
	 * always returns this one domain, so all clients share it */
	struct tegra_iovmm_device iovmm;
	struct tegra_iovmm_domain domain;
	bool needs_barrier; /* emulator WAR */
/* tegra_iovmm_device_ops implementations (installed below) */
static int gart_map(struct tegra_iovmm_device *, struct tegra_iovmm_area *);
static void gart_unmap(struct tegra_iovmm_device *,
	struct tegra_iovmm_area *, bool);
static void gart_map_pfn(struct tegra_iovmm_device *,
	struct tegra_iovmm_area *, tegra_iovmm_addr_t, unsigned long);
static struct tegra_iovmm_domain *gart_alloc_domain(
	struct tegra_iovmm_device *, struct tegra_iovmm_client *);

/* platform-bus entry points */
static int gart_probe(struct platform_device *);
static int gart_remove(struct platform_device *);
/* suspend/resume run through the iovmm layer, not the platform bus */
static int gart_suspend(struct tegra_iovmm_device *dev);
static void gart_resume(struct tegra_iovmm_device *dev);
/* Operations handed to the tegra_iovmm core for this device. */
static struct tegra_iovmm_device_ops tegra_iovmm_gart_ops = {
	.map_pfn = gart_map_pfn,
	.alloc_domain = gart_alloc_domain,
	.suspend = gart_suspend,
	.resume = gart_resume,
/* Platform driver binding for the "tegra_gart" device. */
static struct platform_driver tegra_iovmm_gart_drv = {
	.remove = gart_remove,
/*
 * Save every GART PTE into gart->savedata so gart_resume() can rebuild
 * the translation table after the controller loses state over suspend.
 * Each entry is read via the ENTRY_ADDR/ENTRY_DATA indirect pair, under
 * pte_lock so no mapping changes mid-walk.
 */
static int gart_suspend(struct tegra_iovmm_device *dev)
	struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
	spin_lock(&gart->pte_lock);
	/* walk the whole aperture one GART page at a time */
	reg = gart->iovmm_base;
	for (i=0; i<gart->page_count; i++) {
		writel(reg, gart->regs + GART_ENTRY_ADDR);
		gart->savedata[i] = readl(gart->regs + GART_ENTRY_DATA);
		reg += 1 << GART_PAGE_SHIFT;
	spin_unlock(&gart->pte_lock);
/*
 * Enable the GART and program every PTE in the aperture.
 * @data: optional array of gart->page_count saved PTE values (the format
 *        produced by gart_suspend()); when NULL all entries are written
 *        as 0, i.e. invalid.
 * Callers (gart_probe(), gart_resume()) take gart->pte_lock around this.
 */
static void do_gart_setup(struct gart_device *gart, const u32 *data)
	writel(1, gart->regs + GART_CONFIG);
	reg = gart->iovmm_base;
	for (i=0; i<gart->page_count; i++) {
		writel(reg, gart->regs + GART_ENTRY_ADDR);
		writel((data) ? data[i] : 0, gart->regs + GART_ENTRY_DATA);
		reg += 1 << GART_PAGE_SHIFT;
131 static void gart_resume(struct tegra_iovmm_device *dev)
133 struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
135 if (!gart || !gart->enable || (gart->enable && !gart->savedata))
138 spin_lock(&gart->pte_lock);
139 do_gart_setup(gart, gart->savedata);
140 spin_unlock(&gart->pte_lock);
/*
 * Platform-bus removal: disable the GART hardware, detach from the
 * iovmm core and free the PTE save area.
 */
static int gart_remove(struct platform_device *pdev)
	struct gart_device *gart = platform_get_drvdata(pdev);
	/* clear the enable bit so no further translations occur */
	writel(0, gart->regs + GART_CONFIG);
	platform_set_drvdata(pdev, NULL);
	tegra_iovmm_unregister(&gart->iovmm);
	vfree(gart->savedata);
/*
 * Probe the GART device: map its register aperture (MEM resource 0),
 * derive the remappable IOVA range from MEM resource 1, register with
 * the iovmm core, allocate the suspend save area and program an empty
 * (all-invalid) translation table.
 */
static int gart_probe(struct platform_device *pdev)
	struct gart_device *gart = NULL;
	struct resource *res, *res_remap;
	void __iomem *gart_regs = NULL;
		pr_err(DRIVER_NAME ": platform_device required\n");
	/* GART PTEs and CPU pages must be the same size (4KB) */
	if (PAGE_SHIFT != GART_PAGE_SHIFT) {
		pr_err(DRIVER_NAME ": GART and CPU page size must match\n");
	/* the GART memory aperture is required */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res_remap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res || !res_remap) {
		pr_err(DRIVER_NAME ": GART memory aperture expected\n");
	gart = kzalloc(sizeof(*gart), GFP_KERNEL);
		pr_err(DRIVER_NAME ": failed to allocate tegra_iovmm_device\n");
	/* NOTE(review): resource_size(res) is the idiomatic spelling of
	 * end - start + 1 */
	gart_regs = ioremap_wc(res->start, res->end - res->start + 1);
		pr_err(DRIVER_NAME ": failed to remap GART registers\n");
	gart->iovmm.name = VMM_NAME;
	gart->iovmm.ops = &tegra_iovmm_gart_ops;
	gart->iovmm.pgsize_bits = GART_PAGE_SHIFT;
	spin_lock_init(&gart->pte_lock);
	platform_set_drvdata(pdev, gart);
	e = tegra_iovmm_register(&gart->iovmm);
	/* the single shared domain spans the whole remap aperture */
	e = tegra_iovmm_domain_init(&gart->domain, &gart->iovmm,
		(tegra_iovmm_addr_t)res_remap->start,
		(tegra_iovmm_addr_t)res_remap->end+1);
	gart->regs = gart_regs;
	gart->iovmm_base = (tegra_iovmm_addr_t)res_remap->start;
	/* aperture size in bytes, then converted to a page count */
	gart->page_count = res_remap->end - res_remap->start + 1;
	gart->page_count >>= GART_PAGE_SHIFT;
	/* one saved u32 PTE per page, consumed by gart_suspend/resume */
	gart->savedata = vmalloc(sizeof(u32)*gart->page_count);
	if (!gart->savedata) {
		pr_err(DRIVER_NAME ": failed to allocate context save area\n");
	/* start with an enabled GART whose entries are all invalid */
	spin_lock(&gart->pte_lock);
	do_gart_setup(gart, NULL);
	spin_unlock(&gart->pte_lock);
	/* error path; NOTE(review): vfree(NULL) is a no-op, the
	 * gart->savedata guard is redundant */
	if (gart && gart->savedata)
		vfree(gart->savedata);
248 static int __devinit gart_init(void)
250 return platform_driver_register(&tegra_iovmm_gart_drv);
253 static void __exit gart_exit(void)
255 return platform_driver_unregister(&tegra_iovmm_gart_drv);
/* Build a GART PTE for a page frame number: bit 31 marks the entry
 * valid, the remaining bits hold the page's physical byte address. */
#define GART_PTE(_pfn) (0x80000000ul | ((_pfn)<<PAGE_SHIFT))
/*
 * Populate the GART for an iovm area: for each GART page, ask the
 * client (lock_makeresident) to pin a page and hand back its pfn, then
 * write the matching PTE through the indirect register pair.  On a
 * failure partway through, the entries written so far are unwound —
 * each page released and its PTE cleared — before returning an error.
 */
static int gart_map(struct tegra_iovmm_device *dev,
	struct tegra_iovmm_area *iovma)
	struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
	unsigned long gart_page, count;
	gart_page = iovma->iovm_start;
	count = iovma->iovm_length >> GART_PAGE_SHIFT;
	for (i=0; i<count; i++) {
		/* pin the i'th page; offset argument is in bytes */
		pfn = iovma->ops->lock_makeresident(iovma, i<<PAGE_SHIFT);
		spin_lock(&gart->pte_lock);
		writel(gart_page, gart->regs + GART_ENTRY_ADDR);
		writel(GART_PTE(pfn), gart->regs + GART_ENTRY_DATA);
		gart_page += 1 << GART_PAGE_SHIFT;
		spin_unlock(&gart->pte_lock);
	/* error path: roll back the entries already written, walking
	 * gart_page back down and clearing each PTE to 0 (invalid) */
	spin_lock(&gart->pte_lock);
		iovma->ops->release(iovma, i<<PAGE_SHIFT);
		gart_page -= 1 << GART_PAGE_SHIFT;
		writel(gart_page, gart->regs + GART_ENTRY_ADDR);
		writel(0, gart->regs + GART_ENTRY_DATA);
	spin_unlock(&gart->pte_lock);
/*
 * Tear down the translation for an iovm area: give each page back to
 * the client (when it supplied a release hook) and clear the PTE so
 * the GART no longer translates that IOVA.  @decommit is part of the
 * iovmm interface but is not referenced in the visible body.
 */
static void gart_unmap(struct tegra_iovmm_device *dev,
	struct tegra_iovmm_area *iovma, bool decommit)
	struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
	unsigned long gart_page, count;
	count = iovma->iovm_length >> GART_PAGE_SHIFT;
	gart_page = iovma->iovm_start;
	spin_lock(&gart->pte_lock);
	for (i=0; i<count; i++) {
		if (iovma->ops && iovma->ops->release)
			iovma->ops->release(iovma, i<<PAGE_SHIFT);
		/* a PTE of 0 is invalid */
		writel(gart_page, gart->regs + GART_ENTRY_ADDR);
		writel(0, gart->regs + GART_ENTRY_DATA);
		gart_page += 1 << GART_PAGE_SHIFT;
	spin_unlock(&gart->pte_lock);
/*
 * Program a single PTE: point IOVA @offs at the given page frame.
 * The pfn must refer to a valid physical page (BUG_ON otherwise);
 * no client lock_makeresident/release callbacks are involved here.
 */
static void gart_map_pfn(struct tegra_iovmm_device *dev,
	struct tegra_iovmm_area *iovma, tegra_iovmm_addr_t offs,
	struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
	BUG_ON(!pfn_valid(pfn));
	spin_lock(&gart->pte_lock);
	writel(offs, gart->regs + GART_ENTRY_ADDR);
	writel(GART_PTE(pfn), gart->regs + GART_ENTRY_DATA);
	spin_unlock(&gart->pte_lock);
343 static struct tegra_iovmm_domain *gart_alloc_domain(
344 struct tegra_iovmm_device *dev, struct tegra_iovmm_client *client)
346 struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
347 return &gart->domain;
/* Register at subsys_initcall time so the GART is up before dependent
 * device drivers probe; module_exit pairs with it for unload. */
subsys_initcall(gart_init);
module_exit(gart_exit);