/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */
#include <core/engine.h>
#include <linux/swiotlb.h>

#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/bar.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
	struct nouveau_engine *engine;

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		pfb->tile.fini(pfb, i, tile);

	if (pitch)
		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);

	pfb->tile.prog(pfb, i, tile);

	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
		engine->tile_prog(engine, i);
	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
		engine->tile_prog(engine, i);
}
static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}
static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct nouveau_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		if (fence) {
			/* Mark it as pending. */
			tile->fence = fence;
			nouveau_fence_ref(fence);
		}

		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}
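
/*
 * Pick a hardware tile region for the given VRAM range: scan all regions,
 * claim the first free one when a pitch is requested, and clear regions that
 * are no longer in use.  Summary comment added for clarity; behaviour is
 * inferred from the code below.
 */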
static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && pfb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size,
					   pitch, flags);
	return found;
}
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}
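
/*
 * Adjust the requested size/alignment for the chipset's tiling and
 * page-size requirements before the buffer is handed to TTM.
 */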
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_device *device = nv_device(drm->device);

	if (device->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (device->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);
			} else if (device->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);
			} else if (device->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);
			} else if (device->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}
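
/*
 * Allocate and initialise a nouveau buffer object and register it with TTM.
 * On success the new object is returned through @pnvbo.
 */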
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &drm->ttm.bdev;

	nvbo->page_shift = 12;
	if (drm->client.base.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	u32 vram_pages = pfb->ram.size >> PAGE_SHIFT;

	if (nv_device(drm->device)->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}
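
/*
 * Pin a buffer object into the requested memory type and account for the
 * space it consumes; unpin reverses the accounting once the last pin
 * reference is dropped.
 */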
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		ret = -EINVAL;
		goto out;
	}

	if (nvbo->pin_refcnt++)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available -= bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available -= bo->mem.size;
			break;
		default:
			break;
		}
	}
out:
	ttm_bo_unreserve(bo);
	return ret;
}
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	if (--nvbo->pin_refcnt)
		goto out;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

out:
	ttm_bo_unreserve(bo);
	return ret;
}
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
			      interruptible, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}
void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}
u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}
void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
					 page_flags, dummy_read);
	}

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}
static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}
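
/*
 * Describe each TTM memory type (system, VRAM, GART) to TTM: which manager
 * drives it, whether it is CPU-mappable, and the caching modes it allows.
 */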
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (nv_device(drm->device)->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (nv_device(drm->device)->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
		if (drm->agp.stat != ENABLED)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.stat == ENABLED) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}
/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
					no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}
static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
	}
	return ret;
}
static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}
static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
	}
	return ret;
}
static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}
static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}
static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* COPY */);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
	}
	return ret;
}
static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}
static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING  (chan, NvNotify0);
		OUT_RING  (chan, NvDmaFB);
		OUT_RING  (chan, NvDmaFB);
	}

	return ret;
}
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int src_tiled = !!node->memtype;
	int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
	int ret;

	while (length) {
		u32 amount, stride, height;

		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
		if (ret)
			return ret;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (src_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (dst_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}
static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING  (chan, NvNotify0);
	}

	return ret;
}
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return NvDmaFB;
}
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}
static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
			     PAGE_SHIFT, node->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);
	else
		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);

	return 0;
}
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->channel;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	mutex_lock(&chan->cli->mutex);

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (nv_device(drm->device)->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;

		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
		if (ret)
			goto out;

		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
		if (ret)
			goto out;
	}

	ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_gpu, new_mem);
	}

out:
	mutex_unlock(&chan->cli->mutex);
	return ret;
}
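
/*
 * Probe the copy-engine / M2MF classes the device exposes, newest first, and
 * hook the first one that initialises successfully into drm->ttm.move;
 * otherwise buffer moves fall back to the CPU.
 */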
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;
		u32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_object *object;
		struct nouveau_channel *chan;
		u32 handle = (mthd->engine << 16) | mthd->oclass;

		if (mthd->init == nve0_bo_move_init)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
					 mthd->oclass, NULL, 0, &object);
		if (ret == 0) {
			ret = mthd->init(chan, handle);
			if (ret) {
				nouveau_object_del(nv_object(drm),
						   chan->handle, handle);
				continue;
			}

			drm->ttm.move = mthd->exec;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}
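
/*
 * Moving between VRAM and system memory with the copy engines goes through a
 * GART bounce buffer: "flipd" copies out of VRAM via a temporary TT
 * placement, "flips" binds the TT copy first and then copies into VRAM.
 */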
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else
		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
		    nvbo->page_shift == vma->vm->vmm->spg_shift) {
			if (((struct nouveau_mem *)new_mem->mm_node)->sg)
				nouveau_vm_map_sg_table(vma, 0,
					new_mem->num_pages << PAGE_SHIFT,
					new_mem->mm_node);
			else
				nouveau_vm_map_sg(vma, 0,
					new_mem->num_pages << PAGE_SHIFT,
					new_mem->mm_node);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (nv_device(drm->device)->card_type >= NV_10) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;

	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (nv_device(drm->device)->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* CPU copy if we have no accelerated method available */
	if (!drm->ttm.move) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr,
					    no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr,
					    no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr,
					   no_wait_gpu, new_mem);
	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);

out:
	if (nv_device(drm->device)->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}
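
/*
 * Resolve a TTM memory region to a CPU-visible bus address: AGP aperture for
 * GART, PCI BAR1 for VRAM (going through the BAR mapping on NV50+).
 */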
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
		if (drm->agp.stat == ENABLED) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = !dev->agp->cant_use_aperture;
		}
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
		if (nv_device(drm->device)->card_type >= NV_50) {
			struct nouveau_bar *bar = nouveau_bar(drm->device);
			struct nouveau_mem *node = mem->mm_node;

			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
					&node->bar_vma);
			if (ret)
				return ret;

			mem->bus.offset = node->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_bar *bar = nouveau_bar(drm->device);
	struct nouveau_mem *node = mem->mm_node;

	if (!node->bar_vma.node)
		return;

	bar->unmap(bar, &node->bar_vma);
}
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_device *device = nv_device(drm->device);
	u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (nv_device(drm->device)->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable;
	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
}
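
/*
 * Back a ttm_tt with pages and DMA mappings.  dma-buf imports arrive with
 * pages already, AGP and swiotlb use their own helpers, everything else is
 * mapped page by page through the PCI DMA API.
 */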
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct drm_device *dev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_populate(ttm);
	}

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
			while (--i) {
				pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}
static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct drm_device *dev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence = NULL;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}
static void
nouveau_bo_fence_unref(void **sync_obj)
{
	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
}

static void *
nouveau_bo_fence_ref(void *sync_obj)
{
	return nouveau_fence_ref(sync_obj);
}

static bool
nouveau_bo_fence_signalled(void *sync_obj)
{
	return nouveau_fence_done(sync_obj);
}

static int
nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
{
	return nouveau_fence_wait(sync_obj, lazy, intr);
}

static int
nouveau_bo_fence_flush(void *sync_obj)
{
	return 0;
}
struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_bo_fence_signalled,
	.sync_obj_wait = nouveau_bo_fence_wait,
	.sync_obj_flush = nouveau_bo_fence_flush,
	.sync_obj_unref = nouveau_bo_fence_unref,
	.sync_obj_ref = nouveau_bo_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}
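
/*
 * Map a buffer object into a client's virtual address space; the returned
 * nouveau_vma is tracked on the bo's vma_list so moves keep it up to date.
 */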
int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
		if (node->sg)
			nouveau_vm_map_sg_table(vma, 0, size, node);
		else
			nouveau_vm_map_sg(vma, 0, size, node);
	}

	list_add_tail(&vma->head, &nvbo->vma_list);
	return 0;
}
void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
			spin_lock(&nvbo->bo.bdev->fence_lock);
			ttm_bo_wait(&nvbo->bo, false, false, false);
			spin_unlock(&nvbo->bo.bdev->fence_lock);
			nouveau_vm_unmap(vma);
		}

		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}