/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
						   bool force);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj);

static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
					     struct shrink_control *sc);
static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
					    struct shrink_control *sc);
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
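
/*
 * On platforms with a shared last-level cache (HAS_LLC) the CPU cache is
 * coherent with GPU access for any cache level; without an LLC only snooped
 * objects (anything other than I915_CACHE_NONE) are coherent, so writes to
 * uncached objects must be clflushed by hand.
 */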
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}
static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}
static bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound_any(obj) && !obj->active;
}
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
				  args->gtt_end);
	dev_priv->gtt.mappable_end = args->gtt_end;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (i915_gem_obj_is_pinned(obj))
			pinned += i915_gem_obj_ggtt_size(obj);
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}
void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}
static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
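
/*
 * Copy helpers for objects whose pages are bit-17 swizzled. On affected
 * machines the hardware swaps the two 64-byte halves of each 128-byte span
 * whenever bit 17 of the page's physical address is set; the XOR with 64
 * below undoes that. As an illustration, copying bytes 0..127 of a swizzled
 * page reads GPU bytes 64..127 first and then bytes 0..63:
 *
 *	gpu_offset   0 ^ 64 ->  64	(first 64-byte cacheline)
 *	gpu_offset  64 ^ 64 ->   0	(second 64-byte cacheline)
 *
 * which is why the loops never let a single copy cross a 64-byte boundary
 * (see cacheline_end/this_length).
 */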
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}
static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}
/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (!obj->base.filp)
		return -EINVAL;

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;

		i915_gem_object_retire(obj);
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	return ret;
}
/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}
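
/*
 * Pread is structured as a per-page fast path and slow path. The fast path
 * copies via kmap_atomic() while struct_mutex is held and therefore must not
 * fault; it also cannot handle bit-17 swizzled pages (shmem_pread_fast()
 * above bails with -EINVAL for those). On any fast-path failure the loop
 * drops struct_mutex, prefaults the user buffer once, and retries the page
 * with the sleeping kmap()/copy variants in shmem_pread_slow().
 */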
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_ggtt_unpin(obj);
out:
	return ret;
}
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}
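
/*
 * As with pread above, pwrite tries the atomic fast path per page first and
 * falls back to the sleeping slow path after dropping struct_mutex. Three
 * flush decisions feed the helpers: needs_clflush_before invalidates stale
 * cachelines ahead of a partial write, needs_clflush_after writes dirty
 * cachelines back for immediate GPU consumption, and bit-17 swizzled pages
 * always take the slow path (the fast path refuses them with -EINVAL).
 */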
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;

		i915_gem_object_retire(obj);
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire page. */
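		/* Illustration: with a 64-byte clflush line, writing 48 bytes
		 * at page offset 16 gives (16 | 48) & 63 == 48, i.e. non-zero,
		 * so the destination lines are clflushed first; a 64-byte
		 * write at offset 64 gives 0 and the flush is skipped. */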
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				i915_gem_chipset_flush(dev);
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);

	return ret;
}
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}
/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_seqno)
		ret = i915_add_request(ring, NULL);

	return ret;
}
static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}
static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_ring_buffer *ring)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}
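
/* Each file may boost the GPU frequency at most once per busy period:
 * atomic_xchg() returns the previous value of rps_wait_boost, so only the
 * first waiter for a given file gets to boost (a NULL file_priv, i.e. a
 * kernel-internal wait, is always allowed to) until the flag is cleared
 * again when the file goes idle. */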
static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
	if (file_priv == NULL)
		return true;

	return !atomic_xchg(&file_priv->rps_wait_boost, true);
}
/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: the seqno to wait for
 * @reset_counter: reset sequence associated with the given seqno
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			unsigned reset_counter,
			bool interruptible,
			struct timespec *timeout,
			struct drm_i915_file_private *file_priv)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const bool irq_test_in_progress =
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
	struct timespec before, now;
	DEFINE_WAIT(wait);
	unsigned long timeout_expire;
	int ret;

	WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;

	if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
		gen6_rps_boost(dev_priv);
		if (file_priv)
			mod_delayed_work(dev_priv->wq,
					 &file_priv->mm.idle_work,
					 msecs_to_jiffies(100));
	}

	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	trace_i915_gem_request_wait_begin(ring, seqno);
	getrawmonotonic(&before);
	for (;;) {
		struct timer_list timer;

		prepare_to_wait(&ring->irq_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
			 * is truly gone. */
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
			if (ret == 0)
				ret = -EAGAIN;
			break;
		}

		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
			ret = 0;
			break;
		}

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout && time_after_eq(jiffies, timeout_expire)) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, ring)) {
			unsigned long expire;

			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
			mod_timer(&timer, expire);
		}

		io_schedule();

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
	getrawmonotonic(&now);
	trace_i915_gem_request_wait_end(ring, seqno);

	if (!irq_test_in_progress)
		ring->irq_put(ring);

	finish_wait(&ring->irq_queue, &wait);

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
		if (!timespec_valid(timeout)) /* i.e. negative time remains */
			set_normalized_timespec(timeout, 0, 0);
	}

	return ret;
}
/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
			    interruptible, NULL, NULL);
}
static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
				     struct intel_ring_buffer *ring)
{
	if (!obj->active)
		return 0;

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 *
	 * Note that the last_write_seqno is always the earlier of
	 * the two (read/write) seqno, so if we have successfully waited,
	 * we know we have passed the last write.
	 */
	obj->last_write_seqno = 0;

	return 0;
}
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct intel_ring_buffer *ring = obj->ring;
	u32 seqno;
	int ret;

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}
/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    struct drm_i915_file_private *file_priv,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = obj->ring;
	unsigned reset_counter;
	u32 seqno;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
	mutex_lock(&dev->struct_mutex);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj,
							  file->driver_priv,
							  !write_domain);
	if (ret)
		goto unref;

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

unref:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_display)
		i915_gem_object_flush_cpu_write_domain(obj, true);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->filp) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	intel_runtime_pm_get(dev_priv);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon reacquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
	if (ret)
		goto unlock;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
		ret = -EINVAL;
		goto unlock;
	}

	/* Now bind it into the GTT if needed */
	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
	if (ret)
		goto unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto unpin;

	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto unpin;

	obj->fault_mappable = true;

	pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
	pfn >>= PAGE_SHIFT;
	pfn += page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unpin:
	i915_gem_object_ggtt_unpin(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
		/* If this -EIO is due to a gpu hang, give the reset code a
		 * chance to clean up the mess. Otherwise return the proper
		 * SIGBUS. */
		if (i915_terminally_wedged(&dev_priv->gpu_error)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		ret = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		ret = VM_FAULT_OOM;
		break;
	case -ENOSPC:
	case -EFAULT:
		ret = VM_FAULT_SIGBUS;
		break;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		ret = VM_FAULT_SIGBUS;
		break;
	}

	intel_runtime_pm_put(dev_priv);
	return ret;
}
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
	struct i915_vma *vma;

	/*
	 * Only the global gtt is relevant for gtt memory mappings, so restrict
	 * list traversal to objects bound into the global address space. Note
	 * that the active list should be empty, but better safe than sorry.
	 */
	WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
	list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
		i915_gem_release_mmap(vma->obj);
	list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
		i915_gem_release_mmap(vma->obj);
}
/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	if (!obj->fault_mappable)
		return;

	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);
	obj->fault_mappable = false;
}
uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
	uint32_t gtt_size;

	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (INTEL_INFO(dev)->gen == 3)
		gtt_size = 1024*1024;
	else
		gtt_size = 512*1024;

	while (gtt_size < size)
		gtt_size <<= 1;

	return gtt_size;
}
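
/* Illustration: on gen3 a 700KB tiled object needs a 1MB fence region (the
 * 1MB base already covers it), while a 1.5MB object doubles up to 2MB; on
 * gen2 the base is 512KB. Gen4+ fences have no power-of-two size
 * requirement, hence the early return above. */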
/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced)
{
	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int ret;

	if (drm_vma_node_has_offset(&obj->base.vma_node))
		return 0;

	dev_priv->mm.shrinker_no_lock_stealing = true;

	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	/* Badly fragmented mmap space? The only way we can recover
	 * space is by destroying unwanted objects. We can't randomly release
	 * mmap_offsets as userspace expects them to be persistent for the
	 * lifetime of the objects. The closest we can is to release the
	 * offsets on purgeable objects by truncating it and marking it purged,
	 * which prevents userspace from ever using that object again.
	 */
	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	i915_gem_shrink_all(dev_priv);
	ret = drm_gem_create_mmap_offset(&obj->base);
out:
	dev_priv->mm.shrinker_no_lock_stealing = false;

	return ret;
}
static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}
int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->base.size > dev_priv->gtt.mappable_end) {
		ret = -E2BIG;
		goto out;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	struct inode *inode;

	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	inode = file_inode(obj->base.filp);
	shmem_truncate_range(inode, 0, (loff_t)-1);

	obj->madv = __I915_MADV_PURGED;
}
static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
	return obj->madv == I915_MADV_DONTNEED;
}
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		WARN_ON(ret != -EIO);
		i915_gem_clflush_object(obj, true);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (obj->dirty)
			set_page_dirty(page);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}
int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	const struct drm_i915_gem_object_ops *ops = obj->ops;

	if (obj->pages == NULL)
		return 0;

	if (obj->pages_pin_count)
		return -EBUSY;

	BUG_ON(i915_gem_obj_bound_any(obj));

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
	list_del(&obj->global_list);

	ops->put_pages(obj);
	obj->pages = NULL;

	if (i915_gem_object_is_purgeable(obj))
		i915_gem_object_truncate(obj);

	return 0;
}
static unsigned long
__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
		  bool purgeable_only)
{
	struct list_head still_in_list;
	struct drm_i915_gem_object *obj;
	unsigned long count = 0;

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count equals 0.
	 */
	INIT_LIST_HEAD(&still_in_list);
	while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
		obj = list_first_entry(&dev_priv->mm.unbound_list,
				       typeof(*obj), global_list);
		list_move_tail(&obj->global_list, &still_in_list);

		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
			continue;

		drm_gem_object_reference(&obj->base);

		if (i915_gem_object_put_pages(obj) == 0)
			count += obj->base.size >> PAGE_SHIFT;

		drm_gem_object_unreference(&obj->base);
	}
	list_splice(&still_in_list, &dev_priv->mm.unbound_list);

	INIT_LIST_HEAD(&still_in_list);
	while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
		struct i915_vma *vma, *v;

		obj = list_first_entry(&dev_priv->mm.bound_list,
				       typeof(*obj), global_list);
		list_move_tail(&obj->global_list, &still_in_list);

		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
			continue;

		drm_gem_object_reference(&obj->base);

		list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
			if (i915_vma_unbind(vma))
				break;

		if (i915_gem_object_put_pages(obj) == 0)
			count += obj->base.size >> PAGE_SHIFT;

		drm_gem_object_unreference(&obj->base);
	}
	list_splice(&still_in_list, &dev_priv->mm.bound_list);

	return count;
}
static unsigned long
i915_gem_purge(struct drm_i915_private *dev_priv, long target)
{
	return __i915_gem_shrink(dev_priv, target, true);
}

static unsigned long
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	i915_gem_evict_everything(dev_priv->dev);
	return __i915_gem_shrink(dev_priv, LONG_MAX, false);
}
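
/*
 * Page allocation below is attempted in stages of increasing aggression:
 * first with __GFP_NORETRY | __GFP_NOWARN and IO/reclaim masked out so a
 * failure is cheap and quiet, then again after purging our own purgeable
 * objects, and finally, after a full shrink of all GEM objects, with IO
 * and reclaim enabled so the allocation may block (and OOM) like a normal
 * one.
 */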
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int page_count, i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sg_page_iter sg_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	gfp_t gfp;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	page_count = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping = file_inode(obj->base.filp)->i_mapping;
	gfp = mapping_gfp_mask(mapping);
	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
	gfp &= ~(__GFP_IO | __GFP_WAIT);
	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
			i915_gem_purge(dev_priv, page_count);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
			gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
			gfp |= __GFP_IO | __GFP_WAIT;

			i915_gem_shrink_all(dev_priv);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (IS_ERR(page))
				goto err_pages;

			gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
			gfp &= ~(__GFP_IO | __GFP_WAIT);
		}
#ifdef CONFIG_SWIOTLB
		if (swiotlb_nr_tbl()) {
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
			sg = sg_next(sg);
			continue;
		}
#endif
		if (!i || page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
#endif
		sg_mark_end(sg);
	obj->pages = st;

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);

	return 0;

err_pages:
	sg_mark_end(sg);
	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
		page_cache_release(sg_page_iter_page(&sg_iter));
	sg_free_table(st);
	kfree(st);
	return PTR_ERR(page);
}
/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_get_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_put_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	const struct drm_i915_gem_object_ops *ops = obj->ops;
	int ret;

	if (obj->pages)
		return 0;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	BUG_ON(obj->pages_pin_count);

	ret = ops->get_pages(obj);
	if (ret)
		return ret;

	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
	return 0;
}
static void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *ring)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 seqno = intel_ring_get_seqno(ring);

	BUG_ON(ring == NULL);
	if (obj->ring != ring && obj->last_write_seqno) {
		/* Keep the seqno relative to the current ring */
		obj->last_write_seqno = seqno;
	}
	obj->ring = ring;

	/* Add a reference if we're newly entering the active list. */
	if (!obj->active) {
		drm_gem_object_reference(&obj->base);
		obj->active = 1;
	}

	list_move_tail(&obj->ring_list, &ring->active_list);

	obj->last_read_seqno = seqno;

	if (obj->fenced_gpu_access) {
		obj->last_fenced_seqno = seqno;

		/* Bump MRU to take account of the delayed flush */
		if (obj->fence_reg != I915_FENCE_REG_NONE) {
			struct drm_i915_fence_reg *reg;

			reg = &dev_priv->fence_regs[obj->fence_reg];
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
		}
	}
}
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct intel_ring_buffer *ring)
{
	list_move_tail(&vma->mm_list, &vma->vm->active_list);
	return i915_gem_object_move_to_active(vma->obj, ring);
}
static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct i915_address_space *vm;
	struct i915_vma *vma;

	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
	BUG_ON(!obj->active);

	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		vma = i915_gem_obj_to_vma(obj, vm);
		if (vma && !list_empty(&vma->mm_list))
			list_move_tail(&vma->mm_list, &vm->inactive_list);
	}

	list_del_init(&obj->ring_list);
	obj->ring = NULL;

	obj->last_read_seqno = 0;
	obj->last_write_seqno = 0;
	obj->base.write_domain = 0;

	obj->last_fenced_seqno = 0;
	obj->fenced_gpu_access = false;

	obj->active = 0;
	drm_gem_object_unreference(&obj->base);

	WARN_ON(i915_verify_lists(dev));
}
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj)
{
	struct intel_ring_buffer *ring = obj->ring;

	if (ring == NULL)
		return;

	if (i915_seqno_passed(ring->get_seqno(ring, true),
			      obj->last_read_seqno))
		i915_gem_object_move_to_inactive(obj);
}
static int
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i, j;

	/* Carefully retire all requests without writing to the rings */
	for_each_ring(ring, dev_priv, i) {
		ret = intel_ring_idle(ring);
		if (ret)
			return ret;
	}
	i915_gem_retire_requests(dev);

	/* Finally reset hw state */
	for_each_ring(ring, dev_priv, i) {
		intel_ring_init_seqno(ring, seqno);

		for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
			ring->semaphore.sync_seqno[j] = 0;
	}

	return 0;
}
int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (seqno == 0)
		return -EINVAL;

	/* HWS page needs to be set less than what we
	 * will inject to ring
	 */
	ret = i915_gem_init_seqno(dev, seqno - 1);
	if (ret)
		return ret;

	/* Carefully set the last_seqno value so that wrap
	 * detection still works
	 */
	dev_priv->next_seqno = seqno;
	dev_priv->last_seqno = seqno - 1;
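	/* Illustration: i915_gem_set_seqno(dev, 1) would leave last_seqno == 0,
	 * which is reserved for "no seqno", so the check below wraps it to
	 * 0xffffffff; i915_seqno_passed() still orders that correctly as just
	 * before seqno 1 thanks to its signed 32-bit difference. */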
	if (dev_priv->last_seqno == 0)
		dev_priv->last_seqno--;

	return 0;
}
int
i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* reserve 0 for non-seqno */
	if (dev_priv->next_seqno == 0) {
		int ret = i915_gem_init_seqno(dev, 0);
		if (ret)
			return ret;

		dev_priv->next_seqno = 1;
	}

	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
	return 0;
}
int __i915_add_request(struct intel_ring_buffer *ring,
		       struct drm_file *file,
		       struct drm_i915_gem_object *obj,
		       u32 *out_seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_request *request;
	u32 request_ring_position, request_start;
	int ret;

	request_start = intel_ring_get_tail(ring);
	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	ret = intel_ring_flush_all_caches(ring);
	if (ret)
		return ret;

	request = ring->preallocated_lazy_request;
	if (WARN_ON(request == NULL))
		return -ENOMEM;

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	request_ring_position = intel_ring_get_tail(ring);

	ret = ring->add_request(ring);
	if (ret)
		return ret;

	request->seqno = intel_ring_get_seqno(ring);
	request->ring = ring;
	request->head = request_start;
	request->tail = request_ring_position;

	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	request->batch_obj = obj;

	/* Hold a reference to the current context so that we can inspect
	 * it later in case a hangcheck error event fires.
	 */
	request->ctx = ring->last_context;
	if (request->ctx)
		i915_gem_context_reference(request->ctx);

	request->emitted_jiffies = jiffies;
	list_add_tail(&request->list, &ring->request_list);
	request->file_priv = NULL;

	if (file) {
		struct drm_i915_file_private *file_priv = file->driver_priv;

		spin_lock(&file_priv->mm.lock);
		request->file_priv = file_priv;
		list_add_tail(&request->client_list,
			      &file_priv->mm.request_list);
		spin_unlock(&file_priv->mm.lock);
	}

	trace_i915_gem_request_add(ring, request->seqno);
	ring->outstanding_lazy_seqno = 0;
	ring->preallocated_lazy_request = NULL;

	if (!dev_priv->ums.mm_suspended) {
		i915_queue_hangcheck(ring->dev);

		cancel_delayed_work_sync(&dev_priv->mm.idle_work);
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->mm.retire_work,
				   round_jiffies_up_relative(HZ));
		intel_mark_busy(dev_priv->dev);
	}

	if (out_seqno)
		*out_seqno = request->seqno;
	return 0;
}
static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}
static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
				   const struct i915_hw_context *ctx)
{
	unsigned long elapsed;

	elapsed = get_seconds() - ctx->hang_stats.guilty_ts;

	if (ctx->hang_stats.banned)
		return true;

	if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
		if (!i915_gem_context_is_default(ctx)) {
			DRM_DEBUG("context hanging too fast, banning!\n");
			return true;
		} else if (i915_stop_ring_allow_ban(dev_priv)) {
			if (i915_stop_ring_allow_warn(dev_priv))
				DRM_ERROR("gpu hanging too fast, banning!\n");
			return true;
		}
	}

	return false;
}
static void i915_set_reset_status(struct drm_i915_private *dev_priv,
				  struct i915_hw_context *ctx,
				  const bool guilty)
{
	struct i915_ctx_hang_stats *hs;

	if (WARN_ON(!ctx))
		return;

	hs = &ctx->hang_stats;

	if (guilty) {
		hs->banned = i915_context_is_banned(dev_priv, ctx);
		hs->batch_active++;
		hs->guilty_ts = get_seconds();
	} else {
		hs->batch_pending++;
	}
}
static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
	list_del(&request->list);
	i915_gem_request_remove_from_client(request);

	if (request->ctx)
		i915_gem_context_unreference(request->ctx);

	kfree(request);
}
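
/*
 * Used by the reset and error-capture code to identify the guilty batch:
 * returns the first request in submission order whose seqno the GPU has
 * not yet reported as complete, or NULL if everything on the ring had
 * already finished.
 */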
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_request *request;
	u32 completed_seqno;

	completed_seqno = ring->get_seqno(ring, false);

	list_for_each_entry(request, &ring->request_list, list) {
		if (i915_seqno_passed(completed_seqno, request->seqno))
			continue;

		return request;
	}

	return NULL;
}
static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
				       struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_request *request;
	bool ring_hung;

	request = i915_gem_find_active_request(ring);

	if (request == NULL)
		return;

	ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;

	i915_set_reset_status(dev_priv, request->ctx, ring_hung);

	list_for_each_entry_continue(request, &ring->request_list, list)
		i915_set_reset_status(dev_priv, request->ctx, false);
}
2368 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2369 struct intel_ring_buffer *ring)
2371 while (!list_empty(&ring->active_list)) {
2372 struct drm_i915_gem_object *obj;
2374 obj = list_first_entry(&ring->active_list,
2375 struct drm_i915_gem_object,
2378 i915_gem_object_move_to_inactive(obj);
2382 * We must free the requests after all the corresponding objects have
2383 * been moved off the active lists, which is the same order in which the
2384 * normal retire_requests path works. This is important if objects hold
2385 * implicit references on things like e.g. ppgtt address spaces through the request. */
2388 while (!list_empty(&ring->request_list)) {
2389 struct drm_i915_gem_request *request;
2391 request = list_first_entry(&ring->request_list,
2392 struct drm_i915_gem_request,
2395 i915_gem_free_request(request);
2398 /* These may not have been flushed before the reset, so do so now */
2399 kfree(ring->preallocated_lazy_request);
2400 ring->preallocated_lazy_request = NULL;
2401 ring->outstanding_lazy_seqno = 0;
2404 void i915_gem_restore_fences(struct drm_device *dev)
2406 struct drm_i915_private *dev_priv = dev->dev_private;
2409 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2410 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2413 * Commit delayed tiling changes if we have an object still
2414 * attached to the fence, otherwise just clear the fence.
2417 i915_gem_object_update_fence(reg->obj, reg,
2418 reg->obj->tiling_mode);
2420 i915_gem_write_fence(dev, i, NULL);
2425 void i915_gem_reset(struct drm_device *dev)
2427 struct drm_i915_private *dev_priv = dev->dev_private;
2428 struct intel_ring_buffer *ring;
2432 * Before we free the objects from the requests, we need to inspect
2433 * them for finding the guilty party. As the requests only borrow
2434 * their reference to the objects, the inspection must be done first.
2436 for_each_ring(ring, dev_priv, i)
2437 i915_gem_reset_ring_status(dev_priv, ring);
2439 for_each_ring(ring, dev_priv, i)
2440 i915_gem_reset_ring_cleanup(dev_priv, ring);
2442 i915_gem_context_reset(dev);
2444 i915_gem_restore_fences(dev);
2448 * This function clears the request list as sequence numbers are passed.
2451 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2455 if (list_empty(&ring->request_list))
2458 WARN_ON(i915_verify_lists(ring->dev));
2460 seqno = ring->get_seqno(ring, true);
2462 /* Move any buffers on the active list that are no longer referenced
2463 * by the ringbuffer to the flushing/inactive lists as appropriate,
2464 * before we free the context associated with the requests.
2466 while (!list_empty(&ring->active_list)) {
2467 struct drm_i915_gem_object *obj;
2469 obj = list_first_entry(&ring->active_list,
2470 struct drm_i915_gem_object,
2473 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2476 i915_gem_object_move_to_inactive(obj);
2480 while (!list_empty(&ring->request_list)) {
2481 struct drm_i915_gem_request *request;
2483 request = list_first_entry(&ring->request_list,
2484 struct drm_i915_gem_request,
2487 if (!i915_seqno_passed(seqno, request->seqno))
2490 trace_i915_gem_request_retire(ring, request->seqno);
2491 /* We know the GPU must have read the request to have
2492 * sent us the seqno + interrupt, so use the position
2493 * of the tail of the request to update the last known position of the GPU head. */
2496 ring->last_retired_head = request->tail;
2498 i915_gem_free_request(request);
2501 if (unlikely(ring->trace_irq_seqno &&
2502 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2503 ring->irq_put(ring);
2504 ring->trace_irq_seqno = 0;
2507 WARN_ON(i915_verify_lists(ring->dev));
2511 i915_gem_retire_requests(struct drm_device *dev)
2513 struct drm_i915_private *dev_priv = dev->dev_private;
2514 struct intel_ring_buffer *ring;
2518 for_each_ring(ring, dev_priv, i) {
2519 i915_gem_retire_requests_ring(ring);
2520 idle &= list_empty(&ring->request_list);
2524 mod_delayed_work(dev_priv->wq,
2525 &dev_priv->mm.idle_work,
2526 msecs_to_jiffies(100));
2532 i915_gem_retire_work_handler(struct work_struct *work)
2534 struct drm_i915_private *dev_priv =
2535 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2536 struct drm_device *dev = dev_priv->dev;
2539 /* Come back later if the device is busy... */
2541 if (mutex_trylock(&dev->struct_mutex)) {
2542 idle = i915_gem_retire_requests(dev);
2543 mutex_unlock(&dev->struct_mutex);
2546 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2547 round_jiffies_up_relative(HZ));
2551 i915_gem_idle_work_handler(struct work_struct *work)
2553 struct drm_i915_private *dev_priv =
2554 container_of(work, typeof(*dev_priv), mm.idle_work.work);
2556 intel_mark_idle(dev_priv->dev);
2560 * Ensures that an object will eventually get non-busy by flushing any required
2561 * write domains, emitting any outstanding lazy request and retiring any
2562 * completed requests.
2565 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2570 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2574 i915_gem_retire_requests_ring(obj->ring);
2581 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2582 * @DRM_IOCTL_ARGS: standard ioctl arguments
2584 * Returns 0 if successful, else an error is returned with the remaining time in
2585 * the timeout parameter.
2586 * -ETIME: object is still busy after timeout
2587 * -ERESTARTSYS: signal interrupted the wait
2588 * -ENOENT: object doesn't exist
2589 * Also possible, but rare:
2590 * -EAGAIN: GPU wedged
2592 * -ENODEV: Internal IRQ fail
2593 * -E?: The add request failed
2595 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2596 * non-zero timeout parameter the wait ioctl will wait for the given number of
2597 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2598 * without holding struct_mutex the object may become re-busied before this
2599 * function completes. A similar but shorter race condition exists in the busy ioctl. */
2603 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2605 struct drm_i915_private *dev_priv = dev->dev_private;
2606 struct drm_i915_gem_wait *args = data;
2607 struct drm_i915_gem_object *obj;
2608 struct intel_ring_buffer *ring = NULL;
2609 struct timespec timeout_stack, *timeout = NULL;
2610 unsigned reset_counter;
2614 if (args->timeout_ns >= 0) {
2615 timeout_stack = ns_to_timespec(args->timeout_ns);
2616 timeout = &timeout_stack;
2619 ret = i915_mutex_lock_interruptible(dev);
2623 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2624 if (&obj->base == NULL) {
2625 mutex_unlock(&dev->struct_mutex);
2629 /* Need to make sure the object gets inactive eventually. */
2630 ret = i915_gem_object_flush_active(obj);
2635 seqno = obj->last_read_seqno;
2642 /* Do this after OLR check to make sure we make forward progress polling
2643 * on this IOCTL with a 0 timeout (like busy ioctl)
2645 if (!args->timeout_ns) {
2650 drm_gem_object_unreference(&obj->base);
2651 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2652 mutex_unlock(&dev->struct_mutex);
2654 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
2656 args->timeout_ns = timespec_to_ns(timeout);
2660 drm_gem_object_unreference(&obj->base);
2661 mutex_unlock(&dev->struct_mutex);
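/*
 * Illustrative userspace usage of DRM_IOCTL_I915_GEM_WAIT (a sketch, not
 * part of this file): block for up to 10ms on an object becoming unbusy.
 * `fd` and `handle` are assumed to come from the caller; a timeout_ns of
 * 0 would merely poll, per the comment above.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int wait_for_bo(int fd, uint32_t handle)
{
	struct drm_i915_gem_wait wait = {
		.bo_handle = handle,
		.timeout_ns = 10 * 1000 * 1000,	/* 10ms */
	};

	/* On success the remaining time is written back to timeout_ns;
	 * -ETIME means the object was still busy when time ran out. */
	return ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
}
#endif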
2666 * i915_gem_object_sync - sync an object to a ring.
2668 * @obj: object which may be in use on another ring.
2669 * @to: ring we wish to use the object on. May be NULL.
2671 * This code is meant to abstract object synchronization with the GPU.
2672 * Calling with NULL implies synchronizing the object with the CPU
2673 * rather than a particular GPU ring.
2675 * Returns 0 if successful, else propagates up the lower layer error.
2678 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2679 struct intel_ring_buffer *to)
2681 struct intel_ring_buffer *from = obj->ring;
2685 if (from == NULL || to == from)
2688 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2689 return i915_gem_object_wait_rendering(obj, false);
2691 idx = intel_ring_sync_index(from, to);
2693 seqno = obj->last_read_seqno;
2694 if (seqno <= from->semaphore.sync_seqno[idx])
2697 ret = i915_gem_check_olr(obj->ring, seqno);
2701 trace_i915_gem_ring_sync_to(from, to, seqno);
2702 ret = to->semaphore.sync_to(to, from, seqno);
2704 /* We use last_read_seqno because sync_to()
2705 * might have just caused a seqno wrap under the radar. */
2708 from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
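/*
 * Usage sketch (hypothetical call sites, for illustration only): before
 * using a buffer last rendered on another ring, a caller would do
 * i915_gem_object_sync(obj, target_ring) to queue a semaphore wait on
 * that ring, whereas i915_gem_object_sync(obj, NULL) blocks on the CPU
 * until rendering completes.
 */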
2713 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2715 u32 old_write_domain, old_read_domains;
2717 /* Force a pagefault for domain tracking on next user access */
2718 i915_gem_release_mmap(obj);
2720 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2723 /* Wait for any direct GTT access to complete */
2726 old_read_domains = obj->base.read_domains;
2727 old_write_domain = obj->base.write_domain;
2729 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2730 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2732 trace_i915_gem_object_change_domain(obj,
2737 int i915_vma_unbind(struct i915_vma *vma)
2739 struct drm_i915_gem_object *obj = vma->obj;
2740 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2743 if (list_empty(&vma->vma_link))
2746 if (!drm_mm_node_allocated(&vma->node)) {
2747 i915_gem_vma_destroy(vma);
2754 BUG_ON(obj->pages == NULL);
2756 ret = i915_gem_object_finish_gpu(obj);
2759 /* Continue on if we fail due to EIO; the GPU is hung, so we
2760 * should be safe, and we need to clean up or else we might
2761 * cause memory corruption through use-after-free. */
2764 i915_gem_object_finish_gtt(obj);
2766 /* release the fence reg _after_ flushing */
2767 ret = i915_gem_object_put_fence(obj);
2771 trace_i915_vma_unbind(vma);
2773 vma->unbind_vma(vma);
2775 i915_gem_gtt_finish_object(obj);
2777 list_del_init(&vma->mm_list);
2778 /* Avoid an unnecessary call to unbind on rebind. */
2779 if (i915_is_ggtt(vma->vm))
2780 obj->map_and_fenceable = true;
2782 drm_mm_remove_node(&vma->node);
2783 i915_gem_vma_destroy(vma);
2785 /* Since the unbound list is global, only move to that list if
2786 * no more VMAs exist. */
2787 if (list_empty(&obj->vma_list))
2788 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2790 /* And finally, now that the object is completely decoupled from this vma,
2791 * we can drop its hold on the backing storage and allow it to be
2792 * reaped by the shrinker. */
2794 i915_gem_object_unpin_pages(obj);
2799 int i915_gpu_idle(struct drm_device *dev)
2801 struct drm_i915_private *dev_priv = dev->dev_private;
2802 struct intel_ring_buffer *ring;
2805 /* Flush everything onto the inactive list. */
2806 for_each_ring(ring, dev_priv, i) {
2807 ret = i915_switch_context(ring, ring->default_context);
2811 ret = intel_ring_idle(ring);
2819 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2820 struct drm_i915_gem_object *obj)
2822 struct drm_i915_private *dev_priv = dev->dev_private;
2824 int fence_pitch_shift;
2826 if (INTEL_INFO(dev)->gen >= 6) {
2827 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2828 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2830 fence_reg = FENCE_REG_965_0;
2831 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2834 fence_reg += reg * 8;
2836 /* To work around incoherency with non-atomic 64-bit register updates,
2837 * we split the 64-bit update into two 32-bit writes. In order
2838 * for a partial fence not to be evaluated between writes, we
2839 * precede the update with a write to turn off the fence register,
2840 * and only enable the fence as the last step.
2842 * For extra levels of paranoia, we make sure each step lands
2843 * before applying the next step.
2845 I915_WRITE(fence_reg, 0);
2846 POSTING_READ(fence_reg);
2849 u32 size = i915_gem_obj_ggtt_size(obj);
2852 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & 0xfffff000) << 32;
2854 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
2855 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2856 if (obj->tiling_mode == I915_TILING_Y)
2857 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2858 val |= I965_FENCE_REG_VALID;
2860 I915_WRITE(fence_reg + 4, val >> 32);
2861 POSTING_READ(fence_reg + 4);
2863 I915_WRITE(fence_reg + 0, val);
2864 POSTING_READ(fence_reg);
2866 I915_WRITE(fence_reg + 4, 0);
2867 POSTING_READ(fence_reg + 4);
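/*
 * Shape of the 64-bit fence value assembled above, read off the code
 * (an aid for the reader, not a restatement of the hardware spec):
 *
 *	bits 63:32	(ggtt_offset + size - 4096) & 0xfffff000,
 *			i.e. the last page of the fenced range
 *	bits 31:0	ggtt_offset & 0xfffff000 (the first page), plus
 *			((stride / 128) - 1) << fence_pitch_shift,
 *			the Y-tiling bit and I965_FENCE_REG_VALID
 */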
2871 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2872 struct drm_i915_gem_object *obj)
2874 struct drm_i915_private *dev_priv = dev->dev_private;
2878 u32 size = i915_gem_obj_ggtt_size(obj);
2882 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
2883 (size & -size) != size ||
2884 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2885 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2886 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
2888 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2893 /* Note: the pitch had better be a power-of-two number of tile widths */
2894 pitch_val = obj->stride / tile_width;
2895 pitch_val = ffs(pitch_val) - 1;
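	/* Worked example: a 2048-byte stride with 512-byte X tiles gives
	 * pitch_val = ffs(2048 / 512) - 1 = ffs(4) - 1 = 2, i.e. the
	 * register is programmed with log2 of the pitch in tile widths.
	 */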
2897 val = i915_gem_obj_ggtt_offset(obj);
2898 if (obj->tiling_mode == I915_TILING_Y)
2899 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2900 val |= I915_FENCE_SIZE_BITS(size);
2901 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2902 val |= I830_FENCE_REG_VALID;
2907 reg = FENCE_REG_830_0 + reg * 4;
2909 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2911 I915_WRITE(reg, val);
2915 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2916 struct drm_i915_gem_object *obj)
2918 struct drm_i915_private *dev_priv = dev->dev_private;
2922 u32 size = i915_gem_obj_ggtt_size(obj);
2925 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
2926 (size & -size) != size ||
2927 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2928 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2929 i915_gem_obj_ggtt_offset(obj), size);
2931 pitch_val = obj->stride / 128;
2932 pitch_val = ffs(pitch_val) - 1;
2934 val = i915_gem_obj_ggtt_offset(obj);
2935 if (obj->tiling_mode == I915_TILING_Y)
2936 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2937 val |= I830_FENCE_SIZE_BITS(size);
2938 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2939 val |= I830_FENCE_REG_VALID;
2943 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2944 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2947 static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2949 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2952 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2953 struct drm_i915_gem_object *obj)
2955 struct drm_i915_private *dev_priv = dev->dev_private;
2957 /* Ensure that all CPU reads are completed before installing a fence
2958 * and all writes before removing the fence.
2960 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2963 WARN(obj && (!obj->stride || !obj->tiling_mode),
2964 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2965 obj->stride, obj->tiling_mode);
2967 switch (INTEL_INFO(dev)->gen) {
2972 case 4: i965_write_fence_reg(dev, reg, obj); break;
2973 case 3: i915_write_fence_reg(dev, reg, obj); break;
2974 case 2: i830_write_fence_reg(dev, reg, obj); break;
2978 /* And similarly be paranoid that no direct access to this region
2979 * is reordered to before the fence is installed.
2981 if (i915_gem_object_needs_mb(obj))
2985 static inline int fence_number(struct drm_i915_private *dev_priv,
2986 struct drm_i915_fence_reg *fence)
2988 return fence - dev_priv->fence_regs;
2991 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2992 struct drm_i915_fence_reg *fence,
2995 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2996 int reg = fence_number(dev_priv, fence);
2998 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
3001 obj->fence_reg = reg;
3003 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3005 obj->fence_reg = I915_FENCE_REG_NONE;
3007 list_del_init(&fence->lru_list);
3009 obj->fence_dirty = false;
3013 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
3015 if (obj->last_fenced_seqno) {
3016 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
3020 obj->last_fenced_seqno = 0;
3023 obj->fenced_gpu_access = false;
3028 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3030 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3031 struct drm_i915_fence_reg *fence;
3034 ret = i915_gem_object_wait_fence(obj);
3038 if (obj->fence_reg == I915_FENCE_REG_NONE)
3041 fence = &dev_priv->fence_regs[obj->fence_reg];
3043 i915_gem_object_fence_lost(obj);
3044 i915_gem_object_update_fence(obj, fence, false);
3049 static struct drm_i915_fence_reg *
3050 i915_find_fence_reg(struct drm_device *dev)
3052 struct drm_i915_private *dev_priv = dev->dev_private;
3053 struct drm_i915_fence_reg *reg, *avail;
3056 /* First try to find a free reg */
3058 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3059 reg = &dev_priv->fence_regs[i];
3063 if (!reg->pin_count)
3070 /* None available, try to steal one or wait for a user to finish */
3071 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3079 /* Wait for completion of pending flips which consume fences */
3080 if (intel_has_pending_fb_unpin(dev))
3081 return ERR_PTR(-EAGAIN);
3083 return ERR_PTR(-EDEADLK);
3087 * i915_gem_object_get_fence - set up fencing for an object
3088 * @obj: object to map through a fence reg
3090 * When mapping objects through the GTT, userspace wants to be able to write
3091 * to them without having to worry about swizzling if the object is tiled.
3092 * This function walks the fence regs looking for a free one for @obj,
3093 * stealing one if it can't find any.
3095 * It then sets up the reg based on the object's properties: address, pitch
3096 * and tiling format.
3098 * For an untiled surface, this removes any existing fence.
3101 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3103 struct drm_device *dev = obj->base.dev;
3104 struct drm_i915_private *dev_priv = dev->dev_private;
3105 bool enable = obj->tiling_mode != I915_TILING_NONE;
3106 struct drm_i915_fence_reg *reg;
3109 /* Have we updated the tiling parameters upon the object and so
3110 * will need to serialise the write to the associated fence register?
3112 if (obj->fence_dirty) {
3113 ret = i915_gem_object_wait_fence(obj);
3118 /* Just update our place in the LRU if our fence is getting reused. */
3119 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3120 reg = &dev_priv->fence_regs[obj->fence_reg];
3121 if (!obj->fence_dirty) {
3122 list_move_tail(&reg->lru_list,
3123 &dev_priv->mm.fence_list);
3126 } else if (enable) {
3127 reg = i915_find_fence_reg(dev);
3129 return PTR_ERR(reg);
3132 struct drm_i915_gem_object *old = reg->obj;
3134 ret = i915_gem_object_wait_fence(old);
3138 i915_gem_object_fence_lost(old);
3143 i915_gem_object_update_fence(obj, reg, enable);
3148 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3149 struct drm_mm_node *gtt_space,
3150 unsigned long cache_level)
3152 struct drm_mm_node *other;
3154 /* On non-LLC machines we have to be careful when putting differing
3155 * types of snoopable memory together to avoid the prefetcher
3156 * crossing memory domains and dying.
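	/* Worked example: placing an uncached (LLC=none) node directly
	 * next to a snooped node with no hole between them is rejected
	 * by the checks below; either a matching cache colour or a
	 * guard hole (hole_follows) makes the placement acceptable.
	 */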
3161 if (!drm_mm_node_allocated(gtt_space))
3164 if (list_empty(&gtt_space->node_list))
3167 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3168 if (other->allocated && !other->hole_follows && other->color != cache_level)
3171 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3172 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3178 static void i915_gem_verify_gtt(struct drm_device *dev)
3181 struct drm_i915_private *dev_priv = dev->dev_private;
3182 struct drm_i915_gem_object *obj;
3185 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
3186 if (obj->gtt_space == NULL) {
3187 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3192 if (obj->cache_level != obj->gtt_space->color) {
3193 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3194 i915_gem_obj_ggtt_offset(obj),
3195 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3197 obj->gtt_space->color);
3202 if (!i915_gem_valid_gtt_space(dev,
3204 obj->cache_level)) {
3205 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3206 i915_gem_obj_ggtt_offset(obj),
3207 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3219 * Finds free space in the GTT aperture and binds the object there.
3221 static struct i915_vma *
3222 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3223 struct i915_address_space *vm,
3227 struct drm_device *dev = obj->base.dev;
3228 struct drm_i915_private *dev_priv = dev->dev_private;
3229 u32 size, fence_size, fence_alignment, unfenced_alignment;
3231 unsigned long gtt_max = flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
3232 struct i915_vma *vma;
3235 fence_size = i915_gem_get_gtt_size(dev,
3238 fence_alignment = i915_gem_get_gtt_alignment(dev,
3240 obj->tiling_mode, true);
3241 unfenced_alignment =
3242 i915_gem_get_gtt_alignment(dev,
3244 obj->tiling_mode, false);
3247 alignment = flags & PIN_MAPPABLE ? fence_alignment : unfenced_alignment;
3249 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3250 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
3251 return ERR_PTR(-EINVAL);
3254 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3256 /* If the object is bigger than the entire aperture, reject it early
3257 * before evicting everything in a vain attempt to find space.
3259 if (obj->base.size > gtt_max) {
3260 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
3262 flags & PIN_MAPPABLE ? "mappable" : "total",
3264 return ERR_PTR(-E2BIG);
3267 ret = i915_gem_object_get_pages(obj);
3269 return ERR_PTR(ret);
3271 i915_gem_object_pin_pages(obj);
3273 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3278 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3280 obj->cache_level, 0, gtt_max,
3281 DRM_MM_SEARCH_DEFAULT,
3282 DRM_MM_CREATE_DEFAULT);
3284 ret = i915_gem_evict_something(dev, vm, size, alignment,
3285 obj->cache_level, flags);
3291 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
3292 obj->cache_level))) {
3294 goto err_remove_node;
3297 ret = i915_gem_gtt_prepare_object(obj);
3299 goto err_remove_node;
3301 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3302 list_add_tail(&vma->mm_list, &vm->inactive_list);
3304 if (i915_is_ggtt(vm)) {
3305 bool mappable, fenceable;
3307 fenceable = (vma->node.size == fence_size &&
3308 (vma->node.start & (fence_alignment - 1)) == 0);
3310 mappable = (vma->node.start + obj->base.size <=
3311 dev_priv->gtt.mappable_end);
3313 obj->map_and_fenceable = mappable && fenceable;
3316 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
3318 trace_i915_vma_bind(vma, flags);
3319 vma->bind_vma(vma, obj->cache_level,
3320 flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
3322 i915_gem_verify_gtt(dev);
3326 drm_mm_remove_node(&vma->node);
3328 i915_gem_vma_destroy(vma);
3331 i915_gem_object_unpin_pages(obj);
3336 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3339 /* If we don't have a page list set up, then we're not pinned
3340 * to GPU, and we can ignore the cache flush because it'll happen
3341 * again at bind time.
3343 if (obj->pages == NULL)
3347 * Stolen memory is always coherent with the GPU as it is explicitly
3348 * marked as wc by the system, or the system is cache-coherent.
3353 /* If the GPU is snooping the contents of the CPU cache,
3354 * we do not need to manually clear the CPU cache lines. However,
3355 * the caches are only snooped when the render cache is
3356 * flushed/invalidated. As we always have to emit invalidations
3357 * and flushes when moving into and out of the RENDER domain, correct
3358 * snooping behaviour occurs naturally as the result of our domain tracking. */
3361 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3364 trace_i915_gem_object_clflush(obj);
3365 drm_clflush_sg(obj->pages);
3370 /** Flushes the GTT write domain for the object if it's dirty. */
3372 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3374 uint32_t old_write_domain;
3376 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3379 /* No actual flushing is required for the GTT write domain. Writes
3380 * to it immediately go to main memory as far as we know, so there's
3381 * no chipset flush. It also doesn't land in render cache.
3383 * However, we do have to enforce the order so that all writes through
3384 * the GTT land before any writes to the device, such as updates to the GATT itself. */
wmb();
3389 old_write_domain = obj->base.write_domain;
3390 obj->base.write_domain = 0;
3392 trace_i915_gem_object_change_domain(obj,
3393 obj->base.read_domains,
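/*
 * Illustrative ordering hazard the wmb() above guards against (a sketch
 * with hypothetical names, not driver code): a CPU write through the WC
 * aperture must become globally visible before any MMIO write that lets
 * the device consume it:
 *
 *	*(u32 __iomem *)gtt_vaddr = data;	// WC write via the GTT
 *	wmb();					// drain WC buffers first
 *	I915_WRITE(reg, kick);			// then poke the device
 *
 * Without the barrier the MMIO write may overtake the WC write and the
 * device can observe stale data.
 */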
3397 /** Flushes the CPU write domain for the object if it's dirty. */
3399 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3402 uint32_t old_write_domain;
3404 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3407 if (i915_gem_clflush_object(obj, force))
3408 i915_gem_chipset_flush(obj->base.dev);
3410 old_write_domain = obj->base.write_domain;
3411 obj->base.write_domain = 0;
3413 trace_i915_gem_object_change_domain(obj,
3414 obj->base.read_domains,
3419 * Moves a single object to the GTT read, and possibly write domain.
3421 * This function returns when the move is complete, including waiting on flushes to occur. */
3425 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3427 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3428 uint32_t old_write_domain, old_read_domains;
3431 /* Not valid to be called on unbound objects. */
3432 if (!i915_gem_obj_bound_any(obj))
3435 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3438 ret = i915_gem_object_wait_rendering(obj, !write);
3442 i915_gem_object_retire(obj);
3443 i915_gem_object_flush_cpu_write_domain(obj, false);
3445 /* Serialise direct access to this object with the barriers for
3446 * coherent writes from the GPU, by effectively invalidating the
3447 * GTT domain upon first access.
3449 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3452 old_write_domain = obj->base.write_domain;
3453 old_read_domains = obj->base.read_domains;
3455 /* It should now be out of any other write domains, and we can update
3456 * the domain values for our changes.
3458 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3459 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3461 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3462 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3466 trace_i915_gem_object_change_domain(obj,
3470 /* And bump the LRU for this access */
3471 if (i915_gem_object_is_inactive(obj)) {
3472 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3474 list_move_tail(&vma->mm_list,
3475 &dev_priv->gtt.base.inactive_list);
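/*
 * Illustrative userspace path into the function above (a sketch; the
 * actual set_domain ioctl handler lives elsewhere in this file): move a
 * buffer to the GTT domain, optionally for writing, before accessing it
 * through a GTT mmap. `fd` and `handle` are assumed to come from the
 * caller.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int bo_to_gtt_domain(int fd, uint32_t handle, int write)
{
	struct drm_i915_gem_set_domain sd = {
		.handle = handle,
		.read_domains = I915_GEM_DOMAIN_GTT,
		.write_domain = write ? I915_GEM_DOMAIN_GTT : 0,
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}
#endif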
3482 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3483 enum i915_cache_level cache_level)
3485 struct drm_device *dev = obj->base.dev;
3486 struct i915_vma *vma, *next;
3489 if (obj->cache_level == cache_level)
3492 if (i915_gem_obj_is_pinned(obj)) {
3493 DRM_DEBUG("can not change the cache level of pinned objects\n");
3497 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
3498 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
3499 ret = i915_vma_unbind(vma);
3505 if (i915_gem_obj_bound_any(obj)) {
3506 ret = i915_gem_object_finish_gpu(obj);
3510 i915_gem_object_finish_gtt(obj);
3512 /* Before SandyBridge, you could not use tiling or fence
3513 * registers with snooped memory, so relinquish any fences
3514 * currently pointing to our region in the aperture.
3516 if (INTEL_INFO(dev)->gen < 6) {
3517 ret = i915_gem_object_put_fence(obj);
3522 list_for_each_entry(vma, &obj->vma_list, vma_link)
3523 if (drm_mm_node_allocated(&vma->node))
3524 vma->bind_vma(vma, cache_level,
3525 obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
3528 list_for_each_entry(vma, &obj->vma_list, vma_link)
3529 vma->node.color = cache_level;
3530 obj->cache_level = cache_level;
3532 if (cpu_write_needs_clflush(obj)) {
3533 u32 old_read_domains, old_write_domain;
3535 /* If we're coming from LLC cached, then we haven't
3536 * actually been tracking whether the data is in the
3537 * CPU cache or not, since we only allow one bit set
3538 * in obj->write_domain and have been skipping the clflushes.
3539 * Just set it to the CPU cache for now.
3541 i915_gem_object_retire(obj);
3542 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3544 old_read_domains = obj->base.read_domains;
3545 old_write_domain = obj->base.write_domain;
3547 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3548 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3550 trace_i915_gem_object_change_domain(obj,
3555 i915_gem_verify_gtt(dev);
3559 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3560 struct drm_file *file)
3562 struct drm_i915_gem_caching *args = data;
3563 struct drm_i915_gem_object *obj;
3566 ret = i915_mutex_lock_interruptible(dev);
3570 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3571 if (&obj->base == NULL) {
3576 switch (obj->cache_level) {
3577 case I915_CACHE_LLC:
3578 case I915_CACHE_L3_LLC:
3579 args->caching = I915_CACHING_CACHED;
3583 args->caching = I915_CACHING_DISPLAY;
3587 args->caching = I915_CACHING_NONE;
3591 drm_gem_object_unreference(&obj->base);
3593 mutex_unlock(&dev->struct_mutex);
3597 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3598 struct drm_file *file)
3600 struct drm_i915_gem_caching *args = data;
3601 struct drm_i915_gem_object *obj;
3602 enum i915_cache_level level;
3605 switch (args->caching) {
3606 case I915_CACHING_NONE:
3607 level = I915_CACHE_NONE;
3609 case I915_CACHING_CACHED:
3610 level = I915_CACHE_LLC;
3612 case I915_CACHING_DISPLAY:
3613 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3619 ret = i915_mutex_lock_interruptible(dev);
3623 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3624 if (&obj->base == NULL) {
3629 ret = i915_gem_object_set_cache_level(obj, level);
3631 drm_gem_object_unreference(&obj->base);
3633 mutex_unlock(&dev->struct_mutex);
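/*
 * Illustrative userspace usage of the two caching ioctls above (a
 * sketch; fd/handle assumed): request snooped caching for a buffer,
 * then read the level back.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int bo_set_cached(int fd, uint32_t handle)
{
	struct drm_i915_gem_caching arg = {
		.handle = handle,
		.caching = I915_CACHING_CACHED,	/* maps to I915_CACHE_LLC */
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg))
		return -1;

	arg.caching = 0;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg))
		return -1;

	return arg.caching;	/* I915_CACHING_* as reported above */
}
#endif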
3637 static bool is_pin_display(struct drm_i915_gem_object *obj)
3639 /* There are 3 sources that pin objects:
3640 * 1. The display engine (scanouts, sprites, cursors);
3641 * 2. Reservations for execbuffer;
* 3. The user.
3644 * We can ignore reservations as we hold the struct_mutex and
3645 * are only called outside of the reservation path. The user
3646 * can only increment pin_count once, and so if after
3647 * subtracting the potential reference by the user, any pin_count
3648 * remains, it must be due to another use by the display engine.
3650 return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
3654 * Prepare buffer for display plane (scanout, cursors, etc).
3655 * Can be called from an uninterruptible phase (modesetting) and allows
3656 * any flushes to be pipelined (for pageflips).
3659 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3661 struct intel_ring_buffer *pipelined)
3663 u32 old_read_domains, old_write_domain;
3666 if (pipelined != obj->ring) {
3667 ret = i915_gem_object_sync(obj, pipelined);
3672 /* Mark the pin_display early so that we account for the
3673 * display coherency whilst setting up the cache domains.
3675 obj->pin_display = true;
3677 /* The display engine is not coherent with the LLC cache on gen6. As
3678 * a result, we make sure that the pinning that is about to occur is
3679 * done with uncached PTEs. This is the lowest common denominator for all chipsets.
3682 * However for gen6+, we could do better by using the GFDT bit instead
3683 * of uncaching, which would allow us to flush all the LLC-cached data
3684 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3686 ret = i915_gem_object_set_cache_level(obj,
3687 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3689 goto err_unpin_display;
3691 /* As the user may map the buffer once pinned in the display plane
3692 * (e.g. libkms for the bootup splash), we have to ensure that we
3693 * always use map_and_fenceable for all scanout buffers.
3695 ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
3697 goto err_unpin_display;
3699 i915_gem_object_flush_cpu_write_domain(obj, true);
3701 old_write_domain = obj->base.write_domain;
3702 old_read_domains = obj->base.read_domains;
3704 /* It should now be out of any other write domains, and we can update
3705 * the domain values for our changes.
3707 obj->base.write_domain = 0;
3708 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3710 trace_i915_gem_object_change_domain(obj,
3717 obj->pin_display = is_pin_display(obj);
3722 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3724 i915_gem_object_ggtt_unpin(obj);
3725 obj->pin_display = is_pin_display(obj);
3729 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3733 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3736 ret = i915_gem_object_wait_rendering(obj, false);
3740 /* Ensure that we invalidate the GPU's caches and TLBs. */
3741 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3746 * Moves a single object to the CPU read, and possibly write domain.
3748 * This function returns when the move is complete, including waiting on flushes to occur. */
3752 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3754 uint32_t old_write_domain, old_read_domains;
3757 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3760 ret = i915_gem_object_wait_rendering(obj, !write);
3764 i915_gem_object_retire(obj);
3765 i915_gem_object_flush_gtt_write_domain(obj);
3767 old_write_domain = obj->base.write_domain;
3768 old_read_domains = obj->base.read_domains;
3770 /* Flush the CPU cache if it's still invalid. */
3771 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3772 i915_gem_clflush_object(obj, false);
3774 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3777 /* It should now be out of any other write domains, and we can update
3778 * the domain values for our changes.
3780 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3782 /* If we're writing through the CPU, then the GPU read domains will
3783 * need to be invalidated at next use.
3786 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3787 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3790 trace_i915_gem_object_change_domain(obj,
3797 /* Throttle our rendering by waiting until the ring has completed our requests
3798 * emitted over 20 msec ago.
3800 * Note that if we were to use the current jiffies each time around the loop,
3801 * we wouldn't escape the function with any frames outstanding if the time to
3802 * render a frame was over 20ms.
3804 * This should get us reasonable parallelism between CPU and GPU but also
3805 * relatively low latency when blocking on a particular request to finish.
3808 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3810 struct drm_i915_private *dev_priv = dev->dev_private;
3811 struct drm_i915_file_private *file_priv = file->driver_priv;
3812 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3813 struct drm_i915_gem_request *request;
3814 struct intel_ring_buffer *ring = NULL;
3815 unsigned reset_counter;
3819 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3823 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3827 spin_lock(&file_priv->mm.lock);
3828 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3829 if (time_after_eq(request->emitted_jiffies, recent_enough))
3832 ring = request->ring;
3833 seqno = request->seqno;
3835 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3836 spin_unlock(&file_priv->mm.lock);
3841 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
3843 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3849 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3850 struct i915_address_space *vm,
3854 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3855 struct i915_vma *vma;
3858 if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
3861 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
3864 vma = i915_gem_obj_to_vma(obj, vm);
3866 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3870 vma->node.start & (alignment - 1)) ||
3871 (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
3872 WARN(vma->pin_count,
3873 "bo is already pinned with incorrect alignment:"
3874 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
3875 " obj->map_and_fenceable=%d\n",
3876 i915_gem_obj_offset(obj, vm), alignment,
3877 flags & PIN_MAPPABLE,
3878 obj->map_and_fenceable);
3879 ret = i915_vma_unbind(vma);
3887 if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
3888 vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
3890 return PTR_ERR(vma);
3893 if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
3894 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
3897 if (flags & PIN_MAPPABLE)
3898 obj->pin_mappable = true;
3904 i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
3906 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3909 BUG_ON(vma->pin_count == 0);
3910 BUG_ON(!i915_gem_obj_ggtt_bound(obj));
3912 if (--vma->pin_count == 0)
3913 obj->pin_mappable = false;
3917 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3918 struct drm_file *file)
3920 struct drm_i915_gem_pin *args = data;
3921 struct drm_i915_gem_object *obj;
3924 if (INTEL_INFO(dev)->gen >= 6)
3927 ret = i915_mutex_lock_interruptible(dev);
3931 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3932 if (&obj->base == NULL) {
3937 if (obj->madv != I915_MADV_WILLNEED) {
3938 DRM_DEBUG("Attempting to pin a purgeable buffer\n");
3943 if (obj->pin_filp != NULL && obj->pin_filp != file) {
3944 DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
3950 if (obj->user_pin_count == ULONG_MAX) {
3955 if (obj->user_pin_count == 0) {
3956 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
3961 obj->user_pin_count++;
3962 obj->pin_filp = file;
3964 args->offset = i915_gem_obj_ggtt_offset(obj);
3966 drm_gem_object_unreference(&obj->base);
3968 mutex_unlock(&dev->struct_mutex);
3973 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3974 struct drm_file *file)
3976 struct drm_i915_gem_pin *args = data;
3977 struct drm_i915_gem_object *obj;
3980 ret = i915_mutex_lock_interruptible(dev);
3984 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3985 if (&obj->base == NULL) {
3990 if (obj->pin_filp != file) {
3991 DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3996 obj->user_pin_count--;
3997 if (obj->user_pin_count == 0) {
3998 obj->pin_filp = NULL;
3999 i915_gem_object_ggtt_unpin(obj);
4003 drm_gem_object_unreference(&obj->base);
4005 mutex_unlock(&dev->struct_mutex);
4010 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4011 struct drm_file *file)
4013 struct drm_i915_gem_busy *args = data;
4014 struct drm_i915_gem_object *obj;
4017 ret = i915_mutex_lock_interruptible(dev);
4021 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4022 if (&obj->base == NULL) {
4027 /* Count all active objects as busy, even if they are currently not used
4028 * by the gpu. Users of this interface expect objects to eventually
4029 * become non-busy without any further actions, therefore emit any
4030 * necessary flushes here.
4032 ret = i915_gem_object_flush_active(obj);
4034 args->busy = obj->active;
4036 BUILD_BUG_ON(I915_NUM_RINGS > 16);
4037 args->busy |= intel_ring_flag(obj->ring) << 16;
4040 drm_gem_object_unreference(&obj->base);
4042 mutex_unlock(&dev->struct_mutex);
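/*
 * Illustrative decode of the busy ioctl result assembled above (a
 * sketch; fd/handle assumed): bit 0 reports activity, and the ring flag
 * sits in the upper 16 bits exactly as written by the handler.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int bo_is_busy(int fd, uint32_t handle, uint32_t *ring_flag)
{
	struct drm_i915_gem_busy busy = { .handle = handle };

	if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
		return -1;

	if (ring_flag)
		*ring_flag = busy.busy >> 16;	/* intel_ring_flag() value */

	return busy.busy & 1;	/* obj->active */
}
#endif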
4047 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4048 struct drm_file *file_priv)
4050 return i915_gem_ring_throttle(dev, file_priv);
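/*
 * Illustrative use of the throttle ioctl handled above (a sketch): a
 * client calls it once per frame so it never runs more than ~20ms of
 * rendering ahead of the GPU.
 */
#if 0
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void frame_throttle(int fd)
{
	/* Takes no argument; blocks until the newest of this client's
	 * requests emitted more than 20ms ago has completed, if any. */
	ioctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
}
#endif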
4054 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4055 struct drm_file *file_priv)
4057 struct drm_i915_gem_madvise *args = data;
4058 struct drm_i915_gem_object *obj;
4061 switch (args->madv) {
4062 case I915_MADV_DONTNEED:
4063 case I915_MADV_WILLNEED:
4069 ret = i915_mutex_lock_interruptible(dev);
4073 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
4074 if (&obj->base == NULL) {
4079 if (i915_gem_obj_is_pinned(obj)) {
4084 if (obj->madv != __I915_MADV_PURGED)
4085 obj->madv = args->madv;
4087 /* if the object is no longer attached, discard its backing storage */
4088 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
4089 i915_gem_object_truncate(obj);
4091 args->retained = obj->madv != __I915_MADV_PURGED;
4094 drm_gem_object_unreference(&obj->base);
4096 mutex_unlock(&dev->struct_mutex);
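/*
 * Illustrative userspace usage of the madvise ioctl above (a sketch;
 * fd/handle assumed): mark an idle buffer purgeable, and check
 * `retained` when taking it back into use.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int bo_madvise(int fd, uint32_t handle, uint32_t state)
{
	struct drm_i915_gem_madvise madv = {
		.handle = handle,
		.madv = state,	/* I915_MADV_DONTNEED or I915_MADV_WILLNEED */
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
		return -1;

	/* 0 means the backing storage was already discarded. */
	return madv.retained;
}
#endif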
4100 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4101 const struct drm_i915_gem_object_ops *ops)
4103 INIT_LIST_HEAD(&obj->global_list);
4104 INIT_LIST_HEAD(&obj->ring_list);
4105 INIT_LIST_HEAD(&obj->obj_exec_link);
4106 INIT_LIST_HEAD(&obj->vma_list);
4110 obj->fence_reg = I915_FENCE_REG_NONE;
4111 obj->madv = I915_MADV_WILLNEED;
4112 /* Avoid an unnecessary call to unbind on the first bind. */
4113 obj->map_and_fenceable = true;
4115 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4118 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4119 .get_pages = i915_gem_object_get_pages_gtt,
4120 .put_pages = i915_gem_object_put_pages_gtt,
4123 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4126 struct drm_i915_gem_object *obj;
4127 struct address_space *mapping;
4130 obj = i915_gem_object_alloc(dev);
4134 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4135 i915_gem_object_free(obj);
4139 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4140 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4141 /* 965gm cannot relocate objects above 4GiB. */
4142 mask &= ~__GFP_HIGHMEM;
4143 mask |= __GFP_DMA32;
4146 mapping = file_inode(obj->base.filp)->i_mapping;
4147 mapping_set_gfp_mask(mapping, mask);
4149 i915_gem_object_init(obj, &i915_gem_object_ops);
4151 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4152 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4155 /* On some devices, we can have the GPU use the LLC (the CPU
4156 * cache) for about a 10% performance improvement
4157 * compared to uncached. Graphics requests other than
4158 * display scanout are coherent with the CPU in
4159 * accessing this cache. This means in this mode we
4160 * don't need to clflush on the CPU side, and on the
4161 * GPU side we only need to flush internal caches to
4162 * get data visible to the CPU.
4164 * However, we maintain the display planes as UC, and so
4165 * need to rebind when first used as such.
4167 obj->cache_level = I915_CACHE_LLC;
4169 obj->cache_level = I915_CACHE_NONE;
4171 trace_i915_gem_object_create(obj);
4176 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4178 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4179 struct drm_device *dev = obj->base.dev;
4180 struct drm_i915_private *dev_priv = dev->dev_private;
4181 struct i915_vma *vma, *next;
4183 intel_runtime_pm_get(dev_priv);
4185 trace_i915_gem_object_destroy(obj);
4188 i915_gem_detach_phys_object(dev, obj);
4190 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4194 ret = i915_vma_unbind(vma);
4195 if (WARN_ON(ret == -ERESTARTSYS)) {
4196 bool was_interruptible;
4198 was_interruptible = dev_priv->mm.interruptible;
4199 dev_priv->mm.interruptible = false;
4201 WARN_ON(i915_vma_unbind(vma));
4203 dev_priv->mm.interruptible = was_interruptible;
4207 /* Stolen objects don't hold a ref, but do hold a pin count. Fix that up
4208 * before progressing. */
4210 i915_gem_object_unpin_pages(obj);
4212 if (WARN_ON(obj->pages_pin_count))
4213 obj->pages_pin_count = 0;
4214 i915_gem_object_put_pages(obj);
4215 i915_gem_object_free_mmap_offset(obj);
4216 i915_gem_object_release_stolen(obj);
4220 if (obj->base.import_attach)
4221 drm_prime_gem_destroy(&obj->base, NULL);
4223 drm_gem_object_release(&obj->base);
4224 i915_gem_info_remove_obj(dev_priv, obj->base.size);
4227 i915_gem_object_free(obj);
4229 intel_runtime_pm_put(dev_priv);
4232 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4233 struct i915_address_space *vm)
4235 struct i915_vma *vma;
4236 list_for_each_entry(vma, &obj->vma_list, vma_link)
4243 void i915_gem_vma_destroy(struct i915_vma *vma)
4245 WARN_ON(vma->node.allocated);
4247 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4248 if (!list_empty(&vma->exec_list))
4251 list_del(&vma->vma_link);
4257 i915_gem_stop_ringbuffers(struct drm_device *dev)
4259 struct drm_i915_private *dev_priv = dev->dev_private;
4260 struct intel_ring_buffer *ring;
4263 for_each_ring(ring, dev_priv, i)
4264 intel_stop_ring_buffer(ring);
4268 i915_gem_suspend(struct drm_device *dev)
4270 struct drm_i915_private *dev_priv = dev->dev_private;
4273 mutex_lock(&dev->struct_mutex);
4274 if (dev_priv->ums.mm_suspended)
4277 ret = i915_gpu_idle(dev);
4281 i915_gem_retire_requests(dev);
4283 /* Under UMS, be paranoid and evict. */
4284 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4285 i915_gem_evict_everything(dev);
4287 i915_kernel_lost_context(dev);
4288 i915_gem_stop_ringbuffers(dev);
4290 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4291 * We need to replace this with a semaphore, or something.
4292 * And not confound ums.mm_suspended!
4294 dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4296 mutex_unlock(&dev->struct_mutex);
4298 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4299 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4300 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
4305 mutex_unlock(&dev->struct_mutex);
4309 int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
4311 struct drm_device *dev = ring->dev;
4312 struct drm_i915_private *dev_priv = dev->dev_private;
4313 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4314 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4317 if (!HAS_L3_DPF(dev) || !remap_info)
4320 ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4325 * Note: We do not worry about the concurrent register cacheline hang
4326 * here because no other code should access these registers other than
4327 * at initialization time.
4329 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4330 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4331 intel_ring_emit(ring, reg_base + i);
4332 intel_ring_emit(ring, remap_info[i/4]);
4335 intel_ring_advance(ring);
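/*
 * Ring-space accounting for the intel_ring_begin() above: the loop
 * steps through GEN7_L3LOG_SIZE bytes four at a time and emits three
 * dwords per step (the LRI header, the register offset, the value),
 * which is exactly GEN7_L3LOG_SIZE / 4 * 3 dwords.
 */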
4340 void i915_gem_init_swizzling(struct drm_device *dev)
4342 struct drm_i915_private *dev_priv = dev->dev_private;
4344 if (INTEL_INFO(dev)->gen < 5 ||
4345 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4348 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4349 DISP_TILE_SURFACE_SWIZZLING);
4354 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4356 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4357 else if (IS_GEN7(dev))
4358 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4359 else if (IS_GEN8(dev))
4360 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4366 intel_enable_blt(struct drm_device *dev)
4371 /* The blitter was dysfunctional on early prototypes */
4372 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4373 DRM_INFO("BLT not supported on this pre-production hardware;"
4374 " graphics performance will be degraded.\n");
4381 static int i915_gem_init_rings(struct drm_device *dev)
4383 struct drm_i915_private *dev_priv = dev->dev_private;
4386 ret = intel_init_render_ring_buffer(dev);
4391 ret = intel_init_bsd_ring_buffer(dev);
4393 goto cleanup_render_ring;
4396 if (intel_enable_blt(dev)) {
4397 ret = intel_init_blt_ring_buffer(dev);
4399 goto cleanup_bsd_ring;
4402 if (HAS_VEBOX(dev)) {
4403 ret = intel_init_vebox_ring_buffer(dev);
4405 goto cleanup_blt_ring;
4408 if (HAS_BSD2(dev)) {
4409 ret = intel_init_bsd2_ring_buffer(dev);
4411 goto cleanup_vebox_ring;
4414 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4416 goto cleanup_bsd2_ring;
4421 intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
4423 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4425 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4427 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4428 cleanup_render_ring:
4429 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4435 i915_gem_init_hw(struct drm_device *dev)
4437 struct drm_i915_private *dev_priv = dev->dev_private;
4440 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4443 if (dev_priv->ellc_size)
4444 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4446 if (IS_HASWELL(dev))
4447 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4448 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4450 if (HAS_PCH_NOP(dev)) {
4451 if (IS_IVYBRIDGE(dev)) {
4452 u32 temp = I915_READ(GEN7_MSG_CTL);
4453 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4454 I915_WRITE(GEN7_MSG_CTL, temp);
4455 } else if (INTEL_INFO(dev)->gen >= 7) {
4456 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4457 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4458 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4462 i915_gem_init_swizzling(dev);
4464 ret = i915_gem_init_rings(dev);
4468 for (i = 0; i < NUM_L3_SLICES(dev); i++)
4469 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4472 * XXX: Contexts should only be initialized once. Doing a switch to the
4473 * default context switch however is something we'd like to do after
4474 * reset or thaw (the latter may not actually be necessary for HW, but
4475 * goes with our code better). Context switching requires rings (for
4476 * the do_switch), but before enabling PPGTT. So don't move this.
4478 ret = i915_gem_context_enable(dev_priv);
4479 if (ret && ret != -EIO) {
4480 DRM_ERROR("Context enable failed %d\n", ret);
4481 i915_gem_cleanup_ringbuffer(dev);
4487 int i915_gem_init(struct drm_device *dev)
4489 struct drm_i915_private *dev_priv = dev->dev_private;
4492 mutex_lock(&dev->struct_mutex);
4494 if (IS_VALLEYVIEW(dev)) {
4495 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4496 I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
4497 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
4498 VLV_GTLC_ALLOWWAKEACK), 10))
4499 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4502 i915_gem_init_global_gtt(dev);
4504 ret = i915_gem_context_init(dev);
4506 mutex_unlock(&dev->struct_mutex);
4510 ret = i915_gem_init_hw(dev);
4512 /* Allow ring initialisation to fail by marking the GPU as
4513 * wedged. But we only want to do this where the GPU is angry;
4514 * for all other failures, such as an allocation failure, bail.
4516 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4517 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
4520 mutex_unlock(&dev->struct_mutex);
4522 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4523 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4524 dev_priv->dri1.allow_batchbuffer = 1;
4529 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4531 struct drm_i915_private *dev_priv = dev->dev_private;
4532 struct intel_ring_buffer *ring;
4535 for_each_ring(ring, dev_priv, i)
4536 intel_cleanup_ring_buffer(ring);
4540 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4541 struct drm_file *file_priv)
4543 struct drm_i915_private *dev_priv = dev->dev_private;
4546 if (drm_core_check_feature(dev, DRIVER_MODESET))
4549 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
4550 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4551 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
4554 mutex_lock(&dev->struct_mutex);
4555 dev_priv->ums.mm_suspended = 0;
4557 ret = i915_gem_init_hw(dev);
4559 mutex_unlock(&dev->struct_mutex);
4563 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
4565 ret = drm_irq_install(dev, dev->pdev->irq);
4567 goto cleanup_ringbuffer;
4568 mutex_unlock(&dev->struct_mutex);
4573 i915_gem_cleanup_ringbuffer(dev);
4574 dev_priv->ums.mm_suspended = 1;
4575 mutex_unlock(&dev->struct_mutex);
4581 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4582 struct drm_file *file_priv)
4584 if (drm_core_check_feature(dev, DRIVER_MODESET))
4587 mutex_lock(&dev->struct_mutex);
4588 drm_irq_uninstall(dev);
4589 mutex_unlock(&dev->struct_mutex);
4591 return i915_gem_suspend(dev);
4595 i915_gem_lastclose(struct drm_device *dev)
4599 if (drm_core_check_feature(dev, DRIVER_MODESET))
4602 ret = i915_gem_suspend(dev);
4604 DRM_ERROR("failed to idle hardware: %d\n", ret);
4608 init_ring_lists(struct intel_ring_buffer *ring)
4610 INIT_LIST_HEAD(&ring->active_list);
4611 INIT_LIST_HEAD(&ring->request_list);
4614 void i915_init_vm(struct drm_i915_private *dev_priv,
4615 struct i915_address_space *vm)
4617 if (!i915_is_ggtt(vm))
4618 drm_mm_init(&vm->mm, vm->start, vm->total);
4619 vm->dev = dev_priv->dev;
4620 INIT_LIST_HEAD(&vm->active_list);
4621 INIT_LIST_HEAD(&vm->inactive_list);
4622 INIT_LIST_HEAD(&vm->global_link);
4623 list_add_tail(&vm->global_link, &dev_priv->vm_list);
4627 i915_gem_load(struct drm_device *dev)
4629 struct drm_i915_private *dev_priv = dev->dev_private;
4633 kmem_cache_create("i915_gem_object",
4634 sizeof(struct drm_i915_gem_object), 0,
4638 INIT_LIST_HEAD(&dev_priv->vm_list);
4639 i915_init_vm(dev_priv, &dev_priv->gtt.base);
4641 INIT_LIST_HEAD(&dev_priv->context_list);
4642 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4643 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4644 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4645 for (i = 0; i < I915_NUM_RINGS; i++)
4646 init_ring_lists(&dev_priv->ring[i]);
4647 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4648 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4649 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4650 i915_gem_retire_work_handler);
4651 INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
4652 i915_gem_idle_work_handler);
4653 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4655 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4657 I915_WRITE(MI_ARB_STATE,
4658 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4661 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4663 /* Old X drivers will take 0-2 for front, back, depth buffers */
4664 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4665 dev_priv->fence_reg_start = 3;
4667 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4668 dev_priv->num_fence_regs = 32;
4669 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4670 dev_priv->num_fence_regs = 16;
4672 dev_priv->num_fence_regs = 8;
4674 /* Initialize fence registers to zero */
4675 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4676 i915_gem_restore_fences(dev);
4678 i915_gem_detect_bit_6_swizzle(dev);
4679 init_waitqueue_head(&dev_priv->pending_flip_queue);
4681 dev_priv->mm.interruptible = true;
4683 dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
4684 dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
4685 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4686 register_shrinker(&dev_priv->mm.inactive_shrinker);
4690 * Create a physically contiguous memory object for this object
4691 * e.g. for cursor + overlay regs
4693 static int i915_gem_init_phys_object(struct drm_device *dev,
4694 int id, int size, int align)
4696 struct drm_i915_private *dev_priv = dev->dev_private;
4697 struct drm_i915_gem_phys_object *phys_obj;
4700 if (dev_priv->mm.phys_objs[id - 1] || !size)
4703 phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
4709 phys_obj->handle = drm_pci_alloc(dev, size, align);
4710 if (!phys_obj->handle) {
4715 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4718 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4726 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4728 struct drm_i915_private *dev_priv = dev->dev_private;
4729 struct drm_i915_gem_phys_object *phys_obj;
4731 if (!dev_priv->mm.phys_objs[id - 1])
4734 phys_obj = dev_priv->mm.phys_objs[id - 1];
4735 if (phys_obj->cur_obj) {
4736 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4740 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4742 drm_pci_free(dev, phys_obj->handle);
4744 dev_priv->mm.phys_objs[id - 1] = NULL;
4747 void i915_gem_free_all_phys_object(struct drm_device *dev)
4751 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4752 i915_gem_free_phys_object(dev, i);
4755 void i915_gem_detach_phys_object(struct drm_device *dev,
4756 struct drm_i915_gem_object *obj)
4758 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4765 vaddr = obj->phys_obj->handle->vaddr;
4767 page_count = obj->base.size / PAGE_SIZE;
4768 for (i = 0; i < page_count; i++) {
4769 struct page *page = shmem_read_mapping_page(mapping, i);
4770 if (!IS_ERR(page)) {
4771 char *dst = kmap_atomic(page);
4772 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4775 drm_clflush_pages(&page, 1);
4777 set_page_dirty(page);
4778 mark_page_accessed(page);
4779 page_cache_release(page);
4782 i915_gem_chipset_flush(dev);
4784 obj->phys_obj->cur_obj = NULL;
4785 obj->phys_obj = NULL;
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_i915_gem_object *obj,
			    int id,
			    int align)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	if (obj->phys_obj) {
		if (obj->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->base.size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n",
				  id, obj->base.size);
			return ret;
		}
	}

	/* bind to the object */
	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj->phys_obj->cur_obj = obj;
	page_count = obj->base.size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		struct page *page;
		char *dst, *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);

		mark_page_accessed(page);
		page_cache_release(page);
	}
	return 0;
}
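
/*
 * pwrite for phys objects: copy user data straight into the contiguous
 * buffer.  The non-temporal fast path may fault; if so, retry with a
 * plain copy_from_user() with struct_mutex dropped.
 */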
static int
i915_gem_phys_pwrite(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
	char __user *user_data = to_user_ptr(args->data_ptr);

	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}

	i915_gem_chipset_flush(dev);
	return 0;
}
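
/*
 * Called on file close: orphan any requests still pending for this
 * client so that later request retirement does not dereference the
 * freed file_priv.
 */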
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	cancel_delayed_work_sync(&file_priv->mm.idle_work);

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}
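
/* After a period of client inactivity, drop its RPS boost vote. */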
static void
i915_gem_file_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_file_private *file_priv =
		container_of(work, typeof(*file_priv), mm.idle_work.work);

	atomic_set(&file_priv->rps_wait_boost, false);
}
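
/*
 * Allocate and initialise the per-file GEM state (request list, idle
 * work) and open a GEM context for the new client.
 */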
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = dev->dev_private;
	file_priv->file = file;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);
	INIT_DELAYED_WORK(&file_priv->mm.idle_work,
			  i915_gem_file_idle_work_handler);

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}
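
/*
 * Best-effort check whether @task holds @mutex, so the shrinker can
 * detect when direct reclaim has re-entered the driver on a thread
 * that already holds struct_mutex.
 */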
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}
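
/*
 * shrinker->count_objects: report, in pages, how much memory could
 * plausibly be released: unbound objects plus bound-but-inactive,
 * unpinned ones.
 */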
static unsigned long
i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	bool unlock = true;
	unsigned long count;

	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return 0;

		if (dev_priv->mm.shrinker_no_lock_stealing)
			return 0;

		unlock = false;
	}

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (obj->pages_pin_count == 0)
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->active)
			continue;

		if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
			count += obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}

/* All the new VM stuff */
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (!dev_priv->mm.aliasing_ppgtt ||
	    vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));
	list_for_each_entry(vma, &o->vma_list, vma_link) {
		if (vma->vm == vm)
			return vma->node.start;
	}

	return -1;
}
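
/* True if the object has an allocated node in the given VM. */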
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}
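
/* True if the object is bound into any address space at all. */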
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}
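
/*
 * Size of the object's binding in the given VM; like
 * i915_gem_obj_offset(), the aliasing PPGTT is folded back onto the
 * global GTT.
 */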
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (!dev_priv->mm.aliasing_ppgtt ||
	    vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm)
			return vma->node.size;

	return 0;
}
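
/*
 * shrinker->scan_objects: try to release sc->nr_to_scan pages,
 * escalating from purgeable objects, to unbinding inactive objects,
 * to dropping everything we can.
 */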
static unsigned long
i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	unsigned long freed;
	bool unlock = true;

	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return SHRINK_STOP;

		if (dev_priv->mm.shrinker_no_lock_stealing)
			return SHRINK_STOP;

		unlock = false;
	}

	freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
	if (freed < sc->nr_to_scan)
		freed += __i915_gem_shrink(dev_priv,
					   sc->nr_to_scan - freed,
					   false);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink_all(dev_priv);

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return freed;
}
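
/*
 * Look up the global-GTT VMA; this relies on the GGTT VMA, when it
 * exists, sitting at the head of obj->vma_list.
 */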
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	if (WARN_ON(list_empty(&obj->vma_list)))
		return NULL;

	vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
	if (vma->vm != obj_to_ggtt(obj))
		return NULL;

	return vma;
}