2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/shmem_fs.h>
35 #include <linux/slab.h>
36 #include <linux/swap.h>
37 #include <linux/pci.h>
38 #include <linux/dma-buf.h>
40 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
41 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
43 static __must_check int
44 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
47 i915_gem_object_retire(struct drm_i915_gem_object *obj);
49 static int i915_gem_phys_pwrite(struct drm_device *dev,
50 struct drm_i915_gem_object *obj,
51 struct drm_i915_gem_pwrite *args,
52 struct drm_file *file);
54 static void i915_gem_write_fence(struct drm_device *dev, int reg,
55 struct drm_i915_gem_object *obj);
56 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
57 struct drm_i915_fence_reg *fence,
60 static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
61 struct shrink_control *sc);
62 static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
63 struct shrink_control *sc);
64 static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
65 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
67 static bool cpu_cache_is_coherent(struct drm_device *dev,
68 enum i915_cache_level level)
70 return HAS_LLC(dev) || level != I915_CACHE_NONE;
73 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
75 if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
78 return obj->pin_display;
81 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
84 i915_gem_release_mmap(obj);
86 /* As we do not have an associated fence register, we will force
87 * a tiling change if we ever need to acquire one.
89 obj->fence_dirty = false;
90 obj->fence_reg = I915_FENCE_REG_NONE;
93 /* some bookkeeping */
94 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
97 spin_lock(&dev_priv->mm.object_stat_lock);
98 dev_priv->mm.object_count++;
99 dev_priv->mm.object_memory += size;
100 spin_unlock(&dev_priv->mm.object_stat_lock);
103 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
106 spin_lock(&dev_priv->mm.object_stat_lock);
107 dev_priv->mm.object_count--;
108 dev_priv->mm.object_memory -= size;
109 spin_unlock(&dev_priv->mm.object_stat_lock);
113 i915_gem_wait_for_error(struct i915_gpu_error *error)
117 #define EXIT_COND (!i915_reset_in_progress(error) || \
118 i915_terminally_wedged(error))
123 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
124 * userspace. If it takes that long something really bad is going on and
125 * we should simply try to bail out and fail as gracefully as possible.
127 ret = wait_event_interruptible_timeout(error->reset_queue,
131 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
133 } else if (ret < 0) {
141 int i915_mutex_lock_interruptible(struct drm_device *dev)
143 struct drm_i915_private *dev_priv = dev->dev_private;
146 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
150 ret = mutex_lock_interruptible(&dev->struct_mutex);
154 WARN_ON(i915_verify_lists(dev));
159 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
161 return i915_gem_obj_bound_any(obj) && !obj->active;
165 i915_gem_init_ioctl(struct drm_device *dev, void *data,
166 struct drm_file *file)
168 struct drm_i915_private *dev_priv = dev->dev_private;
169 struct drm_i915_gem_init *args = data;
171 if (drm_core_check_feature(dev, DRIVER_MODESET))
174 if (args->gtt_start >= args->gtt_end ||
175 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
178 /* GEM with user mode setting was never supported on ilk and later. */
179 if (INTEL_INFO(dev)->gen >= 5)
182 mutex_lock(&dev->struct_mutex);
183 i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
185 dev_priv->gtt.mappable_end = args->gtt_end;
186 mutex_unlock(&dev->struct_mutex);
192 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
193 struct drm_file *file)
195 struct drm_i915_private *dev_priv = dev->dev_private;
196 struct drm_i915_gem_get_aperture *args = data;
197 struct drm_i915_gem_object *obj;
201 mutex_lock(&dev->struct_mutex);
202 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
203 if (i915_gem_obj_is_pinned(obj))
204 pinned += i915_gem_obj_ggtt_size(obj);
205 mutex_unlock(&dev->struct_mutex);
207 args->aper_size = dev_priv->gtt.base.total;
208 args->aper_available_size = args->aper_size - pinned;
213 void *i915_gem_object_alloc(struct drm_device *dev)
215 struct drm_i915_private *dev_priv = dev->dev_private;
216 return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
219 void i915_gem_object_free(struct drm_i915_gem_object *obj)
221 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
222 kmem_cache_free(dev_priv->slab, obj);
226 i915_gem_create(struct drm_file *file,
227 struct drm_device *dev,
231 struct drm_i915_gem_object *obj;
235 size = roundup(size, PAGE_SIZE);
239 /* Allocate the new object */
240 obj = i915_gem_alloc_object(dev, size);
244 ret = drm_gem_handle_create(file, &obj->base, &handle);
245 /* drop reference from allocate - handle holds it now */
246 drm_gem_object_unreference_unlocked(&obj->base);
255 i915_gem_dumb_create(struct drm_file *file,
256 struct drm_device *dev,
257 struct drm_mode_create_dumb *args)
259 /* have to work out size/pitch and return them */
260 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
261 args->size = args->pitch * args->height;
262 return i915_gem_create(file, dev,
263 args->size, &args->handle);
267 * Creates a new mm object and returns a handle to it.
270 i915_gem_create_ioctl(struct drm_device *dev, void *data,
271 struct drm_file *file)
273 struct drm_i915_gem_create *args = data;
275 return i915_gem_create(file, dev,
276 args->size, &args->handle);
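/* The helpers below copy to/from objects whose pages are bit-17 swizzled
 * by the memory controller.  Roughly: on such pages each 64-byte
 * cacheline's data effectively lives at (offset ^ 64) within the page,
 * so the loops walk cacheline-sized chunks and XOR the GPU-side offset
 * with 64 before copying.
 */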
280 __copy_to_user_swizzled(char __user *cpu_vaddr,
281 const char *gpu_vaddr, int gpu_offset,
284 int ret, cpu_offset = 0;
287 int cacheline_end = ALIGN(gpu_offset + 1, 64);
288 int this_length = min(cacheline_end - gpu_offset, length);
289 int swizzled_gpu_offset = gpu_offset ^ 64;
291 ret = __copy_to_user(cpu_vaddr + cpu_offset,
292 gpu_vaddr + swizzled_gpu_offset,
297 cpu_offset += this_length;
298 gpu_offset += this_length;
299 length -= this_length;
306 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
307 const char __user *cpu_vaddr,
310 int ret, cpu_offset = 0;
313 int cacheline_end = ALIGN(gpu_offset + 1, 64);
314 int this_length = min(cacheline_end - gpu_offset, length);
315 int swizzled_gpu_offset = gpu_offset ^ 64;
317 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
318 cpu_vaddr + cpu_offset,
323 cpu_offset += this_length;
324 gpu_offset += this_length;
325 length -= this_length;
332 * Pins the specified object's pages and synchronizes the object with
333 * GPU accesses. Sets needs_clflush to non-zero if the caller should
334 * flush the object from the CPU cache.
336 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
346 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
347 /* If we're not in the cpu read domain, set ourself into the gtt
348 * read domain and manually flush cachelines (if required). This
349 * optimizes for the case when the gpu will dirty the data
350 * anyway again before the next pread happens. */
351 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
353 ret = i915_gem_object_wait_rendering(obj, true);
357 i915_gem_object_retire(obj);
360 ret = i915_gem_object_get_pages(obj);
364 i915_gem_object_pin_pages(obj);
369 /* Per-page copy function for the shmem pread fastpath.
370 * Flushes invalid cachelines before reading the target if
371 * needs_clflush is set. */
373 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
374 char __user *user_data,
375 bool page_do_bit17_swizzling, bool needs_clflush)
380 if (unlikely(page_do_bit17_swizzling))
383 vaddr = kmap_atomic(page);
385 drm_clflush_virt_range(vaddr + shmem_page_offset,
387 ret = __copy_to_user_inatomic(user_data,
388 vaddr + shmem_page_offset,
390 kunmap_atomic(vaddr);
392 return ret ? -EFAULT : 0;
396 shmem_clflush_swizzled_range(char *addr, unsigned long length,
399 if (unlikely(swizzled)) {
400 unsigned long start = (unsigned long) addr;
401 unsigned long end = (unsigned long) addr + length;
403 /* For swizzling simply ensure that we always flush both
404 * channels. Lame, but simple and it works. Swizzled
405 * pwrite/pread is far from a hotpath - current userspace
406 * doesn't use it at all. */
407 start = round_down(start, 128);
408 end = round_up(end, 128);
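/* rounding to 128 bytes spans both 64-byte halves of a swizzled pair
 * (cf. the ^ 64 in the swizzled copy helpers above), so the flush hits
 * the data wherever the controller placed it */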
410 drm_clflush_virt_range((void *)start, end - start);
412 drm_clflush_virt_range(addr, length);
417 /* Only difference to the fast-path function is that this can handle bit17
418 * and uses non-atomic copy and kmap functions. */
420 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
421 char __user *user_data,
422 bool page_do_bit17_swizzling, bool needs_clflush)
429 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
431 page_do_bit17_swizzling);
433 if (page_do_bit17_swizzling)
434 ret = __copy_to_user_swizzled(user_data,
435 vaddr, shmem_page_offset,
438 ret = __copy_to_user(user_data,
439 vaddr + shmem_page_offset,
443 return ret ? -EFAULT : 0;
447 i915_gem_shmem_pread(struct drm_device *dev,
448 struct drm_i915_gem_object *obj,
449 struct drm_i915_gem_pread *args,
450 struct drm_file *file)
452 char __user *user_data;
455 int shmem_page_offset, page_length, ret = 0;
456 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
458 int needs_clflush = 0;
459 struct sg_page_iter sg_iter;
461 user_data = to_user_ptr(args->data_ptr);
464 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
466 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
470 offset = args->offset;
472 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
473 offset >> PAGE_SHIFT) {
474 struct page *page = sg_page_iter_page(&sg_iter);
479 /* Operation in this page
481 * shmem_page_offset = offset within page in shmem file
482 * page_length = bytes to copy for this page
484 shmem_page_offset = offset_in_page(offset);
485 page_length = remain;
486 if ((shmem_page_offset + page_length) > PAGE_SIZE)
487 page_length = PAGE_SIZE - shmem_page_offset;
489 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
490 (page_to_phys(page) & (1 << 17)) != 0;
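/* Roughly: bit 17 of the page's physical address feeds the swizzle
 * pattern on affected chipsets, so whether this particular page's
 * contents are swizzled has to be decided per page, as above. */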
492 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
493 user_data, page_do_bit17_swizzling,
498 mutex_unlock(&dev->struct_mutex);
500 if (likely(!i915.prefault_disable) && !prefaulted) {
501 ret = fault_in_multipages_writeable(user_data, remain);
502 /* Userspace is tricking us, but we've already clobbered
503 * its pages with the prefault and promised to write the
504 * data up to the first fault. Hence ignore any errors
505 * and just continue. */
510 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
511 user_data, page_do_bit17_swizzling,
514 mutex_lock(&dev->struct_mutex);
520 remain -= page_length;
521 user_data += page_length;
522 offset += page_length;
526 i915_gem_object_unpin_pages(obj);
532 * Reads data from the object referenced by handle.
534 * On error, the contents of *data are undefined.
537 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
538 struct drm_file *file)
540 struct drm_i915_gem_pread *args = data;
541 struct drm_i915_gem_object *obj;
547 if (!access_ok(VERIFY_WRITE,
548 to_user_ptr(args->data_ptr),
552 ret = i915_mutex_lock_interruptible(dev);
556 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
557 if (&obj->base == NULL) {
562 /* Bounds check source. */
563 if (args->offset > obj->base.size ||
564 args->size > obj->base.size - args->offset) {
569 /* prime objects have no backing filp to GEM pread/pwrite
572 if (!obj->base.filp) {
577 trace_i915_gem_object_pread(obj, args->offset, args->size);
579 ret = i915_gem_shmem_pread(dev, obj, args, file);
582 drm_gem_object_unreference(&obj->base);
584 mutex_unlock(&dev->struct_mutex);
588 /* This is the fast write path which cannot handle
589 * page faults in the source data
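 * (the copy runs under an atomic io-mapping and so cannot sleep to
 * service a fault; any fault aborts the write and the caller falls
 * back to the slower shmem path)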
593 fast_user_write(struct io_mapping *mapping,
594 loff_t page_base, int page_offset,
595 char __user *user_data,
598 void __iomem *vaddr_atomic;
600 unsigned long unwritten;
602 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
603 /* We can use the cpu mem copy function because this is X86. */
604 vaddr = (void __force*)vaddr_atomic + page_offset;
605 unwritten = __copy_from_user_inatomic_nocache(vaddr,
607 io_mapping_unmap_atomic(vaddr_atomic);
612 * This is the fast pwrite path, where we copy the data directly from the
613 * user into the GTT, uncached.
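 *
 * In outline: pin the object into the mappable GGTT, move it to the GTT
 * write domain, release any fence register, then copy page by page
 * through an atomic write-combining io-mapping of the aperture; a fault
 * on the user buffer aborts so the caller can take the slow path.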
616 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
617 struct drm_i915_gem_object *obj,
618 struct drm_i915_gem_pwrite *args,
619 struct drm_file *file)
621 struct drm_i915_private *dev_priv = dev->dev_private;
623 loff_t offset, page_base;
624 char __user *user_data;
625 int page_offset, page_length, ret;
627 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
631 ret = i915_gem_object_set_to_gtt_domain(obj, true);
635 ret = i915_gem_object_put_fence(obj);
639 user_data = to_user_ptr(args->data_ptr);
642 offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
645 /* Operation in this page
647 * page_base = page offset within aperture
648 * page_offset = offset within page
649 * page_length = bytes to copy for this page
651 page_base = offset & PAGE_MASK;
652 page_offset = offset_in_page(offset);
653 page_length = remain;
654 if ((page_offset + remain) > PAGE_SIZE)
655 page_length = PAGE_SIZE - page_offset;
657 /* If we get a fault while copying data, then (presumably) our
658 * source page isn't available. Return the error and we'll
659 * retry in the slow path.
661 if (fast_user_write(dev_priv->gtt.mappable, page_base,
662 page_offset, user_data, page_length)) {
667 remain -= page_length;
668 user_data += page_length;
669 offset += page_length;
673 i915_gem_object_ggtt_unpin(obj);
678 /* Per-page copy function for the shmem pwrite fastpath.
679 * Flushes invalid cachelines before writing to the target if
680 * needs_clflush_before is set and flushes out any written cachelines after
681 * writing if needs_clflush is set. */
683 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
684 char __user *user_data,
685 bool page_do_bit17_swizzling,
686 bool needs_clflush_before,
687 bool needs_clflush_after)
692 if (unlikely(page_do_bit17_swizzling))
695 vaddr = kmap_atomic(page);
696 if (needs_clflush_before)
697 drm_clflush_virt_range(vaddr + shmem_page_offset,
699 ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
700 user_data, page_length);
701 if (needs_clflush_after)
702 drm_clflush_virt_range(vaddr + shmem_page_offset,
704 kunmap_atomic(vaddr);
706 return ret ? -EFAULT : 0;
709 /* Only difference to the fast-path function is that this can handle bit17
710 * and uses non-atomic copy and kmap functions. */
712 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
713 char __user *user_data,
714 bool page_do_bit17_swizzling,
715 bool needs_clflush_before,
716 bool needs_clflush_after)
722 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
723 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
725 page_do_bit17_swizzling);
726 if (page_do_bit17_swizzling)
727 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
731 ret = __copy_from_user(vaddr + shmem_page_offset,
734 if (needs_clflush_after)
735 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
737 page_do_bit17_swizzling);
740 return ret ? -EFAULT : 0;
744 i915_gem_shmem_pwrite(struct drm_device *dev,
745 struct drm_i915_gem_object *obj,
746 struct drm_i915_gem_pwrite *args,
747 struct drm_file *file)
751 char __user *user_data;
752 int shmem_page_offset, page_length, ret = 0;
753 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
754 int hit_slowpath = 0;
755 int needs_clflush_after = 0;
756 int needs_clflush_before = 0;
757 struct sg_page_iter sg_iter;
759 user_data = to_user_ptr(args->data_ptr);
762 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
764 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
765 /* If we're not in the cpu write domain, set ourself into the gtt
766 * write domain and manually flush cachelines (if required). This
767 * optimizes for the case when the gpu will use the data
768 * right away and we therefore have to clflush anyway. */
769 needs_clflush_after = cpu_write_needs_clflush(obj);
770 ret = i915_gem_object_wait_rendering(obj, false);
774 i915_gem_object_retire(obj);
776 /* Same trick applies to invalidate partially written cachelines read
778 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
779 needs_clflush_before =
780 !cpu_cache_is_coherent(dev, obj->cache_level);
782 ret = i915_gem_object_get_pages(obj);
786 i915_gem_object_pin_pages(obj);
788 offset = args->offset;
791 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
792 offset >> PAGE_SHIFT) {
793 struct page *page = sg_page_iter_page(&sg_iter);
794 int partial_cacheline_write;
799 /* Operation in this page
801 * shmem_page_offset = offset within page in shmem file
802 * page_length = bytes to copy for this page
804 shmem_page_offset = offset_in_page(offset);
806 page_length = remain;
807 if ((shmem_page_offset + page_length) > PAGE_SIZE)
808 page_length = PAGE_SIZE - shmem_page_offset;
810 /* If we don't overwrite a cacheline completely we need to be
811 * careful to have up-to-date data by first clflushing. Don't
812 * overcomplicate things and flush the entire page. */
813 partial_cacheline_write = needs_clflush_before &&
814 ((shmem_page_offset | page_length)
815 & (boot_cpu_data.x86_clflush_size - 1));
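/* e.g. a 10-byte write at offset 3 only partially covers its cacheline,
 * so that line must be clflushed first to keep the untouched bytes
 * up to date; writes covering whole cachelines skip the pre-flush */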
817 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
818 (page_to_phys(page) & (1 << 17)) != 0;
820 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
821 user_data, page_do_bit17_swizzling,
822 partial_cacheline_write,
823 needs_clflush_after);
828 mutex_unlock(&dev->struct_mutex);
829 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
830 user_data, page_do_bit17_swizzling,
831 partial_cacheline_write,
832 needs_clflush_after);
834 mutex_lock(&dev->struct_mutex);
840 remain -= page_length;
841 user_data += page_length;
842 offset += page_length;
846 i915_gem_object_unpin_pages(obj);
850 * Fixup: Flush cpu caches in case we didn't flush the dirty
851 * cachelines in-line while writing and the object moved
852 * out of the cpu write domain while we've dropped the lock.
854 if (!needs_clflush_after &&
855 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
856 if (i915_gem_clflush_object(obj, obj->pin_display))
857 i915_gem_chipset_flush(dev);
861 if (needs_clflush_after)
862 i915_gem_chipset_flush(dev);
868 * Writes data to the object referenced by handle.
870 * On error, the contents of the buffer that were to be modified are undefined.
873 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
874 struct drm_file *file)
876 struct drm_i915_gem_pwrite *args = data;
877 struct drm_i915_gem_object *obj;
883 if (!access_ok(VERIFY_READ,
884 to_user_ptr(args->data_ptr),
888 if (likely(!i915.prefault_disable)) {
889 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
895 ret = i915_mutex_lock_interruptible(dev);
899 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
900 if (&obj->base == NULL) {
905 /* Bounds check destination. */
906 if (args->offset > obj->base.size ||
907 args->size > obj->base.size - args->offset) {
912 /* prime objects have no backing filp to GEM pread/pwrite
915 if (!obj->base.filp) {
920 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
923 /* We can only do the GTT pwrite on untiled buffers, as otherwise
924 * it would end up going through the fenced access, and we'll get
925 * different detiling behavior between reading and writing.
926 * pread/pwrite currently are reading and writing from the CPU
927 * perspective, requiring manual detiling by the client.
930 ret = i915_gem_phys_pwrite(dev, obj, args, file);
934 if (obj->tiling_mode == I915_TILING_NONE &&
935 obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
936 cpu_write_needs_clflush(obj)) {
937 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
938 /* Note that the gtt paths might fail with non-page-backed user
939 * pointers (e.g. gtt mappings when moving data between
940 * textures). Fallback to the shmem path in that case. */
943 if (ret == -EFAULT || ret == -ENOSPC)
944 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
947 drm_gem_object_unreference(&obj->base);
949 mutex_unlock(&dev->struct_mutex);
954 i915_gem_check_wedge(struct i915_gpu_error *error,
957 if (i915_reset_in_progress(error)) {
958 /* Non-interruptible callers can't handle -EAGAIN, hence return
959 * -EIO unconditionally for these. */
963 /* Recovery complete, but the reset failed ... */
964 if (i915_terminally_wedged(error))
974 * Compare seqno against outstanding lazy request. Emit a request if they are
978 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
982 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
985 if (seqno == ring->outstanding_lazy_seqno)
986 ret = i915_add_request(ring, NULL);
991 static void fake_irq(unsigned long data)
993 wake_up_process((struct task_struct *)data);
996 static bool missed_irq(struct drm_i915_private *dev_priv,
997 struct intel_ring_buffer *ring)
999 return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
1002 static bool can_wait_boost(struct drm_i915_file_private *file_priv)
1004 if (file_priv == NULL)
1007 return !atomic_xchg(&file_priv->rps_wait_boost, true);
1011 * __wait_seqno - wait until execution of seqno has finished
1012 * @ring: the ring expected to report seqno
1014 * @reset_counter: reset sequence associated with the given seqno
1015 * @interruptible: do an interruptible wait (normally yes)
1016 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1018 * Note: It is of utmost importance that the passed in seqno and reset_counter
1019 * values have been read by the caller in an smp safe manner. Where read-side
1020 * locks are involved, it is sufficient to read the reset_counter before
1021 * unlocking the lock that protects the seqno. For lockless tricks, the
1022 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
1025 * Returns 0 if the seqno was found within the allotted time. Else returns the
1026 * errno with remaining time filled in timeout argument.
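 *
 * The wait loop below, roughly: check whether the seqno has already
 * passed, optionally boost the GPU clocks, then sleep on the ring's
 * irq_queue, re-checking on every wakeup for a completed seqno, a
 * pending signal, a GPU reset (reset_counter changing) or expiry of the
 * timeout; a fallback timer (fake_irq) kicks the wait along when
 * interrupts are known to be missed.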
1028 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1029 unsigned reset_counter,
1031 struct timespec *timeout,
1032 struct drm_i915_file_private *file_priv)
1034 struct drm_device *dev = ring->dev;
1035 struct drm_i915_private *dev_priv = dev->dev_private;
1036 const bool irq_test_in_progress =
1037 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
1038 struct timespec before, now;
1040 unsigned long timeout_expire;
1043 WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");
1045 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1048 timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
1050 if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
1051 gen6_rps_boost(dev_priv);
1053 mod_delayed_work(dev_priv->wq,
1054 &file_priv->mm.idle_work,
1055 msecs_to_jiffies(100));
1058 if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
1061 /* Record current time in case interrupted by signal, or wedged */
1062 trace_i915_gem_request_wait_begin(ring, seqno);
1063 getrawmonotonic(&before);
1065 struct timer_list timer;
1067 prepare_to_wait(&ring->irq_queue, &wait,
1068 interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
1070 /* We need to check whether any gpu reset happened in between
1071 * the caller grabbing the seqno and now ... */
1072 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
1073 /* ... but upgrade the -EAGAIN to an -EIO if the gpu
1074 * is truly gone. */
1075 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1081 if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
1086 if (interruptible && signal_pending(current)) {
1091 if (timeout && time_after_eq(jiffies, timeout_expire)) {
1096 timer.function = NULL;
1097 if (timeout || missed_irq(dev_priv, ring)) {
1098 unsigned long expire;
1100 setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
1101 expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
1102 mod_timer(&timer, expire);
1107 if (timer.function) {
1108 del_singleshot_timer_sync(&timer);
1109 destroy_timer_on_stack(&timer);
1112 getrawmonotonic(&now);
1113 trace_i915_gem_request_wait_end(ring, seqno);
1115 if (!irq_test_in_progress)
1116 ring->irq_put(ring);
1118 finish_wait(&ring->irq_queue, &wait);
1121 struct timespec sleep_time = timespec_sub(now, before);
1122 *timeout = timespec_sub(*timeout, sleep_time);
1123 if (!timespec_valid(timeout)) /* i.e. negative time remains */
1124 set_normalized_timespec(timeout, 0, 0);
1131 * Waits for a sequence number to be signaled, and cleans up the
1132 * request and object lists appropriately for that event.
1135 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1137 struct drm_device *dev = ring->dev;
1138 struct drm_i915_private *dev_priv = dev->dev_private;
1139 bool interruptible = dev_priv->mm.interruptible;
1142 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1145 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1149 ret = i915_gem_check_olr(ring, seqno);
1153 return __wait_seqno(ring, seqno,
1154 atomic_read(&dev_priv->gpu_error.reset_counter),
1155 interruptible, NULL, NULL);
1159 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
1160 struct intel_ring_buffer *ring)
1165 /* Manually manage the write flush as we may have not yet
1166 * retired the buffer.
1168 * Note that the last_write_seqno is always the earlier of
1169 * the two (read/write) seqno, so if we have successfully waited,
1170 * we know we have passed the last write.
1172 obj->last_write_seqno = 0;
1178 * Ensures that all rendering to the object has completed and the object is
1179 * safe to unbind from the GTT or access from the CPU.
1181 static __must_check int
1182 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1185 struct intel_ring_buffer *ring = obj->ring;
1189 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1193 ret = i915_wait_seqno(ring, seqno);
1197 return i915_gem_object_wait_rendering__tail(obj, ring);
1200 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1201 * as the object state may change during this call.
1203 static __must_check int
1204 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1205 struct drm_i915_file_private *file_priv,
1208 struct drm_device *dev = obj->base.dev;
1209 struct drm_i915_private *dev_priv = dev->dev_private;
1210 struct intel_ring_buffer *ring = obj->ring;
1211 unsigned reset_counter;
1215 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1216 BUG_ON(!dev_priv->mm.interruptible);
1218 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1222 ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1226 ret = i915_gem_check_olr(ring, seqno);
1230 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1231 mutex_unlock(&dev->struct_mutex);
1232 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
1233 mutex_lock(&dev->struct_mutex);
1237 return i915_gem_object_wait_rendering__tail(obj, ring);
1241 * Called when user space prepares to use an object with the CPU, either
1242 * through the mmap ioctl's mapping or a GTT mapping.
1245 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1246 struct drm_file *file)
1248 struct drm_i915_gem_set_domain *args = data;
1249 struct drm_i915_gem_object *obj;
1250 uint32_t read_domains = args->read_domains;
1251 uint32_t write_domain = args->write_domain;
1254 /* Only handle setting domains to types used by the CPU. */
1255 if (write_domain & I915_GEM_GPU_DOMAINS)
1258 if (read_domains & I915_GEM_GPU_DOMAINS)
1261 /* Having something in the write domain implies it's in the read
1262 * domain, and only that read domain. Enforce that in the request.
1264 if (write_domain != 0 && read_domains != write_domain)
1267 ret = i915_mutex_lock_interruptible(dev);
1271 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1272 if (&obj->base == NULL) {
1277 /* Try to flush the object off the GPU without holding the lock.
1278 * We will repeat the flush holding the lock in the normal manner
1279 * to catch cases where we are gazumped.
1281 ret = i915_gem_object_wait_rendering__nonblocking(obj,
1287 if (read_domains & I915_GEM_DOMAIN_GTT) {
1288 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1290 /* Silently promote "you're not bound, there was nothing to do"
1291 * to success, since the client was just asking us to
1292 * make sure everything was done.
1297 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1301 drm_gem_object_unreference(&obj->base);
1303 mutex_unlock(&dev->struct_mutex);
1308 * Called when user space has done writes to this buffer
1311 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1312 struct drm_file *file)
1314 struct drm_i915_gem_sw_finish *args = data;
1315 struct drm_i915_gem_object *obj;
1318 ret = i915_mutex_lock_interruptible(dev);
1322 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1323 if (&obj->base == NULL) {
1328 /* Pinned buffers may be scanout, so flush the cache */
1329 if (obj->pin_display)
1330 i915_gem_object_flush_cpu_write_domain(obj, true);
1332 drm_gem_object_unreference(&obj->base);
1334 mutex_unlock(&dev->struct_mutex);
1339 * Maps the contents of an object, returning the address it is mapped
1342 * While the mapping holds a reference on the contents of the object, it doesn't
1343 * imply a ref on the object itself.
1346 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1347 struct drm_file *file)
1349 struct drm_i915_gem_mmap *args = data;
1350 struct drm_gem_object *obj;
1353 obj = drm_gem_object_lookup(dev, file, args->handle);
1357 /* prime objects have no backing filp to GEM mmap
1361 drm_gem_object_unreference_unlocked(obj);
1365 addr = vm_mmap(obj->filp, 0, args->size,
1366 PROT_READ | PROT_WRITE, MAP_SHARED,
1368 drm_gem_object_unreference_unlocked(obj);
1369 if (IS_ERR((void *)addr))
1372 args->addr_ptr = (uint64_t) addr;
1378 * i915_gem_fault - fault a page into the GTT
1379 * @vma: VMA in question
1382 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1383 * from userspace. The fault handler takes care of binding the object to
1384 * the GTT (if needed), allocating and programming a fence register (again,
1385 * only if needed based on whether the old reg is still valid or the object
1386 * is tiled) and inserting a new PTE into the faulting process.
1388 * Note that the faulting process may involve evicting existing objects
1389 * from the GTT and/or fence registers to make room. So performance may
1390 * suffer if the GTT working set is large or there are few fence registers
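 *
 * In outline the handler below: takes runtime-pm and struct_mutex
 * references, flushes outstanding rendering, pins the object into the
 * mappable GGTT, moves it to the GTT domain, grabs a fence if needed and
 * finally inserts the aperture PFN for the faulting address with
 * vm_insert_pfn().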
1393 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1395 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1396 struct drm_device *dev = obj->base.dev;
1397 struct drm_i915_private *dev_priv = dev->dev_private;
1398 pgoff_t page_offset;
1401 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1403 intel_runtime_pm_get(dev_priv);
1405 /* We don't use vmf->pgoff since that has the fake offset */
1406 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1409 ret = i915_mutex_lock_interruptible(dev);
1413 trace_i915_gem_object_fault(obj, page_offset, true, write);
1415 /* Try to flush the object off the GPU first without holding the lock.
1416 * Upon reacquiring the lock, we will perform our sanity checks and then
1417 * repeat the flush holding the lock in the normal manner to catch cases
1418 * where we are gazumped.
1420 ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1424 /* Access to snoopable pages through the GTT is incoherent. */
1425 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1430 /* Now bind it into the GTT if needed */
1431 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
1435 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1439 ret = i915_gem_object_get_fence(obj);
1443 obj->fault_mappable = true;
1445 pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1449 /* Finally, remap it using the new GTT offset */
1450 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1452 i915_gem_object_ggtt_unpin(obj);
1454 mutex_unlock(&dev->struct_mutex);
1458 /* If this -EIO is due to a gpu hang, give the reset code a
1459 * chance to clean up the mess. Otherwise return the proper
1461 if (i915_terminally_wedged(&dev_priv->gpu_error)) {
1462 ret = VM_FAULT_SIGBUS;
1467 * EAGAIN means the gpu is hung and we'll wait for the error
1468 * handler to reset everything when re-faulting in
1469 * i915_mutex_lock_interruptible.
1476 * EBUSY is ok: this just means that another thread
1477 * already did the job.
1479 ret = VM_FAULT_NOPAGE;
1486 ret = VM_FAULT_SIGBUS;
1489 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1490 ret = VM_FAULT_SIGBUS;
1494 intel_runtime_pm_put(dev_priv);
1498 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1500 struct i915_vma *vma;
1503 * Only the global gtt is relevant for gtt memory mappings, so restrict
1504 * list traversal to objects bound into the global address space. Note
1505 * that the active list should be empty, but better safe than sorry.
1507 WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
1508 list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
1509 i915_gem_release_mmap(vma->obj);
1510 list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
1511 i915_gem_release_mmap(vma->obj);
1515 * i915_gem_release_mmap - remove physical page mappings
1516 * @obj: obj in question
1518 * Preserve the reservation of the mmapping with the DRM core code, but
1519 * relinquish ownership of the pages back to the system.
1521 * It is vital that we remove the page mapping if we have mapped a tiled
1522 * object through the GTT and then lose the fence register due to
1523 * resource pressure. Similarly if the object has been moved out of the
1524 * aperture, then pages mapped into userspace must be revoked. Removing the
1525 * mapping will then trigger a page fault on the next user access, allowing
1526 * fixup by i915_gem_fault().
1529 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1531 if (!obj->fault_mappable)
1534 drm_vma_node_unmap(&obj->base.vma_node,
1535 obj->base.dev->anon_inode->i_mapping);
1536 obj->fault_mappable = false;
1540 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1544 if (INTEL_INFO(dev)->gen >= 4 ||
1545 tiling_mode == I915_TILING_NONE)
1548 /* Previous chips need a power-of-two fence region when tiling */
1549 if (INTEL_INFO(dev)->gen == 3)
1550 gtt_size = 1024*1024;
1552 gtt_size = 512*1024;
1554 while (gtt_size < size)
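/* i.e. round up to the next power of two: e.g. a 1.5MiB tiled object on
 * gen3 starts from the 1MiB minimum above and gets a 2MiB fence region */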
1561 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1562 * @obj: object to check
1564 * Return the required GTT alignment for an object, taking into account
1565 * potential fence register mapping.
1568 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1569 int tiling_mode, bool fenced)
1572 * Minimum alignment is 4k (GTT page size), but might be greater
1573 * if a fence register is needed for the object.
1575 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1576 tiling_mode == I915_TILING_NONE)
1580 * Previous chips need to be aligned to the size of the smallest
1581 * fence register that can contain the object.
1583 return i915_gem_get_gtt_size(dev, size, tiling_mode);
1586 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1588 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1591 if (drm_vma_node_has_offset(&obj->base.vma_node))
1594 dev_priv->mm.shrinker_no_lock_stealing = true;
1596 ret = drm_gem_create_mmap_offset(&obj->base);
1600 /* Badly fragmented mmap space? The only way we can recover
1601 * space is by destroying unwanted objects. We can't randomly release
1602 * mmap_offsets as userspace expects them to be persistent for the
1603 * lifetime of the objects. The closest we can do is to release the
1604 * offsets on purgeable objects by truncating it and marking it purged,
1605 * which prevents userspace from ever using that object again.
1607 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1608 ret = drm_gem_create_mmap_offset(&obj->base);
1612 i915_gem_shrink_all(dev_priv);
1613 ret = drm_gem_create_mmap_offset(&obj->base);
1615 dev_priv->mm.shrinker_no_lock_stealing = false;
1620 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1622 drm_gem_free_mmap_offset(&obj->base);
1626 i915_gem_mmap_gtt(struct drm_file *file,
1627 struct drm_device *dev,
1631 struct drm_i915_private *dev_priv = dev->dev_private;
1632 struct drm_i915_gem_object *obj;
1635 ret = i915_mutex_lock_interruptible(dev);
1639 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1640 if (&obj->base == NULL) {
1645 if (obj->base.size > dev_priv->gtt.mappable_end) {
1650 if (obj->madv != I915_MADV_WILLNEED) {
1651 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
1656 ret = i915_gem_object_create_mmap_offset(obj);
1660 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
1663 drm_gem_object_unreference(&obj->base);
1665 mutex_unlock(&dev->struct_mutex);
1670 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1672 * @data: GTT mapping ioctl data
1673 * @file: GEM object info
1675 * Simply returns the fake offset to userspace so it can mmap it.
1676 * The mmap call will end up in drm_gem_mmap(), which will set things
1677 * up so we can get faults in the handler above.
1679 * The fault handler will take care of binding the object into the GTT
1680 * (since it may have been evicted to make room for something), allocating
1681 * a fence register, and mapping the appropriate aperture address into
1685 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1686 struct drm_file *file)
1688 struct drm_i915_gem_mmap_gtt *args = data;
1690 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1694 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1696 return obj->madv == I915_MADV_DONTNEED;
1699 /* Immediately discard the backing storage */
1701 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1703 i915_gem_object_free_mmap_offset(obj);
1705 if (obj->base.filp == NULL)
1708 /* Our goal here is to return as much of the memory as
1709 * is possible back to the system as we are called from OOM.
1710 * To do this we must instruct the shmfs to drop all of its
1711 * backing pages, *now*.
1713 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
1714 obj->madv = __I915_MADV_PURGED;
1717 /* Try to discard unwanted pages */
1719 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
1721 struct address_space *mapping;
1723 switch (obj->madv) {
1724 case I915_MADV_DONTNEED:
1725 i915_gem_object_truncate(obj);
1726 case __I915_MADV_PURGED:
1730 if (obj->base.filp == NULL)
1733 mapping = file_inode(obj->base.filp)->i_mapping,
1734 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
1738 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1740 struct sg_page_iter sg_iter;
1743 BUG_ON(obj->madv == __I915_MADV_PURGED);
1745 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1747 /* In the event of a disaster, abandon all caches and
1748 * hope for the best.
1750 WARN_ON(ret != -EIO);
1751 i915_gem_clflush_object(obj, true);
1752 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1755 if (i915_gem_object_needs_bit17_swizzle(obj))
1756 i915_gem_object_save_bit_17_swizzle(obj);
1758 if (obj->madv == I915_MADV_DONTNEED)
1761 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
1762 struct page *page = sg_page_iter_page(&sg_iter);
1765 set_page_dirty(page);
1767 if (obj->madv == I915_MADV_WILLNEED)
1768 mark_page_accessed(page);
1770 page_cache_release(page);
1774 sg_free_table(obj->pages);
1779 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1781 const struct drm_i915_gem_object_ops *ops = obj->ops;
1783 if (obj->pages == NULL)
1786 if (obj->pages_pin_count)
1789 BUG_ON(i915_gem_obj_bound_any(obj));
1791 /* ->put_pages might need to allocate memory for the bit17 swizzle
1792 * array, hence protect them from being reaped by removing them from gtt
1794 list_del(&obj->global_list);
1796 ops->put_pages(obj);
1799 i915_gem_object_invalidate(obj);
1804 static unsigned long
1805 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1806 bool purgeable_only)
1808 struct list_head still_in_list;
1809 struct drm_i915_gem_object *obj;
1810 unsigned long count = 0;
1813 * As we may completely rewrite the (un)bound list whilst unbinding
1814 * (due to retiring requests) we have to strictly process only
1815 * one element of the list at the time, and recheck the list
1816 * on every iteration.
1818 * In particular, we must hold a reference whilst removing the
1819 * object as we may end up waiting for and/or retiring the objects.
1820 * This might release the final reference (held by the active list)
1821 * and result in the object being freed from under us. This is
1822 * similar to the precautions the eviction code must take whilst
1825 * Also note that although these lists do not hold a reference to
1826 * the object we can safely grab one here: The final object
1827 * unreferencing and the bound_list are both protected by the
1828 * dev->struct_mutex and so we won't ever be able to observe an
1829 * object on the bound_list with a reference count of 0.
1831 INIT_LIST_HEAD(&still_in_list);
1832 while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
1833 obj = list_first_entry(&dev_priv->mm.unbound_list,
1834 typeof(*obj), global_list);
1835 list_move_tail(&obj->global_list, &still_in_list);
1837 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1840 drm_gem_object_reference(&obj->base);
1842 if (i915_gem_object_put_pages(obj) == 0)
1843 count += obj->base.size >> PAGE_SHIFT;
1845 drm_gem_object_unreference(&obj->base);
1847 list_splice(&still_in_list, &dev_priv->mm.unbound_list);
1849 INIT_LIST_HEAD(&still_in_list);
1850 while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
1851 struct i915_vma *vma, *v;
1853 obj = list_first_entry(&dev_priv->mm.bound_list,
1854 typeof(*obj), global_list);
1855 list_move_tail(&obj->global_list, &still_in_list);
1857 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1860 drm_gem_object_reference(&obj->base);
1862 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1863 if (i915_vma_unbind(vma))
1866 if (i915_gem_object_put_pages(obj) == 0)
1867 count += obj->base.size >> PAGE_SHIFT;
1869 drm_gem_object_unreference(&obj->base);
1871 list_splice(&still_in_list, &dev_priv->mm.bound_list);
1876 static unsigned long
1877 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1879 return __i915_gem_shrink(dev_priv, target, true);
1882 static unsigned long
1883 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1885 i915_gem_evict_everything(dev_priv->dev);
1886 return __i915_gem_shrink(dev_priv, LONG_MAX, false);
1890 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1892 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1894 struct address_space *mapping;
1895 struct sg_table *st;
1896 struct scatterlist *sg;
1897 struct sg_page_iter sg_iter;
1899 unsigned long last_pfn = 0; /* suppress gcc warning */
1902 /* Assert that the object is not currently in any GPU domain. As it
1903 * wasn't in the GTT, there shouldn't be any way it could have been in
1906 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1907 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1909 st = kmalloc(sizeof(*st), GFP_KERNEL);
1913 page_count = obj->base.size / PAGE_SIZE;
1914 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
1919 /* Get the list of pages out of our struct file. They'll be pinned
1920 * at this point until we release them.
1922 * Fail silently without starting the shrinker
1924 mapping = file_inode(obj->base.filp)->i_mapping;
1925 gfp = mapping_gfp_mask(mapping);
1926 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1927 gfp &= ~(__GFP_IO | __GFP_WAIT);
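/* Allocation strategy, roughly: first try cheap non-blocking, no-warn
 * allocations; if a page fails, purge our own unpinned objects and
 * retry; only as a last resort re-enable __GFP_IO/__GFP_WAIT and let
 * the VM reclaim (or OOM) on our behalf, as the fallback below does. */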
1930 for (i = 0; i < page_count; i++) {
1931 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1933 i915_gem_purge(dev_priv, page_count);
1934 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1937 /* We've tried hard to allocate the memory by reaping
1938 * our own buffer, now let the real VM do its job and
1939 * go down in flames if truly OOM.
1941 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
1942 gfp |= __GFP_IO | __GFP_WAIT;
1944 i915_gem_shrink_all(dev_priv);
1945 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1949 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1950 gfp &= ~(__GFP_IO | __GFP_WAIT);
1952 #ifdef CONFIG_SWIOTLB
1953 if (swiotlb_nr_tbl()) {
1955 sg_set_page(sg, page, PAGE_SIZE, 0);
1960 if (!i || page_to_pfn(page) != last_pfn + 1) {
1964 sg_set_page(sg, page, PAGE_SIZE, 0);
1966 sg->length += PAGE_SIZE;
1968 last_pfn = page_to_pfn(page);
1970 /* Check that the i965g/gm workaround works. */
1971 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
1973 #ifdef CONFIG_SWIOTLB
1974 if (!swiotlb_nr_tbl())
1979 if (i915_gem_object_needs_bit17_swizzle(obj))
1980 i915_gem_object_do_bit_17_swizzle(obj);
1986 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
1987 page_cache_release(sg_page_iter_page(&sg_iter));
1991 /* shmemfs first checks if there is enough memory to allocate the page
1992 * and reports ENOSPC should there be insufficient, along with the usual
1993 * ENOMEM for a genuine allocation failure.
1995 * We use ENOSPC in our driver to mean that we have run out of aperture
1996 * space and so want to translate the error from shmemfs back to our
1997 * usual understanding of ENOMEM.
1999 if (PTR_ERR(page) == -ENOSPC)
2002 return PTR_ERR(page);
2005 /* Ensure that the associated pages are gathered from the backing storage
2006 * and pinned into our object. i915_gem_object_get_pages() may be called
2007 * multiple times before they are released by a single call to
2008 * i915_gem_object_put_pages() - once the pages are no longer referenced
2009 * either as a result of memory pressure (reaping pages under the shrinker)
2010 * or as the object is itself released.
2013 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2015 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2016 const struct drm_i915_gem_object_ops *ops = obj->ops;
2022 if (obj->madv != I915_MADV_WILLNEED) {
2023 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2027 BUG_ON(obj->pages_pin_count);
2029 ret = ops->get_pages(obj);
2033 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2038 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2039 struct intel_ring_buffer *ring)
2041 struct drm_device *dev = obj->base.dev;
2042 struct drm_i915_private *dev_priv = dev->dev_private;
2043 u32 seqno = intel_ring_get_seqno(ring);
2045 BUG_ON(ring == NULL);
2046 if (obj->ring != ring && obj->last_write_seqno) {
2047 /* Keep the seqno relative to the current ring */
2048 obj->last_write_seqno = seqno;
2052 /* Add a reference if we're newly entering the active list. */
2054 drm_gem_object_reference(&obj->base);
2058 list_move_tail(&obj->ring_list, &ring->active_list);
2060 obj->last_read_seqno = seqno;
2062 if (obj->fenced_gpu_access) {
2063 obj->last_fenced_seqno = seqno;
2065 /* Bump MRU to take account of the delayed flush */
2066 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2067 struct drm_i915_fence_reg *reg;
2069 reg = &dev_priv->fence_regs[obj->fence_reg];
2070 list_move_tail(&reg->lru_list,
2071 &dev_priv->mm.fence_list);
2076 void i915_vma_move_to_active(struct i915_vma *vma,
2077 struct intel_ring_buffer *ring)
2079 list_move_tail(&vma->mm_list, &vma->vm->active_list);
2080 return i915_gem_object_move_to_active(vma->obj, ring);
2084 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2086 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2087 struct i915_address_space *vm;
2088 struct i915_vma *vma;
2090 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2091 BUG_ON(!obj->active);
2093 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2094 vma = i915_gem_obj_to_vma(obj, vm);
2095 if (vma && !list_empty(&vma->mm_list))
2096 list_move_tail(&vma->mm_list, &vm->inactive_list);
2099 list_del_init(&obj->ring_list);
2102 obj->last_read_seqno = 0;
2103 obj->last_write_seqno = 0;
2104 obj->base.write_domain = 0;
2106 obj->last_fenced_seqno = 0;
2107 obj->fenced_gpu_access = false;
2110 drm_gem_object_unreference(&obj->base);
2112 WARN_ON(i915_verify_lists(dev));
2116 i915_gem_object_retire(struct drm_i915_gem_object *obj)
2118 struct intel_ring_buffer *ring = obj->ring;
2123 if (i915_seqno_passed(ring->get_seqno(ring, true),
2124 obj->last_read_seqno))
2125 i915_gem_object_move_to_inactive(obj);
2129 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2131 struct drm_i915_private *dev_priv = dev->dev_private;
2132 struct intel_ring_buffer *ring;
2135 /* Carefully retire all requests without writing to the rings */
2136 for_each_ring(ring, dev_priv, i) {
2137 ret = intel_ring_idle(ring);
2141 i915_gem_retire_requests(dev);
2143 /* Finally reset hw state */
2144 for_each_ring(ring, dev_priv, i) {
2145 intel_ring_init_seqno(ring, seqno);
2147 for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
2148 ring->semaphore.sync_seqno[j] = 0;
2154 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2156 struct drm_i915_private *dev_priv = dev->dev_private;
2162 /* HWS page needs to be set less than what we
2163 * will inject to ring
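 * e.g. to set the next seqno to 0x1000 the rings are initialised to
 * 0xfff, so the first new request (0x1000) is not already considered
 * complete.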
2165 ret = i915_gem_init_seqno(dev, seqno - 1);
2169 /* Carefully set the last_seqno value so that wrap
2170 * detection still works
2172 dev_priv->next_seqno = seqno;
2173 dev_priv->last_seqno = seqno - 1;
2174 if (dev_priv->last_seqno == 0)
2175 dev_priv->last_seqno--;
2181 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2183 struct drm_i915_private *dev_priv = dev->dev_private;
2185 /* reserve 0 for non-seqno */
2186 if (dev_priv->next_seqno == 0) {
2187 int ret = i915_gem_init_seqno(dev, 0);
2191 dev_priv->next_seqno = 1;
2194 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
2198 int __i915_add_request(struct intel_ring_buffer *ring,
2199 struct drm_file *file,
2200 struct drm_i915_gem_object *obj,
2203 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2204 struct drm_i915_gem_request *request;
2205 u32 request_ring_position, request_start;
2208 request_start = intel_ring_get_tail(ring);
2210 * Emit any outstanding flushes - execbuf can fail to emit the flush
2211 * after having emitted the batchbuffer command. Hence we need to fix
2212 * things up similar to emitting the lazy request. The difference here
2213 * is that the flush _must_ happen before the next request, no matter
2216 ret = intel_ring_flush_all_caches(ring);
2220 request = ring->preallocated_lazy_request;
2221 if (WARN_ON(request == NULL))
2224 /* Record the position of the start of the request so that
2225 * should we detect the updated seqno part-way through the
2226 * GPU processing the request, we never over-estimate the
2227 * position of the head.
2229 request_ring_position = intel_ring_get_tail(ring);
2231 ret = ring->add_request(ring);
2235 request->seqno = intel_ring_get_seqno(ring);
2236 request->ring = ring;
2237 request->head = request_start;
2238 request->tail = request_ring_position;
2240 /* Whilst this request exists, batch_obj will be on the
2241 * active_list, and so will hold the active reference. Only when this
2242 * request is retired will the batch_obj be moved onto the
2243 * inactive_list and lose its active reference. Hence we do not need
2244 * to explicitly hold another reference here.
2246 request->batch_obj = obj;
2248 /* Hold a reference to the current context so that we can inspect
2249 * it later in case a hangcheck error event fires.
2251 request->ctx = ring->last_context;
2253 i915_gem_context_reference(request->ctx);
2255 request->emitted_jiffies = jiffies;
2256 list_add_tail(&request->list, &ring->request_list);
2257 request->file_priv = NULL;
2260 struct drm_i915_file_private *file_priv = file->driver_priv;
2262 spin_lock(&file_priv->mm.lock);
2263 request->file_priv = file_priv;
2264 list_add_tail(&request->client_list,
2265 &file_priv->mm.request_list);
2266 spin_unlock(&file_priv->mm.lock);
2269 trace_i915_gem_request_add(ring, request->seqno);
2270 ring->outstanding_lazy_seqno = 0;
2271 ring->preallocated_lazy_request = NULL;
2273 if (!dev_priv->ums.mm_suspended) {
2274 i915_queue_hangcheck(ring->dev);
2276 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2277 queue_delayed_work(dev_priv->wq,
2278 &dev_priv->mm.retire_work,
2279 round_jiffies_up_relative(HZ));
2280 intel_mark_busy(dev_priv->dev);
2284 *out_seqno = request->seqno;
2289 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2291 struct drm_i915_file_private *file_priv = request->file_priv;
2296 spin_lock(&file_priv->mm.lock);
2297 list_del(&request->client_list);
2298 request->file_priv = NULL;
2299 spin_unlock(&file_priv->mm.lock);
2302 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2303 const struct i915_hw_context *ctx)
2305 unsigned long elapsed;
2307 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2309 if (ctx->hang_stats.banned)
2312 if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
2313 if (!i915_gem_context_is_default(ctx)) {
2314 DRM_DEBUG("context hanging too fast, banning!\n");
2316 } else if (i915_stop_ring_allow_ban(dev_priv)) {
2317 if (i915_stop_ring_allow_warn(dev_priv))
2318 DRM_ERROR("gpu hanging too fast, banning!\n");
2326 static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2327 struct i915_hw_context *ctx,
2330 struct i915_ctx_hang_stats *hs;
2335 hs = &ctx->hang_stats;
2338 hs->banned = i915_context_is_banned(dev_priv, ctx);
2340 hs->guilty_ts = get_seconds();
2342 hs->batch_pending++;
2346 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2348 list_del(&request->list);
2349 i915_gem_request_remove_from_client(request);
2352 i915_gem_context_unreference(request->ctx);
2357 struct drm_i915_gem_request *
2358 i915_gem_find_active_request(struct intel_ring_buffer *ring)
2360 struct drm_i915_gem_request *request;
2361 u32 completed_seqno;
2363 completed_seqno = ring->get_seqno(ring, false);
2365 list_for_each_entry(request, &ring->request_list, list) {
2366 if (i915_seqno_passed(completed_seqno, request->seqno))
2375 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2376 struct intel_ring_buffer *ring)
2378 struct drm_i915_gem_request *request;
2381 request = i915_gem_find_active_request(ring);
2383 if (request == NULL)
2386 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2388 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
2390 list_for_each_entry_continue(request, &ring->request_list, list)
2391 i915_set_reset_status(dev_priv, request->ctx, false);
2394 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2395 struct intel_ring_buffer *ring)
2397 while (!list_empty(&ring->active_list)) {
2398 struct drm_i915_gem_object *obj;
2400 obj = list_first_entry(&ring->active_list,
2401 struct drm_i915_gem_object,
2404 i915_gem_object_move_to_inactive(obj);
2408 * We must free the requests after all the corresponding objects have
2409 * been moved off active lists, which is the same order as the normal
2410 * retire_requests function uses. This is important if objects hold
2411 * implicit references on things like e.g. ppgtt address spaces through
2414 while (!list_empty(&ring->request_list)) {
2415 struct drm_i915_gem_request *request;
2417 request = list_first_entry(&ring->request_list,
2418 struct drm_i915_gem_request,
2421 i915_gem_free_request(request);
2424 /* These may not have been flushed before the reset, so do so now */
2425 kfree(ring->preallocated_lazy_request);
2426 ring->preallocated_lazy_request = NULL;
2427 ring->outstanding_lazy_seqno = 0;
2430 void i915_gem_restore_fences(struct drm_device *dev)
2432 struct drm_i915_private *dev_priv = dev->dev_private;
2435 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2436 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2439 * Commit delayed tiling changes if we have an object still
2440 * attached to the fence, otherwise just clear the fence.
2443 i915_gem_object_update_fence(reg->obj, reg,
2444 reg->obj->tiling_mode);
2446 i915_gem_write_fence(dev, i, NULL);
2451 void i915_gem_reset(struct drm_device *dev)
2453 struct drm_i915_private *dev_priv = dev->dev_private;
2454 struct intel_ring_buffer *ring;
2458 * Before we free the objects from the requests, we need to inspect
2459 * them for finding the guilty party. As the requests only borrow
2460 * their reference to the objects, the inspection must be done first.
2462 for_each_ring(ring, dev_priv, i)
2463 i915_gem_reset_ring_status(dev_priv, ring);
2465 for_each_ring(ring, dev_priv, i)
2466 i915_gem_reset_ring_cleanup(dev_priv, ring);
2468 i915_gem_context_reset(dev);
2470 i915_gem_restore_fences(dev);
2474 * This function clears the request list as sequence numbers are passed.
2477 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2481 if (list_empty(&ring->request_list))
2484 WARN_ON(i915_verify_lists(ring->dev));
2486 seqno = ring->get_seqno(ring, true);
2488 /* Move any buffers on the active list that are no longer referenced
2489 * by the ringbuffer to the flushing/inactive lists as appropriate,
2490 * before we free the context associated with the requests.
2492 while (!list_empty(&ring->active_list)) {
2493 struct drm_i915_gem_object *obj;
2495 obj = list_first_entry(&ring->active_list,
2496 struct drm_i915_gem_object,
2499 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2502 i915_gem_object_move_to_inactive(obj);
2506 while (!list_empty(&ring->request_list)) {
2507 struct drm_i915_gem_request *request;
2509 request = list_first_entry(&ring->request_list,
2510 struct drm_i915_gem_request,
2513 if (!i915_seqno_passed(seqno, request->seqno))
2516 trace_i915_gem_request_retire(ring, request->seqno);
2517 /* We know the GPU must have read the request to have
2518 * sent us the seqno + interrupt, so use the position
2519 * of the tail of the request to update the last known position
2522 ring->last_retired_head = request->tail;
2524 i915_gem_free_request(request);
2527 if (unlikely(ring->trace_irq_seqno &&
2528 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2529 ring->irq_put(ring);
2530 ring->trace_irq_seqno = 0;
2533 WARN_ON(i915_verify_lists(ring->dev));
2537 i915_gem_retire_requests(struct drm_device *dev)
2539 struct drm_i915_private *dev_priv = dev->dev_private;
2540 struct intel_ring_buffer *ring;
2544 for_each_ring(ring, dev_priv, i) {
2545 i915_gem_retire_requests_ring(ring);
2546 idle &= list_empty(&ring->request_list);
2550 mod_delayed_work(dev_priv->wq,
2551 &dev_priv->mm.idle_work,
2552 msecs_to_jiffies(100));
2558 i915_gem_retire_work_handler(struct work_struct *work)
2560 struct drm_i915_private *dev_priv =
2561 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2562 struct drm_device *dev = dev_priv->dev;
2565 /* Come back later if the device is busy... */
2567 if (mutex_trylock(&dev->struct_mutex)) {
2568 idle = i915_gem_retire_requests(dev);
2569 mutex_unlock(&dev->struct_mutex);
2572 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2573 round_jiffies_up_relative(HZ));
2577 i915_gem_idle_work_handler(struct work_struct *work)
2579 struct drm_i915_private *dev_priv =
2580 container_of(work, typeof(*dev_priv), mm.idle_work.work);
2582 intel_mark_idle(dev_priv->dev);
2586 * Ensures that an object will eventually get non-busy by flushing any required
2587 * write domains, emitting any outstanding lazy request and retiring any
2588 * completed requests.
2591 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2596 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2600 i915_gem_retire_requests_ring(obj->ring);
2607 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2608 * @DRM_IOCTL_ARGS: standard ioctl arguments
2610 * Returns 0 if successful, else an error is returned with the remaining time in
2611 * the timeout parameter.
2612 * -ETIME: object is still busy after timeout
2613 * -ERESTARTSYS: signal interrupted the wait
2614 * -ENOENT: object doesn't exist
2615 * Also possible, but rare:
2616 * -EAGAIN: GPU wedged
2618 * -ENODEV: Internal IRQ fail
2619 * -E?: The add request failed
2621 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2622 * non-zero timeout parameter the wait ioctl will wait for the given number of
2623 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2624 * without holding struct_mutex the object may become re-busied before this
2625 * function completes. A similar but shorter race condition exists in the busy
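 * ioctl.
 *
 * A minimal userspace sketch (illustrative only; assumes libdrm's drmIoctl, an
 * open DRM fd "fd" and a GEM handle "handle", error handling omitted):
 *
 *	struct drm_i915_gem_wait wait = { .bo_handle = handle,
 *					  .timeout_ns = 1000000 };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * On return, wait.timeout_ns holds the time remaining.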
2629 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2631 struct drm_i915_private *dev_priv = dev->dev_private;
2632 struct drm_i915_gem_wait *args = data;
2633 struct drm_i915_gem_object *obj;
2634 struct intel_ring_buffer *ring = NULL;
2635 struct timespec timeout_stack, *timeout = NULL;
2636 unsigned reset_counter;
2640 if (args->timeout_ns >= 0) {
2641 timeout_stack = ns_to_timespec(args->timeout_ns);
2642 timeout = &timeout_stack;
2645 ret = i915_mutex_lock_interruptible(dev);
2649 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2650 if (&obj->base == NULL) {
2651 mutex_unlock(&dev->struct_mutex);
2655 /* Need to make sure the object gets inactive eventually. */
2656 ret = i915_gem_object_flush_active(obj);
2661 seqno = obj->last_read_seqno;
2668 /* Do this after OLR check to make sure we make forward progress polling
2669 * on this IOCTL with a 0 timeout (like busy ioctl)
2671 if (!args->timeout_ns) {
2676 drm_gem_object_unreference(&obj->base);
2677 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2678 mutex_unlock(&dev->struct_mutex);
2680 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
2682 args->timeout_ns = timespec_to_ns(timeout);
2686 drm_gem_object_unreference(&obj->base);
2687 mutex_unlock(&dev->struct_mutex);
2692 * i915_gem_object_sync - sync an object to a ring.
2694 * @obj: object which may be in use on another ring.
2695 * @to: ring we wish to use the object on. May be NULL.
2697 * This code is meant to abstract object synchronization with the GPU.
2698 * Calling with NULL implies synchronizing the object with the CPU
2699 * rather than a particular GPU ring.
2701 * Returns 0 if successful, else propagates up the lower layer error.
2704 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2705 struct intel_ring_buffer *to)
2707 struct intel_ring_buffer *from = obj->ring;
2711 if (from == NULL || to == from)
2714 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2715 return i915_gem_object_wait_rendering(obj, false);
2717 idx = intel_ring_sync_index(from, to);
2719 seqno = obj->last_read_seqno;
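/* If the target ring has already waited (via a semaphore) for a seqno at
 * least as new as ours, that synchronisation is still in effect and we can
 * return without emitting another wait.
 */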
2720 if (seqno <= from->semaphore.sync_seqno[idx])
2723 ret = i915_gem_check_olr(obj->ring, seqno);
2727 trace_i915_gem_ring_sync_to(from, to, seqno);
2728 ret = to->semaphore.sync_to(to, from, seqno);
2730 /* We use last_read_seqno because sync_to()
2731 * might have just caused seqno wrap under
2734 from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
2739 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2741 u32 old_write_domain, old_read_domains;
2743 /* Force a pagefault for domain tracking on next user access */
2744 i915_gem_release_mmap(obj);
2746 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2749 /* Wait for any direct GTT access to complete */
2752 old_read_domains = obj->base.read_domains;
2753 old_write_domain = obj->base.write_domain;
2755 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2756 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2758 trace_i915_gem_object_change_domain(obj,
2763 int i915_vma_unbind(struct i915_vma *vma)
2765 struct drm_i915_gem_object *obj = vma->obj;
2766 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2769 if (list_empty(&vma->vma_link))
2772 if (!drm_mm_node_allocated(&vma->node)) {
2773 i915_gem_vma_destroy(vma);
2780 BUG_ON(obj->pages == NULL);
2782 ret = i915_gem_object_finish_gpu(obj);
2785 /* Continue on if we fail due to EIO, the GPU is hung so we
2786 * should be safe and we need to cleanup or else we might
2787 * cause memory corruption through use-after-free.
2790 if (i915_is_ggtt(vma->vm)) {
2791 i915_gem_object_finish_gtt(obj);
2793 /* release the fence reg _after_ flushing */
2794 ret = i915_gem_object_put_fence(obj);
2799 trace_i915_vma_unbind(vma);
2801 vma->unbind_vma(vma);
2803 i915_gem_gtt_finish_object(obj);
2805 list_del_init(&vma->mm_list);
2806 /* Avoid an unnecessary call to unbind on rebind. */
2807 if (i915_is_ggtt(vma->vm))
2808 obj->map_and_fenceable = true;
2810 drm_mm_remove_node(&vma->node);
2811 i915_gem_vma_destroy(vma);
2813 /* Since the unbound list is global, only move to that list if
2814 * no more VMAs exist. */
2815 if (list_empty(&obj->vma_list))
2816 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2818 /* And finally, now that the object is completely decoupled from this vma,
2819 * we can drop its hold on the backing storage and allow it to be
2820 * reaped by the shrinker.
2822 i915_gem_object_unpin_pages(obj);
2827 int i915_gpu_idle(struct drm_device *dev)
2829 struct drm_i915_private *dev_priv = dev->dev_private;
2830 struct intel_ring_buffer *ring;
2833 /* Flush everything onto the inactive list. */
2834 for_each_ring(ring, dev_priv, i) {
2835 ret = i915_switch_context(ring, ring->default_context);
2839 ret = intel_ring_idle(ring);
2847 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2848 struct drm_i915_gem_object *obj)
2850 struct drm_i915_private *dev_priv = dev->dev_private;
2852 int fence_pitch_shift;
2854 if (INTEL_INFO(dev)->gen >= 6) {
2855 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2856 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2858 fence_reg = FENCE_REG_965_0;
2859 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2862 fence_reg += reg * 8;
2864 /* To work around incoherency with non-atomic 64-bit register updates,
2865 * we split the 64-bit update into two 32-bit writes. In order
2866 * for a partial fence not to be evaluated between writes, we
2867 * precede the update with a write to turn off the fence register,
2868 * and only enable the fence as the last step.
2870 * For extra levels of paranoia, we make sure each step lands
2871 * before applying the next step.
2873 I915_WRITE(fence_reg, 0);
2874 POSTING_READ(fence_reg);
2877 u32 size = i915_gem_obj_ggtt_size(obj);
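/* The 64-bit fence value packs the end and start addresses of the fenced
 * GGTT range, the pitch in 128-byte units (minus one), the tiling mode and
 * the valid bit; the pitch field position differs between gen4 and gen6+,
 * hence fence_pitch_shift above.
 */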
2880 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
2882 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
2883 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2884 if (obj->tiling_mode == I915_TILING_Y)
2885 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2886 val |= I965_FENCE_REG_VALID;
2888 I915_WRITE(fence_reg + 4, val >> 32);
2889 POSTING_READ(fence_reg + 4);
2891 I915_WRITE(fence_reg + 0, val);
2892 POSTING_READ(fence_reg);
2894 I915_WRITE(fence_reg + 4, 0);
2895 POSTING_READ(fence_reg + 4);
2899 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2900 struct drm_i915_gem_object *obj)
2902 struct drm_i915_private *dev_priv = dev->dev_private;
2906 u32 size = i915_gem_obj_ggtt_size(obj);
2910 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
2911 (size & -size) != size ||
2912 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2913 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2914 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
2916 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2921 /* Note: pitch better be a power of two tile widths */
2922 pitch_val = obj->stride / tile_width;
2923 pitch_val = ffs(pitch_val) - 1;
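/* ffs() on a power-of-two value returns log2(value) + 1, so pitch_val now
 * holds the log2 of the pitch in tile widths, which is the encoding used by
 * the fence register's pitch field.
 */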
2925 val = i915_gem_obj_ggtt_offset(obj);
2926 if (obj->tiling_mode == I915_TILING_Y)
2927 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2928 val |= I915_FENCE_SIZE_BITS(size);
2929 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2930 val |= I830_FENCE_REG_VALID;
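/* Fences 0-7 live in the FENCE_REG_830_0 bank; 945 and later provide a
 * second bank of eight starting at FENCE_REG_945_8.
 */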
2935 reg = FENCE_REG_830_0 + reg * 4;
2937 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2939 I915_WRITE(reg, val);
2943 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2944 struct drm_i915_gem_object *obj)
2946 struct drm_i915_private *dev_priv = dev->dev_private;
2950 u32 size = i915_gem_obj_ggtt_size(obj);
2953 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
2954 (size & -size) != size ||
2955 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2956 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2957 i915_gem_obj_ggtt_offset(obj), size);
2959 pitch_val = obj->stride / 128;
2960 pitch_val = ffs(pitch_val) - 1;
2962 val = i915_gem_obj_ggtt_offset(obj);
2963 if (obj->tiling_mode == I915_TILING_Y)
2964 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2965 val |= I830_FENCE_SIZE_BITS(size);
2966 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2967 val |= I830_FENCE_REG_VALID;
2971 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2972 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2975 static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2977 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2980 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2981 struct drm_i915_gem_object *obj)
2983 struct drm_i915_private *dev_priv = dev->dev_private;
2985 /* Ensure that all CPU reads are completed before installing a fence
2986 * and all writes before removing the fence.
2988 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2991 WARN(obj && (!obj->stride || !obj->tiling_mode),
2992 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2993 obj->stride, obj->tiling_mode);
2995 switch (INTEL_INFO(dev)->gen) {
3000 case 4: i965_write_fence_reg(dev, reg, obj); break;
3001 case 3: i915_write_fence_reg(dev, reg, obj); break;
3002 case 2: i830_write_fence_reg(dev, reg, obj); break;
3006 /* And similarly be paranoid that no direct access to this region
3007 * is reordered to before the fence is installed.
3009 if (i915_gem_object_needs_mb(obj))
3013 static inline int fence_number(struct drm_i915_private *dev_priv,
3014 struct drm_i915_fence_reg *fence)
3016 return fence - dev_priv->fence_regs;
3019 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
3020 struct drm_i915_fence_reg *fence,
3023 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3024 int reg = fence_number(dev_priv, fence);
3026 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
3029 obj->fence_reg = reg;
3031 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3033 obj->fence_reg = I915_FENCE_REG_NONE;
3035 list_del_init(&fence->lru_list);
3037 obj->fence_dirty = false;
3041 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
3043 if (obj->last_fenced_seqno) {
3044 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
3048 obj->last_fenced_seqno = 0;
3051 obj->fenced_gpu_access = false;
3056 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3058 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3059 struct drm_i915_fence_reg *fence;
3062 ret = i915_gem_object_wait_fence(obj);
3066 if (obj->fence_reg == I915_FENCE_REG_NONE)
3069 fence = &dev_priv->fence_regs[obj->fence_reg];
3071 if (WARN_ON(fence->pin_count))
3074 i915_gem_object_fence_lost(obj);
3075 i915_gem_object_update_fence(obj, fence, false);
3080 static struct drm_i915_fence_reg *
3081 i915_find_fence_reg(struct drm_device *dev)
3083 struct drm_i915_private *dev_priv = dev->dev_private;
3084 struct drm_i915_fence_reg *reg, *avail;
3087 /* First try to find a free reg */
3089 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3090 reg = &dev_priv->fence_regs[i];
3094 if (!reg->pin_count)
3101 /* None available, try to steal one or wait for a user to finish */
3102 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3110 /* Wait for completion of pending flips which consume fences */
3111 if (intel_has_pending_fb_unpin(dev))
3112 return ERR_PTR(-EAGAIN);
3114 return ERR_PTR(-EDEADLK);
3118 * i915_gem_object_get_fence - set up fencing for an object
3119 * @obj: object to map through a fence reg
3121 * When mapping objects through the GTT, userspace wants to be able to write
3122 * to them without having to worry about swizzling if the object is tiled.
3123 * This function walks the fence regs looking for a free one for @obj,
3124 * stealing one if it can't find any.
3126 * It then sets up the reg based on the object's properties: address, pitch
3127 * and tiling format.
3129 * For an untiled surface, this removes any existing fence.
3132 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3134 struct drm_device *dev = obj->base.dev;
3135 struct drm_i915_private *dev_priv = dev->dev_private;
3136 bool enable = obj->tiling_mode != I915_TILING_NONE;
3137 struct drm_i915_fence_reg *reg;
3140 /* Have we updated the tiling parameters upon the object and so
3141 * will need to serialise the write to the associated fence register?
3143 if (obj->fence_dirty) {
3144 ret = i915_gem_object_wait_fence(obj);
3149 /* Just update our place in the LRU if our fence is getting reused. */
3150 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3151 reg = &dev_priv->fence_regs[obj->fence_reg];
3152 if (!obj->fence_dirty) {
3153 list_move_tail(&reg->lru_list,
3154 &dev_priv->mm.fence_list);
3157 } else if (enable) {
3158 reg = i915_find_fence_reg(dev);
3160 return PTR_ERR(reg);
3163 struct drm_i915_gem_object *old = reg->obj;
3165 ret = i915_gem_object_wait_fence(old);
3169 i915_gem_object_fence_lost(old);
3174 i915_gem_object_update_fence(obj, reg, enable);
3179 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3180 struct drm_mm_node *gtt_space,
3181 unsigned long cache_level)
3183 struct drm_mm_node *other;
3185 /* On non-LLC machines we have to be careful when putting differing
3186 * types of snoopable memory together to avoid the prefetcher
3187 * crossing memory domains and dying.
3192 if (!drm_mm_node_allocated(gtt_space))
3195 if (list_empty(&gtt_space->node_list))
3198 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3199 if (other->allocated && !other->hole_follows && other->color != cache_level)
3202 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3203 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3209 static void i915_gem_verify_gtt(struct drm_device *dev)
3212 struct drm_i915_private *dev_priv = dev->dev_private;
3213 struct drm_i915_gem_object *obj;
3216 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
3217 if (obj->gtt_space == NULL) {
3218 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3223 if (obj->cache_level != obj->gtt_space->color) {
3224 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3225 i915_gem_obj_ggtt_offset(obj),
3226 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3228 obj->gtt_space->color);
3233 if (!i915_gem_valid_gtt_space(dev,
3235 obj->cache_level)) {
3236 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3237 i915_gem_obj_ggtt_offset(obj),
3238 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3250 * Finds free space in the GTT aperture and binds the object there.
3252 static struct i915_vma *
3253 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3254 struct i915_address_space *vm,
3258 struct drm_device *dev = obj->base.dev;
3259 struct drm_i915_private *dev_priv = dev->dev_private;
3260 u32 size, fence_size, fence_alignment, unfenced_alignment;
3262 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
3263 struct i915_vma *vma;
3266 fence_size = i915_gem_get_gtt_size(dev,
3269 fence_alignment = i915_gem_get_gtt_alignment(dev,
3271 obj->tiling_mode, true);
3272 unfenced_alignment =
3273 i915_gem_get_gtt_alignment(dev,
3275 obj->tiling_mode, false);
3278 alignment = flags & PIN_MAPPABLE ? fence_alignment :
3280 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3281 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
3282 return ERR_PTR(-EINVAL);
3285 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3287 /* If the object is bigger than the entire aperture, reject it early
3288 * before evicting everything in a vain attempt to find space.
3290 if (obj->base.size > gtt_max) {
3291 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
3293 flags & PIN_MAPPABLE ? "mappable" : "total",
3295 return ERR_PTR(-E2BIG);
3298 ret = i915_gem_object_get_pages(obj);
3300 return ERR_PTR(ret);
3302 i915_gem_object_pin_pages(obj);
3304 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3309 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3311 obj->cache_level, 0, gtt_max,
3312 DRM_MM_SEARCH_DEFAULT,
3313 DRM_MM_CREATE_DEFAULT);
3315 ret = i915_gem_evict_something(dev, vm, size, alignment,
3316 obj->cache_level, flags);
3322 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
3323 obj->cache_level))) {
3325 goto err_remove_node;
3328 ret = i915_gem_gtt_prepare_object(obj);
3330 goto err_remove_node;
3332 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3333 list_add_tail(&vma->mm_list, &vm->inactive_list);
3335 if (i915_is_ggtt(vm)) {
3336 bool mappable, fenceable;
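/* map_and_fenceable means the binding sits entirely within the mappable
 * aperture and is placed and sized such that a fence register could cover it.
 */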
3338 fenceable = (vma->node.size == fence_size &&
3339 (vma->node.start & (fence_alignment - 1)) == 0);
3341 mappable = (vma->node.start + obj->base.size <=
3342 dev_priv->gtt.mappable_end);
3344 obj->map_and_fenceable = mappable && fenceable;
3347 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
3349 trace_i915_vma_bind(vma, flags);
3350 vma->bind_vma(vma, obj->cache_level,
3351 flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
3353 i915_gem_verify_gtt(dev);
3357 drm_mm_remove_node(&vma->node);
3359 i915_gem_vma_destroy(vma);
3362 i915_gem_object_unpin_pages(obj);
3367 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3370 /* If we don't have a page list set up, then we're not pinned
3371 * to the GPU, and we can ignore the cache flush because it'll happen
3372 * again at bind time.
3374 if (obj->pages == NULL)
3378 * Stolen memory is always coherent with the GPU as it is explicitly
3379 * marked as wc by the system, or the system is cache-coherent.
3384 /* If the GPU is snooping the contents of the CPU cache,
3385 * we do not need to manually clear the CPU cache lines. However,
3386 * the caches are only snooped when the render cache is
3387 * flushed/invalidated. As we always have to emit invalidations
3388 * and flushes when moving into and out of the RENDER domain, correct
3389 * snooping behaviour occurs naturally as the result of our domain
3392 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3395 trace_i915_gem_object_clflush(obj);
3396 drm_clflush_sg(obj->pages);
3401 /** Flushes the GTT write domain for the object if it's dirty. */
3403 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3405 uint32_t old_write_domain;
3407 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3410 /* No actual flushing is required for the GTT write domain. Writes
3411 * to it immediately go to main memory as far as we know, so there's
3412 * no chipset flush. It also doesn't land in render cache.
3414 * However, we do have to enforce the order so that all writes through
3415 * the GTT land before any writes to the device, such as updates to
3420 old_write_domain = obj->base.write_domain;
3421 obj->base.write_domain = 0;
3423 trace_i915_gem_object_change_domain(obj,
3424 obj->base.read_domains,
3428 /** Flushes the CPU write domain for the object if it's dirty. */
3430 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3433 uint32_t old_write_domain;
3435 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3438 if (i915_gem_clflush_object(obj, force))
3439 i915_gem_chipset_flush(obj->base.dev);
3441 old_write_domain = obj->base.write_domain;
3442 obj->base.write_domain = 0;
3444 trace_i915_gem_object_change_domain(obj,
3445 obj->base.read_domains,
3450 * Moves a single object to the GTT read, and possibly write domain.
3452 * This function returns when the move is complete, including waiting on
3456 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3458 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3459 uint32_t old_write_domain, old_read_domains;
3462 /* Not valid to be called on unbound objects. */
3463 if (!i915_gem_obj_bound_any(obj))
3466 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3469 ret = i915_gem_object_wait_rendering(obj, !write);
3473 i915_gem_object_retire(obj);
3474 i915_gem_object_flush_cpu_write_domain(obj, false);
3476 /* Serialise direct access to this object with the barriers for
3477 * coherent writes from the GPU, by effectively invalidating the
3478 * GTT domain upon first access.
3480 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3483 old_write_domain = obj->base.write_domain;
3484 old_read_domains = obj->base.read_domains;
3486 /* It should now be out of any other write domains, and we can update
3487 * the domain values for our changes.
3489 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3490 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3492 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3493 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3497 trace_i915_gem_object_change_domain(obj,
3501 /* And bump the LRU for this access */
3502 if (i915_gem_object_is_inactive(obj)) {
3503 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3505 list_move_tail(&vma->mm_list,
3506 &dev_priv->gtt.base.inactive_list);
3513 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3514 enum i915_cache_level cache_level)
3516 struct drm_device *dev = obj->base.dev;
3517 struct i915_vma *vma, *next;
3520 if (obj->cache_level == cache_level)
3523 if (i915_gem_obj_is_pinned(obj)) {
3524 DRM_DEBUG("can not change the cache level of pinned objects\n");
3528 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
3529 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
3530 ret = i915_vma_unbind(vma);
3536 if (i915_gem_obj_bound_any(obj)) {
3537 ret = i915_gem_object_finish_gpu(obj);
3541 i915_gem_object_finish_gtt(obj);
3543 /* Before SandyBridge, you could not use tiling or fence
3544 * registers with snooped memory, so relinquish any fences
3545 * currently pointing to our region in the aperture.
3547 if (INTEL_INFO(dev)->gen < 6) {
3548 ret = i915_gem_object_put_fence(obj);
3553 list_for_each_entry(vma, &obj->vma_list, vma_link)
3554 if (drm_mm_node_allocated(&vma->node))
3555 vma->bind_vma(vma, cache_level,
3556 obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
3559 list_for_each_entry(vma, &obj->vma_list, vma_link)
3560 vma->node.color = cache_level;
3561 obj->cache_level = cache_level;
3563 if (cpu_write_needs_clflush(obj)) {
3564 u32 old_read_domains, old_write_domain;
3566 /* If we're coming from LLC cached, then we haven't
3567 * actually been tracking whether the data is in the
3568 * CPU cache or not, since we only allow one bit set
3569 * in obj->write_domain and have been skipping the clflushes.
3570 * Just set it to the CPU cache for now.
3572 i915_gem_object_retire(obj);
3573 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3575 old_read_domains = obj->base.read_domains;
3576 old_write_domain = obj->base.write_domain;
3578 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3579 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3581 trace_i915_gem_object_change_domain(obj,
3586 i915_gem_verify_gtt(dev);
3590 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3591 struct drm_file *file)
3593 struct drm_i915_gem_caching *args = data;
3594 struct drm_i915_gem_object *obj;
3597 ret = i915_mutex_lock_interruptible(dev);
3601 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3602 if (&obj->base == NULL) {
3607 switch (obj->cache_level) {
3608 case I915_CACHE_LLC:
3609 case I915_CACHE_L3_LLC:
3610 args->caching = I915_CACHING_CACHED;
3614 args->caching = I915_CACHING_DISPLAY;
3618 args->caching = I915_CACHING_NONE;
3622 drm_gem_object_unreference(&obj->base);
3624 mutex_unlock(&dev->struct_mutex);
3628 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3629 struct drm_file *file)
3631 struct drm_i915_gem_caching *args = data;
3632 struct drm_i915_gem_object *obj;
3633 enum i915_cache_level level;
3636 switch (args->caching) {
3637 case I915_CACHING_NONE:
3638 level = I915_CACHE_NONE;
3640 case I915_CACHING_CACHED:
3641 level = I915_CACHE_LLC;
3643 case I915_CACHING_DISPLAY:
3644 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3650 ret = i915_mutex_lock_interruptible(dev);
3654 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3655 if (&obj->base == NULL) {
3660 ret = i915_gem_object_set_cache_level(obj, level);
3662 drm_gem_object_unreference(&obj->base);
3664 mutex_unlock(&dev->struct_mutex);
3668 static bool is_pin_display(struct drm_i915_gem_object *obj)
3670 struct i915_vma *vma;
3672 if (list_empty(&obj->vma_list))
3675 vma = i915_gem_obj_to_ggtt(obj);
3679 /* There are 3 sources that pin objects:
3680 * 1. The display engine (scanouts, sprites, cursors);
3681 * 2. Reservations for execbuffer;
3682 * 3. The user.
3684 * We can ignore reservations as we hold the struct_mutex and
3685 * are only called outside of the reservation path. The user
3686 * can only increment pin_count once, and so if after
3687 * subtracting the potential reference by the user, any pin_count
3688 * remains, it must be due to another use by the display engine.
3690 return vma->pin_count - !!obj->user_pin_count;
3694 * Prepare buffer for display plane (scanout, cursors, etc).
3695 * Can be called from an uninterruptible phase (modesetting) and allows
3696 * any flushes to be pipelined (for pageflips).
3699 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3701 struct intel_ring_buffer *pipelined)
3703 u32 old_read_domains, old_write_domain;
3704 bool was_pin_display;
3707 if (pipelined != obj->ring) {
3708 ret = i915_gem_object_sync(obj, pipelined);
3713 /* Mark the pin_display early so that we account for the
3714 * display coherency whilst setting up the cache domains.
3716 was_pin_display = obj->pin_display;
3717 obj->pin_display = true;
3719 /* The display engine is not coherent with the LLC cache on gen6. As
3720 * a result, we make sure that the pinning that is about to occur is
3721 * done with uncached PTEs. This is the lowest common denominator for all
3724 * However for gen6+, we could do better by using the GFDT bit instead
3725 * of uncaching, which would allow us to flush all the LLC-cached data
3726 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3728 ret = i915_gem_object_set_cache_level(obj,
3729 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3731 goto err_unpin_display;
3733 /* As the user may map the buffer once pinned in the display plane
3734 * (e.g. libkms for the bootup splash), we have to ensure that we
3735 * always use map_and_fenceable for all scanout buffers.
3737 ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
3739 goto err_unpin_display;
3741 i915_gem_object_flush_cpu_write_domain(obj, true);
3743 old_write_domain = obj->base.write_domain;
3744 old_read_domains = obj->base.read_domains;
3746 /* It should now be out of any other write domains, and we can update
3747 * the domain values for our changes.
3749 obj->base.write_domain = 0;
3750 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3752 trace_i915_gem_object_change_domain(obj,
3759 WARN_ON(was_pin_display != is_pin_display(obj));
3760 obj->pin_display = was_pin_display;
3765 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3767 i915_gem_object_ggtt_unpin(obj);
3768 obj->pin_display = is_pin_display(obj);
3772 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3776 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3779 ret = i915_gem_object_wait_rendering(obj, false);
3783 /* Ensure that we invalidate the GPU's caches and TLBs. */
3784 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3789 * Moves a single object to the CPU read, and possibly write domain.
3791 * This function returns when the move is complete, including waiting on
3795 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3797 uint32_t old_write_domain, old_read_domains;
3800 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3803 ret = i915_gem_object_wait_rendering(obj, !write);
3807 i915_gem_object_retire(obj);
3808 i915_gem_object_flush_gtt_write_domain(obj);
3810 old_write_domain = obj->base.write_domain;
3811 old_read_domains = obj->base.read_domains;
3813 /* Flush the CPU cache if it's still invalid. */
3814 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3815 i915_gem_clflush_object(obj, false);
3817 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3820 /* It should now be out of any other write domains, and we can update
3821 * the domain values for our changes.
3823 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3825 /* If we're writing through the CPU, then the GPU read domains will
3826 * need to be invalidated at next use.
3829 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3830 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3833 trace_i915_gem_object_change_domain(obj,
3840 /* Throttle our rendering by waiting until the ring has completed our requests
3841 * emitted over 20 msec ago.
3843 * Note that if we were to use the current jiffies each time around the loop,
3844 * we wouldn't escape the function with any frames outstanding if the time to
3845 * render a frame was over 20ms.
3847 * This should get us reasonable parallelism between CPU and GPU but also
3848 * relatively low latency when blocking on a particular request to finish.
3851 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3853 struct drm_i915_private *dev_priv = dev->dev_private;
3854 struct drm_i915_file_private *file_priv = file->driver_priv;
3855 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3856 struct drm_i915_gem_request *request;
3857 struct intel_ring_buffer *ring = NULL;
3858 unsigned reset_counter;
3862 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3866 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3870 spin_lock(&file_priv->mm.lock);
3871 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3872 if (time_after_eq(request->emitted_jiffies, recent_enough))
3875 ring = request->ring;
3876 seqno = request->seqno;
3878 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3879 spin_unlock(&file_priv->mm.lock);
3884 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
3886 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3892 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3893 struct i915_address_space *vm,
3897 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3898 struct i915_vma *vma;
3901 if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
3904 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
3907 vma = i915_gem_obj_to_vma(obj, vm);
3909 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3913 vma->node.start & (alignment - 1)) ||
3914 (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
3915 WARN(vma->pin_count,
3916 "bo is already pinned with incorrect alignment:"
3917 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
3918 " obj->map_and_fenceable=%d\n",
3919 i915_gem_obj_offset(obj, vm), alignment,
3920 flags & PIN_MAPPABLE,
3921 obj->map_and_fenceable);
3922 ret = i915_vma_unbind(vma);
3930 if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
3931 vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
3933 return PTR_ERR(vma);
3936 if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
3937 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
3940 if (flags & PIN_MAPPABLE)
3941 obj->pin_mappable |= true;
3947 i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
3949 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3952 BUG_ON(vma->pin_count == 0);
3953 BUG_ON(!i915_gem_obj_ggtt_bound(obj));
3955 if (--vma->pin_count == 0)
3956 obj->pin_mappable = false;
3960 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
3962 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3963 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3964 struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
3966 WARN_ON(!ggtt_vma ||
3967 dev_priv->fence_regs[obj->fence_reg].pin_count >
3968 ggtt_vma->pin_count);
3969 dev_priv->fence_regs[obj->fence_reg].pin_count++;
3976 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
3978 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3979 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3980 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
3981 dev_priv->fence_regs[obj->fence_reg].pin_count--;
3986 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3987 struct drm_file *file)
3989 struct drm_i915_gem_pin *args = data;
3990 struct drm_i915_gem_object *obj;
3993 if (INTEL_INFO(dev)->gen >= 6)
3996 ret = i915_mutex_lock_interruptible(dev);
4000 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4001 if (&obj->base == NULL) {
4006 if (obj->madv != I915_MADV_WILLNEED) {
4007 DRM_DEBUG("Attempting to pin a purgeable buffer\n");
4012 if (obj->pin_filp != NULL && obj->pin_filp != file) {
4013 DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
4019 if (obj->user_pin_count == ULONG_MAX) {
4024 if (obj->user_pin_count == 0) {
4025 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
4030 obj->user_pin_count++;
4031 obj->pin_filp = file;
4033 args->offset = i915_gem_obj_ggtt_offset(obj);
4035 drm_gem_object_unreference(&obj->base);
4037 mutex_unlock(&dev->struct_mutex);
4042 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4043 struct drm_file *file)
4045 struct drm_i915_gem_pin *args = data;
4046 struct drm_i915_gem_object *obj;
4049 ret = i915_mutex_lock_interruptible(dev);
4053 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4054 if (&obj->base == NULL) {
4059 if (obj->pin_filp != file) {
4060 DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4065 obj->user_pin_count--;
4066 if (obj->user_pin_count == 0) {
4067 obj->pin_filp = NULL;
4068 i915_gem_object_ggtt_unpin(obj);
4072 drm_gem_object_unreference(&obj->base);
4074 mutex_unlock(&dev->struct_mutex);
4079 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4080 struct drm_file *file)
4082 struct drm_i915_gem_busy *args = data;
4083 struct drm_i915_gem_object *obj;
4086 ret = i915_mutex_lock_interruptible(dev);
4090 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4091 if (&obj->base == NULL) {
4096 /* Count all active objects as busy, even if they are currently not used
4097 * by the gpu. Users of this interface expect objects to eventually
4098 * become non-busy without any further actions, therefore emit any
4099 * necessary flushes here.
4101 ret = i915_gem_object_flush_active(obj);
4103 args->busy = obj->active;
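/* The low bit reports whether the object is still active; the upper 16 bits
 * identify the ring the object was last used on.
 */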
4105 BUILD_BUG_ON(I915_NUM_RINGS > 16);
4106 args->busy |= intel_ring_flag(obj->ring) << 16;
4109 drm_gem_object_unreference(&obj->base);
4111 mutex_unlock(&dev->struct_mutex);
4116 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4117 struct drm_file *file_priv)
4119 return i915_gem_ring_throttle(dev, file_priv);
4123 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4124 struct drm_file *file_priv)
4126 struct drm_i915_gem_madvise *args = data;
4127 struct drm_i915_gem_object *obj;
4130 switch (args->madv) {
4131 case I915_MADV_DONTNEED:
4132 case I915_MADV_WILLNEED:
4138 ret = i915_mutex_lock_interruptible(dev);
4142 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
4143 if (&obj->base == NULL) {
4148 if (i915_gem_obj_is_pinned(obj)) {
4153 if (obj->madv != __I915_MADV_PURGED)
4154 obj->madv = args->madv;
4156 /* if the object is no longer attached, discard its backing storage */
4157 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
4158 i915_gem_object_truncate(obj);
4160 args->retained = obj->madv != __I915_MADV_PURGED;
4163 drm_gem_object_unreference(&obj->base);
4165 mutex_unlock(&dev->struct_mutex);
4169 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4170 const struct drm_i915_gem_object_ops *ops)
4172 INIT_LIST_HEAD(&obj->global_list);
4173 INIT_LIST_HEAD(&obj->ring_list);
4174 INIT_LIST_HEAD(&obj->obj_exec_link);
4175 INIT_LIST_HEAD(&obj->vma_list);
4179 obj->fence_reg = I915_FENCE_REG_NONE;
4180 obj->madv = I915_MADV_WILLNEED;
4181 /* Avoid an unnecessary call to unbind on the first bind. */
4182 obj->map_and_fenceable = true;
4184 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4187 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4188 .get_pages = i915_gem_object_get_pages_gtt,
4189 .put_pages = i915_gem_object_put_pages_gtt,
4192 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4195 struct drm_i915_gem_object *obj;
4196 struct address_space *mapping;
4199 obj = i915_gem_object_alloc(dev);
4203 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4204 i915_gem_object_free(obj);
4208 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4209 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4210 /* 965gm cannot relocate objects above 4GiB. */
4211 mask &= ~__GFP_HIGHMEM;
4212 mask |= __GFP_DMA32;
4215 mapping = file_inode(obj->base.filp)->i_mapping;
4216 mapping_set_gfp_mask(mapping, mask);
4218 i915_gem_object_init(obj, &i915_gem_object_ops);
4220 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4221 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4224 /* On some devices, we can have the GPU use the LLC (the CPU
4225 * cache) for about a 10% performance improvement
4226 * compared to uncached. Graphics requests other than
4227 * display scanout are coherent with the CPU in
4228 * accessing this cache. This means in this mode we
4229 * don't need to clflush on the CPU side, and on the
4230 * GPU side we only need to flush internal caches to
4231 * get data visible to the CPU.
4233 * However, we maintain the display planes as UC, and so
4234 * need to rebind when first used as such.
4236 obj->cache_level = I915_CACHE_LLC;
4238 obj->cache_level = I915_CACHE_NONE;
4240 trace_i915_gem_object_create(obj);
4245 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4247 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4248 struct drm_device *dev = obj->base.dev;
4249 struct drm_i915_private *dev_priv = dev->dev_private;
4250 struct i915_vma *vma, *next;
4252 intel_runtime_pm_get(dev_priv);
4254 trace_i915_gem_object_destroy(obj);
4257 i915_gem_detach_phys_object(dev, obj);
4259 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4263 ret = i915_vma_unbind(vma);
4264 if (WARN_ON(ret == -ERESTARTSYS)) {
4265 bool was_interruptible;
4267 was_interruptible = dev_priv->mm.interruptible;
4268 dev_priv->mm.interruptible = false;
4270 WARN_ON(i915_vma_unbind(vma));
4272 dev_priv->mm.interruptible = was_interruptible;
4276 /* Stolen objects don't hold a ref, but do hold a pin count. Fix that up
4277 * before progressing. */
4279 i915_gem_object_unpin_pages(obj);
4281 if (WARN_ON(obj->pages_pin_count))
4282 obj->pages_pin_count = 0;
4283 if (obj->madv != __I915_MADV_PURGED)
4284 obj->madv = I915_MADV_DONTNEED;
4285 i915_gem_object_put_pages(obj);
4286 i915_gem_object_free_mmap_offset(obj);
4287 i915_gem_object_release_stolen(obj);
4291 if (obj->base.import_attach)
4292 drm_prime_gem_destroy(&obj->base, NULL);
4294 if (obj->ops->release)
4295 obj->ops->release(obj);
4297 drm_gem_object_release(&obj->base);
4298 i915_gem_info_remove_obj(dev_priv, obj->base.size);
4301 i915_gem_object_free(obj);
4303 intel_runtime_pm_put(dev_priv);
4306 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4307 struct i915_address_space *vm)
4309 struct i915_vma *vma;
4310 list_for_each_entry(vma, &obj->vma_list, vma_link)
4317 void i915_gem_vma_destroy(struct i915_vma *vma)
4319 WARN_ON(vma->node.allocated);
4321 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4322 if (!list_empty(&vma->exec_list))
4325 list_del(&vma->vma_link);
4331 i915_gem_stop_ringbuffers(struct drm_device *dev)
4333 struct drm_i915_private *dev_priv = dev->dev_private;
4334 struct intel_ring_buffer *ring;
4337 for_each_ring(ring, dev_priv, i)
4338 intel_stop_ring_buffer(ring);
4342 i915_gem_suspend(struct drm_device *dev)
4344 struct drm_i915_private *dev_priv = dev->dev_private;
4347 mutex_lock(&dev->struct_mutex);
4348 if (dev_priv->ums.mm_suspended)
4351 ret = i915_gpu_idle(dev);
4355 i915_gem_retire_requests(dev);
4357 /* Under UMS, be paranoid and evict. */
4358 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4359 i915_gem_evict_everything(dev);
4361 i915_kernel_lost_context(dev);
4362 i915_gem_stop_ringbuffers(dev);
4364 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4365 * We need to replace this with a semaphore, or something.
4366 * And not confound ums.mm_suspended!
4368 dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4370 mutex_unlock(&dev->struct_mutex);
4372 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4373 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4374 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
4379 mutex_unlock(&dev->struct_mutex);
4383 int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
4385 struct drm_device *dev = ring->dev;
4386 struct drm_i915_private *dev_priv = dev->dev_private;
4387 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4388 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4391 if (!HAS_L3_DPF(dev) || !remap_info)
4394 ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
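/* Each remap entry is emitted as a MI_LOAD_REGISTER_IMM of one register,
 * which costs three dwords (opcode, register offset, value), hence the
 * GEN7_L3LOG_SIZE / 4 * 3 dwords reserved above.
 */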
4399 * Note: We do not worry about the concurrent register cacheline hang
4400 * here because no other code should access these registers other than
4401 * at initialization time.
4403 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4404 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4405 intel_ring_emit(ring, reg_base + i);
4406 intel_ring_emit(ring, remap_info[i/4]);
4409 intel_ring_advance(ring);
4414 void i915_gem_init_swizzling(struct drm_device *dev)
4416 struct drm_i915_private *dev_priv = dev->dev_private;
4418 if (INTEL_INFO(dev)->gen < 5 ||
4419 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4422 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4423 DISP_TILE_SURFACE_SWIZZLING);
4428 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4430 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4431 else if (IS_GEN7(dev))
4432 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4433 else if (IS_GEN8(dev))
4434 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4440 intel_enable_blt(struct drm_device *dev)
4445 /* The blitter was dysfunctional on early prototypes */
4446 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4447 DRM_INFO("BLT not supported on this pre-production hardware;"
4448 " graphics performance will be degraded.\n");
4455 static int i915_gem_init_rings(struct drm_device *dev)
4457 struct drm_i915_private *dev_priv = dev->dev_private;
4460 ret = intel_init_render_ring_buffer(dev);
4465 ret = intel_init_bsd_ring_buffer(dev);
4467 goto cleanup_render_ring;
4470 if (intel_enable_blt(dev)) {
4471 ret = intel_init_blt_ring_buffer(dev);
4473 goto cleanup_bsd_ring;
4476 if (HAS_VEBOX(dev)) {
4477 ret = intel_init_vebox_ring_buffer(dev);
4479 goto cleanup_blt_ring;
4482 if (HAS_BSD2(dev)) {
4483 ret = intel_init_bsd2_ring_buffer(dev);
4485 goto cleanup_vebox_ring;
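/* Initialise the seqno just below the 32-bit wrap point so that wraparound
 * handling gets exercised early rather than only after ~4 billion requests.
 */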
4488 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4490 goto cleanup_bsd2_ring;
4495 intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
4497 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4499 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4501 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4502 cleanup_render_ring:
4503 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4509 i915_gem_init_hw(struct drm_device *dev)
4511 struct drm_i915_private *dev_priv = dev->dev_private;
4514 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4517 if (dev_priv->ellc_size)
4518 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4520 if (IS_HASWELL(dev))
4521 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4522 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4524 if (HAS_PCH_NOP(dev)) {
4525 if (IS_IVYBRIDGE(dev)) {
4526 u32 temp = I915_READ(GEN7_MSG_CTL);
4527 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4528 I915_WRITE(GEN7_MSG_CTL, temp);
4529 } else if (INTEL_INFO(dev)->gen >= 7) {
4530 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4531 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4532 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4536 i915_gem_init_swizzling(dev);
4538 ret = i915_gem_init_rings(dev);
4542 for (i = 0; i < NUM_L3_SLICES(dev); i++)
4543 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4546 * XXX: Contexts should only be initialized once. Doing a switch to the
4547 * default context however is something we'd like to do after
4548 * reset or thaw (the latter may not actually be necessary for HW, but
4549 * goes with our code better). Context switching requires rings (for
4550 * the do_switch), but before enabling PPGTT. So don't move this.
4552 ret = i915_gem_context_enable(dev_priv);
4553 if (ret && ret != -EIO) {
4554 DRM_ERROR("Context enable failed %d\n", ret);
4555 i915_gem_cleanup_ringbuffer(dev);
4561 int i915_gem_init(struct drm_device *dev)
4563 struct drm_i915_private *dev_priv = dev->dev_private;
4566 mutex_lock(&dev->struct_mutex);
4568 if (IS_VALLEYVIEW(dev)) {
4569 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4570 I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
4571 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
4572 VLV_GTLC_ALLOWWAKEACK), 10))
4573 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4576 i915_gem_init_userptr(dev);
4577 i915_gem_init_global_gtt(dev);
4579 ret = i915_gem_context_init(dev);
4581 mutex_unlock(&dev->struct_mutex);
4585 ret = i915_gem_init_hw(dev);
4587 /* Allow ring initialisation to fail by marking the GPU as
4588 * wedged. But we only want to do this where the GPU is angry,
4589 * for all other failures, such as an allocation failure, bail.
4591 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4592 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
4595 mutex_unlock(&dev->struct_mutex);
4597 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4598 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4599 dev_priv->dri1.allow_batchbuffer = 1;
4604 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4606 struct drm_i915_private *dev_priv = dev->dev_private;
4607 struct intel_ring_buffer *ring;
4610 for_each_ring(ring, dev_priv, i)
4611 intel_cleanup_ring_buffer(ring);
4615 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4616 struct drm_file *file_priv)
4618 struct drm_i915_private *dev_priv = dev->dev_private;
4621 if (drm_core_check_feature(dev, DRIVER_MODESET))
4624 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
4625 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4626 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
4629 mutex_lock(&dev->struct_mutex);
4630 dev_priv->ums.mm_suspended = 0;
4632 ret = i915_gem_init_hw(dev);
4634 mutex_unlock(&dev->struct_mutex);
4638 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
4640 ret = drm_irq_install(dev, dev->pdev->irq);
4642 goto cleanup_ringbuffer;
4643 mutex_unlock(&dev->struct_mutex);
4648 i915_gem_cleanup_ringbuffer(dev);
4649 dev_priv->ums.mm_suspended = 1;
4650 mutex_unlock(&dev->struct_mutex);
4656 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4657 struct drm_file *file_priv)
4659 if (drm_core_check_feature(dev, DRIVER_MODESET))
4662 mutex_lock(&dev->struct_mutex);
4663 drm_irq_uninstall(dev);
4664 mutex_unlock(&dev->struct_mutex);
4666 return i915_gem_suspend(dev);
4670 i915_gem_lastclose(struct drm_device *dev)
4674 if (drm_core_check_feature(dev, DRIVER_MODESET))
4677 ret = i915_gem_suspend(dev);
4679 DRM_ERROR("failed to idle hardware: %d\n", ret);
4683 init_ring_lists(struct intel_ring_buffer *ring)
4685 INIT_LIST_HEAD(&ring->active_list);
4686 INIT_LIST_HEAD(&ring->request_list);
4689 void i915_init_vm(struct drm_i915_private *dev_priv,
4690 struct i915_address_space *vm)
4692 if (!i915_is_ggtt(vm))
4693 drm_mm_init(&vm->mm, vm->start, vm->total);
4694 vm->dev = dev_priv->dev;
4695 INIT_LIST_HEAD(&vm->active_list);
4696 INIT_LIST_HEAD(&vm->inactive_list);
4697 INIT_LIST_HEAD(&vm->global_link);
4698 list_add_tail(&vm->global_link, &dev_priv->vm_list);
4702 i915_gem_load(struct drm_device *dev)
4704 struct drm_i915_private *dev_priv = dev->dev_private;
4708 kmem_cache_create("i915_gem_object",
4709 sizeof(struct drm_i915_gem_object), 0,
4713 INIT_LIST_HEAD(&dev_priv->vm_list);
4714 i915_init_vm(dev_priv, &dev_priv->gtt.base);
4716 INIT_LIST_HEAD(&dev_priv->context_list);
4717 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4718 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4719 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4720 for (i = 0; i < I915_NUM_RINGS; i++)
4721 init_ring_lists(&dev_priv->ring[i]);
4722 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4723 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4724 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4725 i915_gem_retire_work_handler);
4726 INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
4727 i915_gem_idle_work_handler);
4728 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4730 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4732 I915_WRITE(MI_ARB_STATE,
4733 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4736 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4738 /* Old X drivers will take 0-2 for front, back, depth buffers */
4739 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4740 dev_priv->fence_reg_start = 3;
4742 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4743 dev_priv->num_fence_regs = 32;
4744 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4745 dev_priv->num_fence_regs = 16;
4747 dev_priv->num_fence_regs = 8;
4749 /* Initialize fence registers to zero */
4750 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4751 i915_gem_restore_fences(dev);
4753 i915_gem_detect_bit_6_swizzle(dev);
4754 init_waitqueue_head(&dev_priv->pending_flip_queue);
4756 dev_priv->mm.interruptible = true;
4758 dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
4759 dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
4760 dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
4761 register_shrinker(&dev_priv->mm.shrinker);
4765 * Create a physically contiguous memory object for this object
4766 * e.g. for cursor + overlay regs
4768 static int i915_gem_init_phys_object(struct drm_device *dev,
4769 int id, int size, int align)
4771 struct drm_i915_private *dev_priv = dev->dev_private;
4772 struct drm_i915_gem_phys_object *phys_obj;
4775 if (dev_priv->mm.phys_objs[id - 1] || !size)
4778 phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
4784 phys_obj->handle = drm_pci_alloc(dev, size, align);
4785 if (!phys_obj->handle) {
4790 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4793 dev_priv->mm.phys_objs[id - 1] = phys_obj;
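/* i915_gem_free_phys_object: tear down one phys object slot, detaching any
 * GEM object still bound to it, restoring write-back caching on the pages
 * and releasing the PCI-consistent allocation.
 */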
4801 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4803 struct drm_i915_private *dev_priv = dev->dev_private;
4804 struct drm_i915_gem_phys_object *phys_obj;
4806 if (!dev_priv->mm.phys_objs[id - 1])
4807 return;
4809 phys_obj = dev_priv->mm.phys_objs[id - 1];
4810 if (phys_obj->cur_obj) {
4811 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4815 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4817 drm_pci_free(dev, phys_obj->handle);
4819 dev_priv->mm.phys_objs[id - 1] = NULL;
4822 void i915_gem_free_all_phys_object(struct drm_device *dev)
4826 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4827 i915_gem_free_phys_object(dev, i);
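/* i915_gem_detach_phys_object: copy the contents of the contiguous buffer
 * back into the object's shmem pages (flushing CPU caches as needed) and
 * break the object <-> phys-object association.
 */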
4830 void i915_gem_detach_phys_object(struct drm_device *dev,
4831 struct drm_i915_gem_object *obj)
4833 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4840 vaddr = obj->phys_obj->handle->vaddr;
4842 page_count = obj->base.size / PAGE_SIZE;
4843 for (i = 0; i < page_count; i++) {
4844 struct page *page = shmem_read_mapping_page(mapping, i);
4845 if (!IS_ERR(page)) {
4846 char *dst = kmap_atomic(page);
4847 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4848 kunmap_atomic(dst);
4850 drm_clflush_pages(&page, 1);
4852 set_page_dirty(page);
4853 mark_page_accessed(page);
4854 page_cache_release(page);
4857 i915_gem_chipset_flush(dev);
4859 obj->phys_obj->cur_obj = NULL;
4860 obj->phys_obj = NULL;
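/* i915_gem_attach_phys_object: bind an object to the given phys-object id,
 * creating the backing allocation on demand, and copy the object's current
 * shmem pages into the contiguous buffer.
 */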
4864 i915_gem_attach_phys_object(struct drm_device *dev,
4865 struct drm_i915_gem_object *obj,
4869 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4870 struct drm_i915_private *dev_priv = dev->dev_private;
4875 if (id > I915_MAX_PHYS_OBJECT)
4876 return -EINVAL;
4878 if (obj->phys_obj) {
4879 if (obj->phys_obj->id == id)
4880 return 0;
4881 i915_gem_detach_phys_object(dev, obj);
4884 /* create a new object */
4885 if (!dev_priv->mm.phys_objs[id - 1]) {
4886 ret = i915_gem_init_phys_object(dev, id,
4887 obj->base.size, align);
4888 if (ret) {
4889 DRM_ERROR("failed to init phys object %d size: %zu\n",
4890 id, obj->base.size);
4891 return ret;
4892 }
4895 /* bind to the object */
4896 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4897 obj->phys_obj->cur_obj = obj;
4899 page_count = obj->base.size / PAGE_SIZE;
4901 for (i = 0; i < page_count; i++) {
4905 page = shmem_read_mapping_page(mapping, i);
4906 if (IS_ERR(page))
4907 return PTR_ERR(page);
4909 src = kmap_atomic(page);
4910 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4911 memcpy(dst, src, PAGE_SIZE);
4912 kunmap_atomic(src);
4914 mark_page_accessed(page);
4915 page_cache_release(page);
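/* i915_gem_phys_pwrite: pwrite fast path for phys objects. The write goes
 * straight into the contiguous buffer; the non-caching atomic copy is tried
 * first and, if it faults, struct_mutex is dropped for a plain
 * copy_from_user before flushing the chipset caches.
 */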
4922 i915_gem_phys_pwrite(struct drm_device *dev,
4923 struct drm_i915_gem_object *obj,
4924 struct drm_i915_gem_pwrite *args,
4925 struct drm_file *file_priv)
4927 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4928 char __user *user_data = to_user_ptr(args->data_ptr);
4930 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4931 unsigned long unwritten;
4933 /* The physical object once assigned is fixed for the lifetime
4934 * of the obj, so we can safely drop the lock and continue. */
4937 mutex_unlock(&dev->struct_mutex);
4938 unwritten = copy_from_user(vaddr, user_data, args->size);
4939 mutex_lock(&dev->struct_mutex);
4940 if (unwritten)
4941 return -EFAULT;
4942 }
4944 i915_gem_chipset_flush(dev);
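/* i915_gem_release: per-file cleanup on close. The file's idle work is
 * cancelled and pending requests are unlinked from the client list so that a
 * later retire cannot dereference the about-to-be-freed file_priv.
 */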
4948 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4950 struct drm_i915_file_private *file_priv = file->driver_priv;
4952 cancel_delayed_work_sync(&file_priv->mm.idle_work);
4954 /* Clean up our request list when the client is going away, so that
4955 * later retire_requests won't dereference our soon-to-be-gone
4956 * file_priv. */
4958 spin_lock(&file_priv->mm.lock);
4959 while (!list_empty(&file_priv->mm.request_list)) {
4960 struct drm_i915_gem_request *request;
4962 request = list_first_entry(&file_priv->mm.request_list,
4963 struct drm_i915_gem_request,
4964 client_list);
4965 list_del(&request->client_list);
4966 request->file_priv = NULL;
4968 spin_unlock(&file_priv->mm.lock);
4972 i915_gem_file_idle_work_handler(struct work_struct *work)
4974 struct drm_i915_file_private *file_priv =
4975 container_of(work, typeof(*file_priv), mm.idle_work.work);
4977 atomic_set(&file_priv->rps_wait_boost, false);
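/* i915_gem_open: allocate and initialise the per-file GEM state (request
 * list, spinlock, RPS-boost idle work) and open a context for the new
 * client via i915_gem_context_open().
 */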
4980 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4982 struct drm_i915_file_private *file_priv;
4985 DRM_DEBUG_DRIVER("\n");
4987 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4988 if (!file_priv)
4989 return -ENOMEM;
4991 file->driver_priv = file_priv;
4992 file_priv->dev_priv = dev->dev_private;
4993 file_priv->file = file;
4995 spin_lock_init(&file_priv->mm.lock);
4996 INIT_LIST_HEAD(&file_priv->mm.request_list);
4997 INIT_DELAYED_WORK(&file_priv->mm.idle_work,
4998 i915_gem_file_idle_work_handler);
5000 ret = i915_gem_context_open(dev, file);
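/* mutex_is_locked_by: best-effort check whether @task currently owns @mutex.
 * It is used by the shrinker lock helper below to detect when the shrinker
 * is re-entered from a thread that already holds struct_mutex.
 */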
5007 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
5009 if (!mutex_is_locked(mutex))
5010 return false;
5012 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
5013 return mutex->owner == task;
5014 #else
5015 /* Since UP may be pre-empted, we cannot assume that we own the lock */
5016 return false;
5017 #endif
5020 static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
5022 if (!mutex_trylock(&dev->struct_mutex)) {
5023 if (!mutex_is_locked_by(&dev->struct_mutex, current))
5024 return false;
5026 if (to_i915(dev)->mm.shrinker_no_lock_stealing)
5027 return false;
5036 static int num_vma_bound(struct drm_i915_gem_object *obj)
5038 struct i915_vma *vma;
5041 list_for_each_entry(vma, &obj->vma_list, vma_link)
5042 if (drm_mm_node_allocated(&vma->node))
5043 count++;
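/* i915_gem_shrinker_count: report how many pages could plausibly be
 * reclaimed: all pages of unbound objects whose pages are not pinned, plus
 * bound objects that are neither pinned nor have their pages pinned beyond
 * their VMA bindings.
 */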
5048 static unsigned long
5049 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
5051 struct drm_i915_private *dev_priv =
5052 container_of(shrinker, struct drm_i915_private, mm.shrinker);
5053 struct drm_device *dev = dev_priv->dev;
5054 struct drm_i915_gem_object *obj;
5055 unsigned long count;
5056 bool unlock;
5058 if (!i915_gem_shrinker_lock(dev, &unlock))
5059 return 0;
5061 count = 0;
5062 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
5063 if (obj->pages_pin_count == 0)
5064 count += obj->base.size >> PAGE_SHIFT;
5066 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
5067 if (!i915_gem_obj_is_pinned(obj) &&
5068 obj->pages_pin_count == num_vma_bound(obj))
5069 count += obj->base.size >> PAGE_SHIFT;
5073 mutex_unlock(&dev->struct_mutex);
5078 /* All the new VM stuff */
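/* These helpers translate an object into its binding in a particular address
 * space (offset, size, bound-ness). The aliasing PPGTT shares the global GTT
 * layout, so lookups against it are redirected to the GGTT.
 */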
5079 unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
5080 struct i915_address_space *vm)
5082 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5083 struct i915_vma *vma;
5085 if (!dev_priv->mm.aliasing_ppgtt ||
5086 vm == &dev_priv->mm.aliasing_ppgtt->base)
5087 vm = &dev_priv->gtt.base;
5089 BUG_ON(list_empty(&o->vma_list));
5090 list_for_each_entry(vma, &o->vma_list, vma_link) {
5091 if (vma->vm == vm)
5092 return vma->node.start;
5098 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
5099 struct i915_address_space *vm)
5101 struct i915_vma *vma;
5103 list_for_each_entry(vma, &o->vma_list, vma_link)
5104 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
5105 return true;
5110 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
5112 struct i915_vma *vma;
5114 list_for_each_entry(vma, &o->vma_list, vma_link)
5115 if (drm_mm_node_allocated(&vma->node))
5116 return true;
5121 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
5122 struct i915_address_space *vm)
5124 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5125 struct i915_vma *vma;
5127 if (!dev_priv->mm.aliasing_ppgtt ||
5128 vm == &dev_priv->mm.aliasing_ppgtt->base)
5129 vm = &dev_priv->gtt.base;
5131 BUG_ON(list_empty(&o->vma_list));
5133 list_for_each_entry(vma, &o->vma_list, vma_link)
5134 if (vma->vm == vm)
5135 return vma->node.size;
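/* i915_gem_shrinker_scan: reclaim in increasing order of cost. Purgeable
 * objects are freed first via i915_gem_purge(); if that does not satisfy the
 * request, unpinned bound objects are shrunk, and finally
 * i915_gem_shrink_all() drops everything that can be dropped.
 */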
5140 static unsigned long
5141 i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
5143 struct drm_i915_private *dev_priv =
5144 container_of(shrinker, struct drm_i915_private, mm.shrinker);
5145 struct drm_device *dev = dev_priv->dev;
5146 unsigned long freed;
5147 bool unlock;
5149 if (!i915_gem_shrinker_lock(dev, &unlock))
5150 return SHRINK_STOP;
5152 freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
5153 if (freed < sc->nr_to_scan)
5154 freed += __i915_gem_shrink(dev_priv,
5155 sc->nr_to_scan - freed,
5157 if (freed < sc->nr_to_scan)
5158 freed += i915_gem_shrink_all(dev_priv);
5161 mutex_unlock(&dev->struct_mutex);
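/* i915_gem_obj_to_ggtt: return the object's global-GTT VMA, if any. The GGTT
 * VMA is expected to be the first entry on the object's vma_list, which the
 * check below relies on.
 */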
5166 struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
5168 struct i915_vma *vma;
5170 /* This WARN has probably outlived its usefulness (callers already
5171 * WARN if they don't find the GGTT vma they expect). When removing,
5172 * remember to remove the pre-check in is_pin_display() as well */
5173 if (WARN_ON(list_empty(&obj->vma_list)))
5174 return NULL;
5176 vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
5177 if (vma->vm != obj_to_ggtt(obj))