drm/i915: Flush request queue when waiting for ring space
[firefly-linux-kernel-4.4.55.git] / drivers / gpu / drm / i915 / i915_gem.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include <drm/drmP.h>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/shmem_fs.h>
35 #include <linux/slab.h>
36 #include <linux/swap.h>
37 #include <linux/pci.h>
38 #include <linux/dma-buf.h>
39
40 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
41 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
42                                                    bool force);
43 static __must_check int
44 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
45                                bool readonly);
46 static void
47 i915_gem_object_retire(struct drm_i915_gem_object *obj);
48
49 static int i915_gem_phys_pwrite(struct drm_device *dev,
50                                 struct drm_i915_gem_object *obj,
51                                 struct drm_i915_gem_pwrite *args,
52                                 struct drm_file *file);
53
54 static void i915_gem_write_fence(struct drm_device *dev, int reg,
55                                  struct drm_i915_gem_object *obj);
56 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
57                                          struct drm_i915_fence_reg *fence,
58                                          bool enable);
59
60 static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
61                                              struct shrink_control *sc);
62 static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
63                                             struct shrink_control *sc);
64 static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
65 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
66 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
67
68 static bool cpu_cache_is_coherent(struct drm_device *dev,
69                                   enum i915_cache_level level)
70 {
71         return HAS_LLC(dev) || level != I915_CACHE_NONE;
72 }
73
74 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
75 {
76         if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
77                 return true;
78
79         return obj->pin_display;
80 }
81
82 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
83 {
84         if (obj->tiling_mode)
85                 i915_gem_release_mmap(obj);
86
87         /* As we do not have an associated fence register, we will force
88          * a tiling change if we ever need to acquire one.
89          */
90         obj->fence_dirty = false;
91         obj->fence_reg = I915_FENCE_REG_NONE;
92 }
93
94 /* some bookkeeping */
95 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
96                                   size_t size)
97 {
98         spin_lock(&dev_priv->mm.object_stat_lock);
99         dev_priv->mm.object_count++;
100         dev_priv->mm.object_memory += size;
101         spin_unlock(&dev_priv->mm.object_stat_lock);
102 }
103
104 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
105                                      size_t size)
106 {
107         spin_lock(&dev_priv->mm.object_stat_lock);
108         dev_priv->mm.object_count--;
109         dev_priv->mm.object_memory -= size;
110         spin_unlock(&dev_priv->mm.object_stat_lock);
111 }
112
113 static int
114 i915_gem_wait_for_error(struct i915_gpu_error *error)
115 {
116         int ret;
117
118 #define EXIT_COND (!i915_reset_in_progress(error) || \
119                    i915_terminally_wedged(error))
120         if (EXIT_COND)
121                 return 0;
122
123         /*
124          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
125          * userspace. If it takes that long something really bad is going on and
126          * we should simply try to bail out and fail as gracefully as possible.
127          */
128         ret = wait_event_interruptible_timeout(error->reset_queue,
129                                                EXIT_COND,
130                                                10*HZ);
131         if (ret == 0) {
132                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
133                 return -EIO;
134         } else if (ret < 0) {
135                 return ret;
136         }
137 #undef EXIT_COND
138
139         return 0;
140 }
141
142 int i915_mutex_lock_interruptible(struct drm_device *dev)
143 {
144         struct drm_i915_private *dev_priv = dev->dev_private;
145         int ret;
146
147         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
148         if (ret)
149                 return ret;
150
151         ret = mutex_lock_interruptible(&dev->struct_mutex);
152         if (ret)
153                 return ret;
154
155         WARN_ON(i915_verify_lists(dev));
156         return 0;
157 }
158
159 static inline bool
160 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
161 {
162         return i915_gem_obj_bound_any(obj) && !obj->active;
163 }
164
165 int
166 i915_gem_init_ioctl(struct drm_device *dev, void *data,
167                     struct drm_file *file)
168 {
169         struct drm_i915_private *dev_priv = dev->dev_private;
170         struct drm_i915_gem_init *args = data;
171
172         if (drm_core_check_feature(dev, DRIVER_MODESET))
173                 return -ENODEV;
174
175         if (args->gtt_start >= args->gtt_end ||
176             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
177                 return -EINVAL;
178
179         /* GEM with user mode setting was never supported on ilk and later. */
180         if (INTEL_INFO(dev)->gen >= 5)
181                 return -ENODEV;
182
183         mutex_lock(&dev->struct_mutex);
184         i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
185                                   args->gtt_end);
186         dev_priv->gtt.mappable_end = args->gtt_end;
187         mutex_unlock(&dev->struct_mutex);
188
189         return 0;
190 }
191
192 int
193 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
194                             struct drm_file *file)
195 {
196         struct drm_i915_private *dev_priv = dev->dev_private;
197         struct drm_i915_gem_get_aperture *args = data;
198         struct drm_i915_gem_object *obj;
199         size_t pinned;
200
201         pinned = 0;
202         mutex_lock(&dev->struct_mutex);
203         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
204                 if (i915_gem_obj_is_pinned(obj))
205                         pinned += i915_gem_obj_ggtt_size(obj);
206         mutex_unlock(&dev->struct_mutex);
207
208         args->aper_size = dev_priv->gtt.base.total;
209         args->aper_available_size = args->aper_size - pinned;
210
211         return 0;
212 }
213
214 void *i915_gem_object_alloc(struct drm_device *dev)
215 {
216         struct drm_i915_private *dev_priv = dev->dev_private;
217         return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
218 }
219
220 void i915_gem_object_free(struct drm_i915_gem_object *obj)
221 {
222         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
223         kmem_cache_free(dev_priv->slab, obj);
224 }
225
226 static int
227 i915_gem_create(struct drm_file *file,
228                 struct drm_device *dev,
229                 uint64_t size,
230                 uint32_t *handle_p)
231 {
232         struct drm_i915_gem_object *obj;
233         int ret;
234         u32 handle;
235
236         size = roundup(size, PAGE_SIZE);
237         if (size == 0)
238                 return -EINVAL;
239
240         /* Allocate the new object */
241         obj = i915_gem_alloc_object(dev, size);
242         if (obj == NULL)
243                 return -ENOMEM;
244
245         ret = drm_gem_handle_create(file, &obj->base, &handle);
246         /* drop reference from allocate - handle holds it now */
247         drm_gem_object_unreference_unlocked(&obj->base);
248         if (ret)
249                 return ret;
250
251         *handle_p = handle;
252         return 0;
253 }
254
255 int
256 i915_gem_dumb_create(struct drm_file *file,
257                      struct drm_device *dev,
258                      struct drm_mode_create_dumb *args)
259 {
260         /* have to work out size/pitch and return them */
261         args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
262         args->size = args->pitch * args->height;
263         return i915_gem_create(file, dev,
264                                args->size, &args->handle);
265 }
266
267 /**
268  * Creates a new mm object and returns a handle to it.
269  */
270 int
271 i915_gem_create_ioctl(struct drm_device *dev, void *data,
272                       struct drm_file *file)
273 {
274         struct drm_i915_gem_create *args = data;
275
276         return i915_gem_create(file, dev,
277                                args->size, &args->handle);
278 }
279
280 static inline int
281 __copy_to_user_swizzled(char __user *cpu_vaddr,
282                         const char *gpu_vaddr, int gpu_offset,
283                         int length)
284 {
285         int ret, cpu_offset = 0;
286
287         while (length > 0) {
288                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
289                 int this_length = min(cacheline_end - gpu_offset, length);
290                 int swizzled_gpu_offset = gpu_offset ^ 64;
291
292                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
293                                      gpu_vaddr + swizzled_gpu_offset,
294                                      this_length);
295                 if (ret)
296                         return ret + length;
297
298                 cpu_offset += this_length;
299                 gpu_offset += this_length;
300                 length -= this_length;
301         }
302
303         return 0;
304 }
305
306 static inline int
307 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
308                           const char __user *cpu_vaddr,
309                           int length)
310 {
311         int ret, cpu_offset = 0;
312
313         while (length > 0) {
314                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
315                 int this_length = min(cacheline_end - gpu_offset, length);
316                 int swizzled_gpu_offset = gpu_offset ^ 64;
317
318                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
319                                        cpu_vaddr + cpu_offset,
320                                        this_length);
321                 if (ret)
322                         return ret + length;
323
324                 cpu_offset += this_length;
325                 gpu_offset += this_length;
326                 length -= this_length;
327         }
328
329         return 0;
330 }
331
332 /*
333  * Pins the specified object's pages and synchronizes the object with
334  * GPU accesses. Sets needs_clflush to non-zero if the caller should
335  * flush the object from the CPU cache.
336  */
337 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
338                                     int *needs_clflush)
339 {
340         int ret;
341
342         *needs_clflush = 0;
343
344         if (!obj->base.filp)
345                 return -EINVAL;
346
347         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
348                 /* If we're not in the cpu read domain, set ourself into the gtt
349                  * read domain and manually flush cachelines (if required). This
350                  * optimizes for the case when the gpu will dirty the data
351                  * anyway again before the next pread happens. */
352                 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
353                                                         obj->cache_level);
354                 ret = i915_gem_object_wait_rendering(obj, true);
355                 if (ret)
356                         return ret;
357
358                 i915_gem_object_retire(obj);
359         }
360
361         ret = i915_gem_object_get_pages(obj);
362         if (ret)
363                 return ret;
364
365         i915_gem_object_pin_pages(obj);
366
367         return ret;
368 }
369
370 /* Per-page copy function for the shmem pread fastpath.
371  * Flushes invalid cachelines before reading the target if
372  * needs_clflush is set. */
373 static int
374 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
375                  char __user *user_data,
376                  bool page_do_bit17_swizzling, bool needs_clflush)
377 {
378         char *vaddr;
379         int ret;
380
381         if (unlikely(page_do_bit17_swizzling))
382                 return -EINVAL;
383
384         vaddr = kmap_atomic(page);
385         if (needs_clflush)
386                 drm_clflush_virt_range(vaddr + shmem_page_offset,
387                                        page_length);
388         ret = __copy_to_user_inatomic(user_data,
389                                       vaddr + shmem_page_offset,
390                                       page_length);
391         kunmap_atomic(vaddr);
392
393         return ret ? -EFAULT : 0;
394 }
395
396 static void
397 shmem_clflush_swizzled_range(char *addr, unsigned long length,
398                              bool swizzled)
399 {
400         if (unlikely(swizzled)) {
401                 unsigned long start = (unsigned long) addr;
402                 unsigned long end = (unsigned long) addr + length;
403
404                 /* For swizzling simply ensure that we always flush both
405                  * channels. Lame, but simple and it works. Swizzled
406                  * pwrite/pread is far from a hotpath - current userspace
407                  * doesn't use it at all. */
408                 start = round_down(start, 128);
409                 end = round_up(end, 128);
410
411                 drm_clflush_virt_range((void *)start, end - start);
412         } else {
413                 drm_clflush_virt_range(addr, length);
414         }
415
416 }
417
418 /* Only difference to the fast-path function is that this can handle bit17
419  * and uses non-atomic copy and kmap functions. */
420 static int
421 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
422                  char __user *user_data,
423                  bool page_do_bit17_swizzling, bool needs_clflush)
424 {
425         char *vaddr;
426         int ret;
427
428         vaddr = kmap(page);
429         if (needs_clflush)
430                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
431                                              page_length,
432                                              page_do_bit17_swizzling);
433
434         if (page_do_bit17_swizzling)
435                 ret = __copy_to_user_swizzled(user_data,
436                                               vaddr, shmem_page_offset,
437                                               page_length);
438         else
439                 ret = __copy_to_user(user_data,
440                                      vaddr + shmem_page_offset,
441                                      page_length);
442         kunmap(page);
443
444         return ret ? - EFAULT : 0;
445 }
446
447 static int
448 i915_gem_shmem_pread(struct drm_device *dev,
449                      struct drm_i915_gem_object *obj,
450                      struct drm_i915_gem_pread *args,
451                      struct drm_file *file)
452 {
453         char __user *user_data;
454         ssize_t remain;
455         loff_t offset;
456         int shmem_page_offset, page_length, ret = 0;
457         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
458         int prefaulted = 0;
459         int needs_clflush = 0;
460         struct sg_page_iter sg_iter;
461
462         user_data = to_user_ptr(args->data_ptr);
463         remain = args->size;
464
465         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
466
467         ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
468         if (ret)
469                 return ret;
470
471         offset = args->offset;
472
473         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
474                          offset >> PAGE_SHIFT) {
475                 struct page *page = sg_page_iter_page(&sg_iter);
476
477                 if (remain <= 0)
478                         break;
479
480                 /* Operation in this page
481                  *
482                  * shmem_page_offset = offset within page in shmem file
483                  * page_length = bytes to copy for this page
484                  */
485                 shmem_page_offset = offset_in_page(offset);
486                 page_length = remain;
487                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
488                         page_length = PAGE_SIZE - shmem_page_offset;
489
490                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
491                         (page_to_phys(page) & (1 << 17)) != 0;
492
493                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
494                                        user_data, page_do_bit17_swizzling,
495                                        needs_clflush);
496                 if (ret == 0)
497                         goto next_page;
498
499                 mutex_unlock(&dev->struct_mutex);
500
501                 if (likely(!i915.prefault_disable) && !prefaulted) {
502                         ret = fault_in_multipages_writeable(user_data, remain);
503                         /* Userspace is tricking us, but we've already clobbered
504                          * its pages with the prefault and promised to write the
505                          * data up to the first fault. Hence ignore any errors
506                          * and just continue. */
507                         (void)ret;
508                         prefaulted = 1;
509                 }
510
511                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
512                                        user_data, page_do_bit17_swizzling,
513                                        needs_clflush);
514
515                 mutex_lock(&dev->struct_mutex);
516
517                 if (ret)
518                         goto out;
519
520 next_page:
521                 remain -= page_length;
522                 user_data += page_length;
523                 offset += page_length;
524         }
525
526 out:
527         i915_gem_object_unpin_pages(obj);
528
529         return ret;
530 }
531
532 /**
533  * Reads data from the object referenced by handle.
534  *
535  * On error, the contents of *data are undefined.
536  */
537 int
538 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
539                      struct drm_file *file)
540 {
541         struct drm_i915_gem_pread *args = data;
542         struct drm_i915_gem_object *obj;
543         int ret = 0;
544
545         if (args->size == 0)
546                 return 0;
547
548         if (!access_ok(VERIFY_WRITE,
549                        to_user_ptr(args->data_ptr),
550                        args->size))
551                 return -EFAULT;
552
553         ret = i915_mutex_lock_interruptible(dev);
554         if (ret)
555                 return ret;
556
557         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
558         if (&obj->base == NULL) {
559                 ret = -ENOENT;
560                 goto unlock;
561         }
562
563         /* Bounds check source.  */
564         if (args->offset > obj->base.size ||
565             args->size > obj->base.size - args->offset) {
566                 ret = -EINVAL;
567                 goto out;
568         }
569
570         /* prime objects have no backing filp to GEM pread/pwrite
571          * pages from.
572          */
573         if (!obj->base.filp) {
574                 ret = -EINVAL;
575                 goto out;
576         }
577
578         trace_i915_gem_object_pread(obj, args->offset, args->size);
579
580         ret = i915_gem_shmem_pread(dev, obj, args, file);
581
582 out:
583         drm_gem_object_unreference(&obj->base);
584 unlock:
585         mutex_unlock(&dev->struct_mutex);
586         return ret;
587 }
588
589 /* This is the fast write path which cannot handle
590  * page faults in the source data
591  */
592
593 static inline int
594 fast_user_write(struct io_mapping *mapping,
595                 loff_t page_base, int page_offset,
596                 char __user *user_data,
597                 int length)
598 {
599         void __iomem *vaddr_atomic;
600         void *vaddr;
601         unsigned long unwritten;
602
603         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
604         /* We can use the cpu mem copy function because this is X86. */
605         vaddr = (void __force*)vaddr_atomic + page_offset;
606         unwritten = __copy_from_user_inatomic_nocache(vaddr,
607                                                       user_data, length);
608         io_mapping_unmap_atomic(vaddr_atomic);
609         return unwritten;
610 }
611
612 /**
613  * This is the fast pwrite path, where we copy the data directly from the
614  * user into the GTT, uncached.
615  */
616 static int
617 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
618                          struct drm_i915_gem_object *obj,
619                          struct drm_i915_gem_pwrite *args,
620                          struct drm_file *file)
621 {
622         struct drm_i915_private *dev_priv = dev->dev_private;
623         ssize_t remain;
624         loff_t offset, page_base;
625         char __user *user_data;
626         int page_offset, page_length, ret;
627
628         ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
629         if (ret)
630                 goto out;
631
632         ret = i915_gem_object_set_to_gtt_domain(obj, true);
633         if (ret)
634                 goto out_unpin;
635
636         ret = i915_gem_object_put_fence(obj);
637         if (ret)
638                 goto out_unpin;
639
640         user_data = to_user_ptr(args->data_ptr);
641         remain = args->size;
642
643         offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
644
645         while (remain > 0) {
646                 /* Operation in this page
647                  *
648                  * page_base = page offset within aperture
649                  * page_offset = offset within page
650                  * page_length = bytes to copy for this page
651                  */
652                 page_base = offset & PAGE_MASK;
653                 page_offset = offset_in_page(offset);
654                 page_length = remain;
655                 if ((page_offset + remain) > PAGE_SIZE)
656                         page_length = PAGE_SIZE - page_offset;
657
658                 /* If we get a fault while copying data, then (presumably) our
659                  * source page isn't available.  Return the error and we'll
660                  * retry in the slow path.
661                  */
662                 if (fast_user_write(dev_priv->gtt.mappable, page_base,
663                                     page_offset, user_data, page_length)) {
664                         ret = -EFAULT;
665                         goto out_unpin;
666                 }
667
668                 remain -= page_length;
669                 user_data += page_length;
670                 offset += page_length;
671         }
672
673 out_unpin:
674         i915_gem_object_ggtt_unpin(obj);
675 out:
676         return ret;
677 }
678
679 /* Per-page copy function for the shmem pwrite fastpath.
680  * Flushes invalid cachelines before writing to the target if
681  * needs_clflush_before is set and flushes out any written cachelines after
682  * writing if needs_clflush is set. */
683 static int
684 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
685                   char __user *user_data,
686                   bool page_do_bit17_swizzling,
687                   bool needs_clflush_before,
688                   bool needs_clflush_after)
689 {
690         char *vaddr;
691         int ret;
692
693         if (unlikely(page_do_bit17_swizzling))
694                 return -EINVAL;
695
696         vaddr = kmap_atomic(page);
697         if (needs_clflush_before)
698                 drm_clflush_virt_range(vaddr + shmem_page_offset,
699                                        page_length);
700         ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
701                                         user_data, page_length);
702         if (needs_clflush_after)
703                 drm_clflush_virt_range(vaddr + shmem_page_offset,
704                                        page_length);
705         kunmap_atomic(vaddr);
706
707         return ret ? -EFAULT : 0;
708 }
709
710 /* Only difference to the fast-path function is that this can handle bit17
711  * and uses non-atomic copy and kmap functions. */
712 static int
713 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
714                   char __user *user_data,
715                   bool page_do_bit17_swizzling,
716                   bool needs_clflush_before,
717                   bool needs_clflush_after)
718 {
719         char *vaddr;
720         int ret;
721
722         vaddr = kmap(page);
723         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
724                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
725                                              page_length,
726                                              page_do_bit17_swizzling);
727         if (page_do_bit17_swizzling)
728                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
729                                                 user_data,
730                                                 page_length);
731         else
732                 ret = __copy_from_user(vaddr + shmem_page_offset,
733                                        user_data,
734                                        page_length);
735         if (needs_clflush_after)
736                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
737                                              page_length,
738                                              page_do_bit17_swizzling);
739         kunmap(page);
740
741         return ret ? -EFAULT : 0;
742 }
743
744 static int
745 i915_gem_shmem_pwrite(struct drm_device *dev,
746                       struct drm_i915_gem_object *obj,
747                       struct drm_i915_gem_pwrite *args,
748                       struct drm_file *file)
749 {
750         ssize_t remain;
751         loff_t offset;
752         char __user *user_data;
753         int shmem_page_offset, page_length, ret = 0;
754         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
755         int hit_slowpath = 0;
756         int needs_clflush_after = 0;
757         int needs_clflush_before = 0;
758         struct sg_page_iter sg_iter;
759
760         user_data = to_user_ptr(args->data_ptr);
761         remain = args->size;
762
763         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
764
765         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
766                 /* If we're not in the cpu write domain, set ourself into the gtt
767                  * write domain and manually flush cachelines (if required). This
768                  * optimizes for the case when the gpu will use the data
769                  * right away and we therefore have to clflush anyway. */
770                 needs_clflush_after = cpu_write_needs_clflush(obj);
771                 ret = i915_gem_object_wait_rendering(obj, false);
772                 if (ret)
773                         return ret;
774
775                 i915_gem_object_retire(obj);
776         }
777         /* Same trick applies to invalidate partially written cachelines read
778          * before writing. */
779         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
780                 needs_clflush_before =
781                         !cpu_cache_is_coherent(dev, obj->cache_level);
782
783         ret = i915_gem_object_get_pages(obj);
784         if (ret)
785                 return ret;
786
787         i915_gem_object_pin_pages(obj);
788
789         offset = args->offset;
790         obj->dirty = 1;
791
792         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
793                          offset >> PAGE_SHIFT) {
794                 struct page *page = sg_page_iter_page(&sg_iter);
795                 int partial_cacheline_write;
796
797                 if (remain <= 0)
798                         break;
799
800                 /* Operation in this page
801                  *
802                  * shmem_page_offset = offset within page in shmem file
803                  * page_length = bytes to copy for this page
804                  */
805                 shmem_page_offset = offset_in_page(offset);
806
807                 page_length = remain;
808                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
809                         page_length = PAGE_SIZE - shmem_page_offset;
810
811                 /* If we don't overwrite a cacheline completely we need to be
812                  * careful to have up-to-date data by first clflushing. Don't
813                  * overcomplicate things and flush the entire patch. */
814                 partial_cacheline_write = needs_clflush_before &&
815                         ((shmem_page_offset | page_length)
816                                 & (boot_cpu_data.x86_clflush_size - 1));
817
818                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
819                         (page_to_phys(page) & (1 << 17)) != 0;
820
821                 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
822                                         user_data, page_do_bit17_swizzling,
823                                         partial_cacheline_write,
824                                         needs_clflush_after);
825                 if (ret == 0)
826                         goto next_page;
827
828                 hit_slowpath = 1;
829                 mutex_unlock(&dev->struct_mutex);
830                 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
831                                         user_data, page_do_bit17_swizzling,
832                                         partial_cacheline_write,
833                                         needs_clflush_after);
834
835                 mutex_lock(&dev->struct_mutex);
836
837                 if (ret)
838                         goto out;
839
840 next_page:
841                 remain -= page_length;
842                 user_data += page_length;
843                 offset += page_length;
844         }
845
846 out:
847         i915_gem_object_unpin_pages(obj);
848
849         if (hit_slowpath) {
850                 /*
851                  * Fixup: Flush cpu caches in case we didn't flush the dirty
852                  * cachelines in-line while writing and the object moved
853                  * out of the cpu write domain while we've dropped the lock.
854                  */
855                 if (!needs_clflush_after &&
856                     obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
857                         if (i915_gem_clflush_object(obj, obj->pin_display))
858                                 i915_gem_chipset_flush(dev);
859                 }
860         }
861
862         if (needs_clflush_after)
863                 i915_gem_chipset_flush(dev);
864
865         return ret;
866 }
867
868 /**
869  * Writes data to the object referenced by handle.
870  *
871  * On error, the contents of the buffer that were to be modified are undefined.
872  */
873 int
874 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
875                       struct drm_file *file)
876 {
877         struct drm_i915_gem_pwrite *args = data;
878         struct drm_i915_gem_object *obj;
879         int ret;
880
881         if (args->size == 0)
882                 return 0;
883
884         if (!access_ok(VERIFY_READ,
885                        to_user_ptr(args->data_ptr),
886                        args->size))
887                 return -EFAULT;
888
889         if (likely(!i915.prefault_disable)) {
890                 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
891                                                    args->size);
892                 if (ret)
893                         return -EFAULT;
894         }
895
896         ret = i915_mutex_lock_interruptible(dev);
897         if (ret)
898                 return ret;
899
900         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
901         if (&obj->base == NULL) {
902                 ret = -ENOENT;
903                 goto unlock;
904         }
905
906         /* Bounds check destination. */
907         if (args->offset > obj->base.size ||
908             args->size > obj->base.size - args->offset) {
909                 ret = -EINVAL;
910                 goto out;
911         }
912
913         /* prime objects have no backing filp to GEM pread/pwrite
914          * pages from.
915          */
916         if (!obj->base.filp) {
917                 ret = -EINVAL;
918                 goto out;
919         }
920
921         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
922
923         ret = -EFAULT;
924         /* We can only do the GTT pwrite on untiled buffers, as otherwise
925          * it would end up going through the fenced access, and we'll get
926          * different detiling behavior between reading and writing.
927          * pread/pwrite currently are reading and writing from the CPU
928          * perspective, requiring manual detiling by the client.
929          */
930         if (obj->phys_obj) {
931                 ret = i915_gem_phys_pwrite(dev, obj, args, file);
932                 goto out;
933         }
934
935         if (obj->tiling_mode == I915_TILING_NONE &&
936             obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
937             cpu_write_needs_clflush(obj)) {
938                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
939                 /* Note that the gtt paths might fail with non-page-backed user
940                  * pointers (e.g. gtt mappings when moving data between
941                  * textures). Fallback to the shmem path in that case. */
942         }
943
944         if (ret == -EFAULT || ret == -ENOSPC)
945                 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
946
947 out:
948         drm_gem_object_unreference(&obj->base);
949 unlock:
950         mutex_unlock(&dev->struct_mutex);
951         return ret;
952 }
953
954 int
955 i915_gem_check_wedge(struct i915_gpu_error *error,
956                      bool interruptible)
957 {
958         if (i915_reset_in_progress(error)) {
959                 /* Non-interruptible callers can't handle -EAGAIN, hence return
960                  * -EIO unconditionally for these. */
961                 if (!interruptible)
962                         return -EIO;
963
964                 /* Recovery complete, but the reset failed ... */
965                 if (i915_terminally_wedged(error))
966                         return -EIO;
967
968                 return -EAGAIN;
969         }
970
971         return 0;
972 }
973
974 /*
975  * Compare seqno against outstanding lazy request. Emit a request if they are
976  * equal.
977  */
978 static int
979 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
980 {
981         int ret;
982
983         BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
984
985         ret = 0;
986         if (seqno == ring->outstanding_lazy_seqno)
987                 ret = i915_add_request(ring, NULL);
988
989         return ret;
990 }
991
992 static void fake_irq(unsigned long data)
993 {
994         wake_up_process((struct task_struct *)data);
995 }
996
997 static bool missed_irq(struct drm_i915_private *dev_priv,
998                        struct intel_ring_buffer *ring)
999 {
1000         return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
1001 }
1002
1003 static bool can_wait_boost(struct drm_i915_file_private *file_priv)
1004 {
1005         if (file_priv == NULL)
1006                 return true;
1007
1008         return !atomic_xchg(&file_priv->rps_wait_boost, true);
1009 }
1010
1011 /**
1012  * __wait_seqno - wait until execution of seqno has finished
1013  * @ring: the ring expected to report seqno
1014  * @seqno: duh!
1015  * @reset_counter: reset sequence associated with the given seqno
1016  * @interruptible: do an interruptible wait (normally yes)
1017  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1018  *
1019  * Note: It is of utmost importance that the passed in seqno and reset_counter
1020  * values have been read by the caller in an smp safe manner. Where read-side
1021  * locks are involved, it is sufficient to read the reset_counter before
1022  * unlocking the lock that protects the seqno. For lockless tricks, the
1023  * reset_counter _must_ be read before, and an appropriate smp_rmb must be
1024  * inserted.
1025  *
1026  * Returns 0 if the seqno was found within the alloted time. Else returns the
1027  * errno with remaining time filled in timeout argument.
1028  */
1029 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1030                         unsigned reset_counter,
1031                         bool interruptible,
1032                         struct timespec *timeout,
1033                         struct drm_i915_file_private *file_priv)
1034 {
1035         struct drm_device *dev = ring->dev;
1036         struct drm_i915_private *dev_priv = dev->dev_private;
1037         const bool irq_test_in_progress =
1038                 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
1039         struct timespec before, now;
1040         DEFINE_WAIT(wait);
1041         unsigned long timeout_expire;
1042         int ret;
1043
1044         WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");
1045
1046         if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1047                 return 0;
1048
1049         timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
1050
1051         if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
1052                 gen6_rps_boost(dev_priv);
1053                 if (file_priv)
1054                         mod_delayed_work(dev_priv->wq,
1055                                          &file_priv->mm.idle_work,
1056                                          msecs_to_jiffies(100));
1057         }
1058
1059         if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
1060                 return -ENODEV;
1061
1062         /* Record current time in case interrupted by signal, or wedged */
1063         trace_i915_gem_request_wait_begin(ring, seqno);
1064         getrawmonotonic(&before);
1065         for (;;) {
1066                 struct timer_list timer;
1067
1068                 prepare_to_wait(&ring->irq_queue, &wait,
1069                                 interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
1070
1071                 /* We need to check whether any gpu reset happened in between
1072                  * the caller grabbing the seqno and now ... */
1073                 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
1074                         /* ... but upgrade the -EAGAIN to an -EIO if the gpu
1075                          * is truely gone. */
1076                         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1077                         if (ret == 0)
1078                                 ret = -EAGAIN;
1079                         break;
1080                 }
1081
1082                 if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
1083                         ret = 0;
1084                         break;
1085                 }
1086
1087                 if (interruptible && signal_pending(current)) {
1088                         ret = -ERESTARTSYS;
1089                         break;
1090                 }
1091
1092                 if (timeout && time_after_eq(jiffies, timeout_expire)) {
1093                         ret = -ETIME;
1094                         break;
1095                 }
1096
1097                 timer.function = NULL;
1098                 if (timeout || missed_irq(dev_priv, ring)) {
1099                         unsigned long expire;
1100
1101                         setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
1102                         expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
1103                         mod_timer(&timer, expire);
1104                 }
1105
1106                 io_schedule();
1107
1108                 if (timer.function) {
1109                         del_singleshot_timer_sync(&timer);
1110                         destroy_timer_on_stack(&timer);
1111                 }
1112         }
1113         getrawmonotonic(&now);
1114         trace_i915_gem_request_wait_end(ring, seqno);
1115
1116         if (!irq_test_in_progress)
1117                 ring->irq_put(ring);
1118
1119         finish_wait(&ring->irq_queue, &wait);
1120
1121         if (timeout) {
1122                 struct timespec sleep_time = timespec_sub(now, before);
1123                 *timeout = timespec_sub(*timeout, sleep_time);
1124                 if (!timespec_valid(timeout)) /* i.e. negative time remains */
1125                         set_normalized_timespec(timeout, 0, 0);
1126         }
1127
1128         return ret;
1129 }
1130
1131 /**
1132  * Waits for a sequence number to be signaled, and cleans up the
1133  * request and object lists appropriately for that event.
1134  */
1135 int
1136 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1137 {
1138         struct drm_device *dev = ring->dev;
1139         struct drm_i915_private *dev_priv = dev->dev_private;
1140         bool interruptible = dev_priv->mm.interruptible;
1141         int ret;
1142
1143         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1144         BUG_ON(seqno == 0);
1145
1146         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1147         if (ret)
1148                 return ret;
1149
1150         ret = i915_gem_check_olr(ring, seqno);
1151         if (ret)
1152                 return ret;
1153
1154         return __wait_seqno(ring, seqno,
1155                             atomic_read(&dev_priv->gpu_error.reset_counter),
1156                             interruptible, NULL, NULL);
1157 }
1158
1159 static int
1160 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
1161                                      struct intel_ring_buffer *ring)
1162 {
1163         if (!obj->active)
1164                 return 0;
1165
1166         /* Manually manage the write flush as we may have not yet
1167          * retired the buffer.
1168          *
1169          * Note that the last_write_seqno is always the earlier of
1170          * the two (read/write) seqno, so if we haved successfully waited,
1171          * we know we have passed the last write.
1172          */
1173         obj->last_write_seqno = 0;
1174
1175         return 0;
1176 }
1177
1178 /**
1179  * Ensures that all rendering to the object has completed and the object is
1180  * safe to unbind from the GTT or access from the CPU.
1181  */
1182 static __must_check int
1183 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1184                                bool readonly)
1185 {
1186         struct intel_ring_buffer *ring = obj->ring;
1187         u32 seqno;
1188         int ret;
1189
1190         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1191         if (seqno == 0)
1192                 return 0;
1193
1194         ret = i915_wait_seqno(ring, seqno);
1195         if (ret)
1196                 return ret;
1197
1198         return i915_gem_object_wait_rendering__tail(obj, ring);
1199 }
1200
1201 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1202  * as the object state may change during this call.
1203  */
1204 static __must_check int
1205 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1206                                             struct drm_i915_file_private *file_priv,
1207                                             bool readonly)
1208 {
1209         struct drm_device *dev = obj->base.dev;
1210         struct drm_i915_private *dev_priv = dev->dev_private;
1211         struct intel_ring_buffer *ring = obj->ring;
1212         unsigned reset_counter;
1213         u32 seqno;
1214         int ret;
1215
1216         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1217         BUG_ON(!dev_priv->mm.interruptible);
1218
1219         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1220         if (seqno == 0)
1221                 return 0;
1222
1223         ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1224         if (ret)
1225                 return ret;
1226
1227         ret = i915_gem_check_olr(ring, seqno);
1228         if (ret)
1229                 return ret;
1230
1231         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1232         mutex_unlock(&dev->struct_mutex);
1233         ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
1234         mutex_lock(&dev->struct_mutex);
1235         if (ret)
1236                 return ret;
1237
1238         return i915_gem_object_wait_rendering__tail(obj, ring);
1239 }
1240
1241 /**
1242  * Called when user space prepares to use an object with the CPU, either
1243  * through the mmap ioctl's mapping or a GTT mapping.
1244  */
1245 int
1246 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1247                           struct drm_file *file)
1248 {
1249         struct drm_i915_gem_set_domain *args = data;
1250         struct drm_i915_gem_object *obj;
1251         uint32_t read_domains = args->read_domains;
1252         uint32_t write_domain = args->write_domain;
1253         int ret;
1254
1255         /* Only handle setting domains to types used by the CPU. */
1256         if (write_domain & I915_GEM_GPU_DOMAINS)
1257                 return -EINVAL;
1258
1259         if (read_domains & I915_GEM_GPU_DOMAINS)
1260                 return -EINVAL;
1261
1262         /* Having something in the write domain implies it's in the read
1263          * domain, and only that read domain.  Enforce that in the request.
1264          */
1265         if (write_domain != 0 && read_domains != write_domain)
1266                 return -EINVAL;
1267
1268         ret = i915_mutex_lock_interruptible(dev);
1269         if (ret)
1270                 return ret;
1271
1272         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1273         if (&obj->base == NULL) {
1274                 ret = -ENOENT;
1275                 goto unlock;
1276         }
1277
1278         /* Try to flush the object off the GPU without holding the lock.
1279          * We will repeat the flush holding the lock in the normal manner
1280          * to catch cases where we are gazumped.
1281          */
1282         ret = i915_gem_object_wait_rendering__nonblocking(obj,
1283                                                           file->driver_priv,
1284                                                           !write_domain);
1285         if (ret)
1286                 goto unref;
1287
1288         if (read_domains & I915_GEM_DOMAIN_GTT) {
1289                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1290
1291                 /* Silently promote "you're not bound, there was nothing to do"
1292                  * to success, since the client was just asking us to
1293                  * make sure everything was done.
1294                  */
1295                 if (ret == -EINVAL)
1296                         ret = 0;
1297         } else {
1298                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1299         }
1300
1301 unref:
1302         drm_gem_object_unreference(&obj->base);
1303 unlock:
1304         mutex_unlock(&dev->struct_mutex);
1305         return ret;
1306 }
1307
1308 /**
1309  * Called when user space has done writes to this buffer
1310  */
1311 int
1312 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1313                          struct drm_file *file)
1314 {
1315         struct drm_i915_gem_sw_finish *args = data;
1316         struct drm_i915_gem_object *obj;
1317         int ret = 0;
1318
1319         ret = i915_mutex_lock_interruptible(dev);
1320         if (ret)
1321                 return ret;
1322
1323         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1324         if (&obj->base == NULL) {
1325                 ret = -ENOENT;
1326                 goto unlock;
1327         }
1328
1329         /* Pinned buffers may be scanout, so flush the cache */
1330         if (obj->pin_display)
1331                 i915_gem_object_flush_cpu_write_domain(obj, true);
1332
1333         drm_gem_object_unreference(&obj->base);
1334 unlock:
1335         mutex_unlock(&dev->struct_mutex);
1336         return ret;
1337 }
1338
1339 /**
1340  * Maps the contents of an object, returning the address it is mapped
1341  * into.
1342  *
1343  * While the mapping holds a reference on the contents of the object, it doesn't
1344  * imply a ref on the object itself.
1345  */
1346 int
1347 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1348                     struct drm_file *file)
1349 {
1350         struct drm_i915_gem_mmap *args = data;
1351         struct drm_gem_object *obj;
1352         unsigned long addr;
1353
1354         obj = drm_gem_object_lookup(dev, file, args->handle);
1355         if (obj == NULL)
1356                 return -ENOENT;
1357
1358         /* prime objects have no backing filp to GEM mmap
1359          * pages from.
1360          */
1361         if (!obj->filp) {
1362                 drm_gem_object_unreference_unlocked(obj);
1363                 return -EINVAL;
1364         }
1365
1366         addr = vm_mmap(obj->filp, 0, args->size,
1367                        PROT_READ | PROT_WRITE, MAP_SHARED,
1368                        args->offset);
1369         drm_gem_object_unreference_unlocked(obj);
1370         if (IS_ERR((void *)addr))
1371                 return addr;
1372
1373         args->addr_ptr = (uint64_t) addr;
1374
1375         return 0;
1376 }
1377
1378 /**
1379  * i915_gem_fault - fault a page into the GTT
1380  * vma: VMA in question
1381  * vmf: fault info
1382  *
1383  * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1384  * from userspace.  The fault handler takes care of binding the object to
1385  * the GTT (if needed), allocating and programming a fence register (again,
1386  * only if needed based on whether the old reg is still valid or the object
1387  * is tiled) and inserting a new PTE into the faulting process.
1388  *
1389  * Note that the faulting process may involve evicting existing objects
1390  * from the GTT and/or fence registers to make room.  So performance may
1391  * suffer if the GTT working set is large or there are few fence registers
1392  * left.
1393  */
1394 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1395 {
1396         struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1397         struct drm_device *dev = obj->base.dev;
1398         struct drm_i915_private *dev_priv = dev->dev_private;
1399         pgoff_t page_offset;
1400         unsigned long pfn;
1401         int ret = 0;
1402         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1403
1404         intel_runtime_pm_get(dev_priv);
1405
1406         /* We don't use vmf->pgoff since that has the fake offset */
1407         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1408                 PAGE_SHIFT;
1409
1410         ret = i915_mutex_lock_interruptible(dev);
1411         if (ret)
1412                 goto out;
1413
1414         trace_i915_gem_object_fault(obj, page_offset, true, write);
1415
1416         /* Try to flush the object off the GPU first without holding the lock.
1417          * Upon reacquiring the lock, we will perform our sanity checks and then
1418          * repeat the flush holding the lock in the normal manner to catch cases
1419          * where we are gazumped.
1420          */
1421         ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1422         if (ret)
1423                 goto unlock;
1424
1425         /* Access to snoopable pages through the GTT is incoherent. */
1426         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1427                 ret = -EINVAL;
1428                 goto unlock;
1429         }
1430
1431         /* Now bind it into the GTT if needed */
1432         ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
1433         if (ret)
1434                 goto unlock;
1435
1436         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1437         if (ret)
1438                 goto unpin;
1439
1440         ret = i915_gem_object_get_fence(obj);
1441         if (ret)
1442                 goto unpin;
1443
1444         obj->fault_mappable = true;
1445
1446         pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1447         pfn >>= PAGE_SHIFT;
1448         pfn += page_offset;
1449
1450         /* Finally, remap it using the new GTT offset */
1451         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1452 unpin:
1453         i915_gem_object_ggtt_unpin(obj);
1454 unlock:
1455         mutex_unlock(&dev->struct_mutex);
1456 out:
1457         switch (ret) {
1458         case -EIO:
1459                 /* If this -EIO is due to a gpu hang, give the reset code a
1460                  * chance to clean up the mess. Otherwise return the proper
1461                  * SIGBUS. */
1462                 if (i915_terminally_wedged(&dev_priv->gpu_error)) {
1463                         ret = VM_FAULT_SIGBUS;
1464                         break;
1465                 }
1466         case -EAGAIN:
1467                 /*
1468                  * EAGAIN means the gpu is hung and we'll wait for the error
1469                  * handler to reset everything when re-faulting in
1470                  * i915_mutex_lock_interruptible.
1471                  */
1472         case 0:
1473         case -ERESTARTSYS:
1474         case -EINTR:
1475         case -EBUSY:
1476                 /*
1477                  * EBUSY is ok: this just means that another thread
1478                  * already did the job.
1479                  */
1480                 ret = VM_FAULT_NOPAGE;
1481                 break;
1482         case -ENOMEM:
1483                 ret = VM_FAULT_OOM;
1484                 break;
1485         case -ENOSPC:
1486         case -EFAULT:
1487                 ret = VM_FAULT_SIGBUS;
1488                 break;
1489         default:
1490                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1491                 ret = VM_FAULT_SIGBUS;
1492                 break;
1493         }
1494
1495         intel_runtime_pm_put(dev_priv);
1496         return ret;
1497 }
1498
1499 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1500 {
1501         struct i915_vma *vma;
1502
1503         /*
1504          * Only the global gtt is relevant for gtt memory mappings, so restrict
1505          * list traversal to objects bound into the global address space. Note
1506          * that the active list should be empty, but better safe than sorry.
1507          */
1508         WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
1509         list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
1510                 i915_gem_release_mmap(vma->obj);
1511         list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
1512                 i915_gem_release_mmap(vma->obj);
1513 }
1514
1515 /**
1516  * i915_gem_release_mmap - remove physical page mappings
1517  * @obj: obj in question
1518  *
1519  * Preserve the reservation of the mmapping with the DRM core code, but
1520  * relinquish ownership of the pages back to the system.
1521  *
1522  * It is vital that we remove the page mapping if we have mapped a tiled
1523  * object through the GTT and then lose the fence register due to
1524  * resource pressure. Similarly if the object has been moved out of the
1525  * aperture, than pages mapped into userspace must be revoked. Removing the
1526  * mapping will then trigger a page fault on the next user access, allowing
1527  * fixup by i915_gem_fault().
1528  */
1529 void
1530 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1531 {
1532         if (!obj->fault_mappable)
1533                 return;
1534
1535         drm_vma_node_unmap(&obj->base.vma_node,
1536                            obj->base.dev->anon_inode->i_mapping);
1537         obj->fault_mappable = false;
1538 }
1539
1540 uint32_t
1541 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1542 {
1543         uint32_t gtt_size;
1544
1545         if (INTEL_INFO(dev)->gen >= 4 ||
1546             tiling_mode == I915_TILING_NONE)
1547                 return size;
1548
1549         /* Previous chips need a power-of-two fence region when tiling */
1550         if (INTEL_INFO(dev)->gen == 3)
1551                 gtt_size = 1024*1024;
1552         else
1553                 gtt_size = 512*1024;
1554
1555         while (gtt_size < size)
1556                 gtt_size <<= 1;
1557
1558         return gtt_size;
1559 }
1560
1561 /**
1562  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1563  * @obj: object to check
1564  *
1565  * Return the required GTT alignment for an object, taking into account
1566  * potential fence register mapping.
1567  */
1568 uint32_t
1569 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1570                            int tiling_mode, bool fenced)
1571 {
1572         /*
1573          * Minimum alignment is 4k (GTT page size), but might be greater
1574          * if a fence register is needed for the object.
1575          */
1576         if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1577             tiling_mode == I915_TILING_NONE)
1578                 return 4096;
1579
1580         /*
1581          * Previous chips need to be aligned to the size of the smallest
1582          * fence register that can contain the object.
1583          */
1584         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1585 }
1586
1587 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1588 {
1589         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1590         int ret;
1591
1592         if (drm_vma_node_has_offset(&obj->base.vma_node))
1593                 return 0;
1594
1595         dev_priv->mm.shrinker_no_lock_stealing = true;
1596
1597         ret = drm_gem_create_mmap_offset(&obj->base);
1598         if (ret != -ENOSPC)
1599                 goto out;
1600
1601         /* Badly fragmented mmap space? The only way we can recover
1602          * space is by destroying unwanted objects. We can't randomly release
1603          * mmap_offsets as userspace expects them to be persistent for the
1604          * lifetime of the objects. The closest we can is to release the
1605          * offsets on purgeable objects by truncating it and marking it purged,
1606          * which prevents userspace from ever using that object again.
1607          */
1608         i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1609         ret = drm_gem_create_mmap_offset(&obj->base);
1610         if (ret != -ENOSPC)
1611                 goto out;
1612
1613         i915_gem_shrink_all(dev_priv);
1614         ret = drm_gem_create_mmap_offset(&obj->base);
1615 out:
1616         dev_priv->mm.shrinker_no_lock_stealing = false;
1617
1618         return ret;
1619 }
1620
1621 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1622 {
1623         drm_gem_free_mmap_offset(&obj->base);
1624 }
1625
1626 int
1627 i915_gem_mmap_gtt(struct drm_file *file,
1628                   struct drm_device *dev,
1629                   uint32_t handle,
1630                   uint64_t *offset)
1631 {
1632         struct drm_i915_private *dev_priv = dev->dev_private;
1633         struct drm_i915_gem_object *obj;
1634         int ret;
1635
1636         ret = i915_mutex_lock_interruptible(dev);
1637         if (ret)
1638                 return ret;
1639
1640         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1641         if (&obj->base == NULL) {
1642                 ret = -ENOENT;
1643                 goto unlock;
1644         }
1645
1646         if (obj->base.size > dev_priv->gtt.mappable_end) {
1647                 ret = -E2BIG;
1648                 goto out;
1649         }
1650
1651         if (obj->madv != I915_MADV_WILLNEED) {
1652                 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
1653                 ret = -EFAULT;
1654                 goto out;
1655         }
1656
1657         ret = i915_gem_object_create_mmap_offset(obj);
1658         if (ret)
1659                 goto out;
1660
1661         *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
1662
1663 out:
1664         drm_gem_object_unreference(&obj->base);
1665 unlock:
1666         mutex_unlock(&dev->struct_mutex);
1667         return ret;
1668 }
1669
1670 /**
1671  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1672  * @dev: DRM device
1673  * @data: GTT mapping ioctl data
1674  * @file: GEM object info
1675  *
1676  * Simply returns the fake offset to userspace so it can mmap it.
1677  * The mmap call will end up in drm_gem_mmap(), which will set things
1678  * up so we can get faults in the handler above.
1679  *
1680  * The fault handler will take care of binding the object into the GTT
1681  * (since it may have been evicted to make room for something), allocating
1682  * a fence register, and mapping the appropriate aperture address into
1683  * userspace.
1684  */
1685 int
1686 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1687                         struct drm_file *file)
1688 {
1689         struct drm_i915_gem_mmap_gtt *args = data;
1690
1691         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1692 }
1693
1694 /* Immediately discard the backing storage */
1695 static void
1696 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1697 {
1698         struct inode *inode;
1699
1700         i915_gem_object_free_mmap_offset(obj);
1701
1702         if (obj->base.filp == NULL)
1703                 return;
1704
1705         /* Our goal here is to return as much of the memory as
1706          * is possible back to the system as we are called from OOM.
1707          * To do this we must instruct the shmfs to drop all of its
1708          * backing pages, *now*.
1709          */
1710         inode = file_inode(obj->base.filp);
1711         shmem_truncate_range(inode, 0, (loff_t)-1);
1712
1713         obj->madv = __I915_MADV_PURGED;
1714 }
1715
1716 static inline int
1717 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1718 {
1719         return obj->madv == I915_MADV_DONTNEED;
1720 }
1721
1722 static void
1723 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1724 {
1725         struct sg_page_iter sg_iter;
1726         int ret;
1727
1728         BUG_ON(obj->madv == __I915_MADV_PURGED);
1729
1730         ret = i915_gem_object_set_to_cpu_domain(obj, true);
1731         if (ret) {
1732                 /* In the event of a disaster, abandon all caches and
1733                  * hope for the best.
1734                  */
1735                 WARN_ON(ret != -EIO);
1736                 i915_gem_clflush_object(obj, true);
1737                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1738         }
1739
1740         if (i915_gem_object_needs_bit17_swizzle(obj))
1741                 i915_gem_object_save_bit_17_swizzle(obj);
1742
1743         if (obj->madv == I915_MADV_DONTNEED)
1744                 obj->dirty = 0;
1745
1746         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
1747                 struct page *page = sg_page_iter_page(&sg_iter);
1748
1749                 if (obj->dirty)
1750                         set_page_dirty(page);
1751
1752                 if (obj->madv == I915_MADV_WILLNEED)
1753                         mark_page_accessed(page);
1754
1755                 page_cache_release(page);
1756         }
1757         obj->dirty = 0;
1758
1759         sg_free_table(obj->pages);
1760         kfree(obj->pages);
1761 }
1762
1763 int
1764 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1765 {
1766         const struct drm_i915_gem_object_ops *ops = obj->ops;
1767
1768         if (obj->pages == NULL)
1769                 return 0;
1770
1771         if (obj->pages_pin_count)
1772                 return -EBUSY;
1773
1774         BUG_ON(i915_gem_obj_bound_any(obj));
1775
1776         /* ->put_pages might need to allocate memory for the bit17 swizzle
1777          * array, hence protect them from being reaped by removing them from gtt
1778          * lists early. */
1779         list_del(&obj->global_list);
1780
1781         ops->put_pages(obj);
1782         obj->pages = NULL;
1783
1784         if (i915_gem_object_is_purgeable(obj))
1785                 i915_gem_object_truncate(obj);
1786
1787         return 0;
1788 }
1789
1790 static unsigned long
1791 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1792                   bool purgeable_only)
1793 {
1794         struct list_head still_in_list;
1795         struct drm_i915_gem_object *obj;
1796         unsigned long count = 0;
1797
1798         /*
1799          * As we may completely rewrite the (un)bound list whilst unbinding
1800          * (due to retiring requests) we have to strictly process only
1801          * one element of the list at the time, and recheck the list
1802          * on every iteration.
1803          *
1804          * In particular, we must hold a reference whilst removing the
1805          * object as we may end up waiting for and/or retiring the objects.
1806          * This might release the final reference (held by the active list)
1807          * and result in the object being freed from under us. This is
1808          * similar to the precautions the eviction code must take whilst
1809          * removing objects.
1810          *
1811          * Also note that although these lists do not hold a reference to
1812          * the object we can safely grab one here: The final object
1813          * unreferencing and the bound_list are both protected by the
1814          * dev->struct_mutex and so we won't ever be able to observe an
1815          * object on the bound_list with a reference count equals 0.
1816          */
1817         INIT_LIST_HEAD(&still_in_list);
1818         while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
1819                 obj = list_first_entry(&dev_priv->mm.unbound_list,
1820                                        typeof(*obj), global_list);
1821                 list_move_tail(&obj->global_list, &still_in_list);
1822
1823                 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1824                         continue;
1825
1826                 drm_gem_object_reference(&obj->base);
1827
1828                 if (i915_gem_object_put_pages(obj) == 0)
1829                         count += obj->base.size >> PAGE_SHIFT;
1830
1831                 drm_gem_object_unreference(&obj->base);
1832         }
1833         list_splice(&still_in_list, &dev_priv->mm.unbound_list);
1834
1835         INIT_LIST_HEAD(&still_in_list);
1836         while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
1837                 struct i915_vma *vma, *v;
1838
1839                 obj = list_first_entry(&dev_priv->mm.bound_list,
1840                                        typeof(*obj), global_list);
1841                 list_move_tail(&obj->global_list, &still_in_list);
1842
1843                 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1844                         continue;
1845
1846                 drm_gem_object_reference(&obj->base);
1847
1848                 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1849                         if (i915_vma_unbind(vma))
1850                                 break;
1851
1852                 if (i915_gem_object_put_pages(obj) == 0)
1853                         count += obj->base.size >> PAGE_SHIFT;
1854
1855                 drm_gem_object_unreference(&obj->base);
1856         }
1857         list_splice(&still_in_list, &dev_priv->mm.bound_list);
1858
1859         return count;
1860 }
1861
1862 static unsigned long
1863 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1864 {
1865         return __i915_gem_shrink(dev_priv, target, true);
1866 }
1867
1868 static unsigned long
1869 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1870 {
1871         i915_gem_evict_everything(dev_priv->dev);
1872         return __i915_gem_shrink(dev_priv, LONG_MAX, false);
1873 }
1874
1875 static int
1876 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1877 {
1878         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1879         int page_count, i;
1880         struct address_space *mapping;
1881         struct sg_table *st;
1882         struct scatterlist *sg;
1883         struct sg_page_iter sg_iter;
1884         struct page *page;
1885         unsigned long last_pfn = 0;     /* suppress gcc warning */
1886         gfp_t gfp;
1887
1888         /* Assert that the object is not currently in any GPU domain. As it
1889          * wasn't in the GTT, there shouldn't be any way it could have been in
1890          * a GPU cache
1891          */
1892         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1893         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1894
1895         st = kmalloc(sizeof(*st), GFP_KERNEL);
1896         if (st == NULL)
1897                 return -ENOMEM;
1898
1899         page_count = obj->base.size / PAGE_SIZE;
1900         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
1901                 kfree(st);
1902                 return -ENOMEM;
1903         }
1904
1905         /* Get the list of pages out of our struct file.  They'll be pinned
1906          * at this point until we release them.
1907          *
1908          * Fail silently without starting the shrinker
1909          */
1910         mapping = file_inode(obj->base.filp)->i_mapping;
1911         gfp = mapping_gfp_mask(mapping);
1912         gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1913         gfp &= ~(__GFP_IO | __GFP_WAIT);
1914         sg = st->sgl;
1915         st->nents = 0;
1916         for (i = 0; i < page_count; i++) {
1917                 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1918                 if (IS_ERR(page)) {
1919                         i915_gem_purge(dev_priv, page_count);
1920                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1921                 }
1922                 if (IS_ERR(page)) {
1923                         /* We've tried hard to allocate the memory by reaping
1924                          * our own buffer, now let the real VM do its job and
1925                          * go down in flames if truly OOM.
1926                          */
1927                         gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
1928                         gfp |= __GFP_IO | __GFP_WAIT;
1929
1930                         i915_gem_shrink_all(dev_priv);
1931                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1932                         if (IS_ERR(page))
1933                                 goto err_pages;
1934
1935                         gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1936                         gfp &= ~(__GFP_IO | __GFP_WAIT);
1937                 }
1938 #ifdef CONFIG_SWIOTLB
1939                 if (swiotlb_nr_tbl()) {
1940                         st->nents++;
1941                         sg_set_page(sg, page, PAGE_SIZE, 0);
1942                         sg = sg_next(sg);
1943                         continue;
1944                 }
1945 #endif
1946                 if (!i || page_to_pfn(page) != last_pfn + 1) {
1947                         if (i)
1948                                 sg = sg_next(sg);
1949                         st->nents++;
1950                         sg_set_page(sg, page, PAGE_SIZE, 0);
1951                 } else {
1952                         sg->length += PAGE_SIZE;
1953                 }
1954                 last_pfn = page_to_pfn(page);
1955
1956                 /* Check that the i965g/gm workaround works. */
1957                 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
1958         }
1959 #ifdef CONFIG_SWIOTLB
1960         if (!swiotlb_nr_tbl())
1961 #endif
1962                 sg_mark_end(sg);
1963         obj->pages = st;
1964
1965         if (i915_gem_object_needs_bit17_swizzle(obj))
1966                 i915_gem_object_do_bit_17_swizzle(obj);
1967
1968         return 0;
1969
1970 err_pages:
1971         sg_mark_end(sg);
1972         for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
1973                 page_cache_release(sg_page_iter_page(&sg_iter));
1974         sg_free_table(st);
1975         kfree(st);
1976         return PTR_ERR(page);
1977 }
1978
1979 /* Ensure that the associated pages are gathered from the backing storage
1980  * and pinned into our object. i915_gem_object_get_pages() may be called
1981  * multiple times before they are released by a single call to
1982  * i915_gem_object_put_pages() - once the pages are no longer referenced
1983  * either as a result of memory pressure (reaping pages under the shrinker)
1984  * or as the object is itself released.
1985  */
1986 int
1987 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1988 {
1989         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1990         const struct drm_i915_gem_object_ops *ops = obj->ops;
1991         int ret;
1992
1993         if (obj->pages)
1994                 return 0;
1995
1996         if (obj->madv != I915_MADV_WILLNEED) {
1997                 DRM_DEBUG("Attempting to obtain a purgeable object\n");
1998                 return -EFAULT;
1999         }
2000
2001         BUG_ON(obj->pages_pin_count);
2002
2003         ret = ops->get_pages(obj);
2004         if (ret)
2005                 return ret;
2006
2007         list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2008         return 0;
2009 }
2010
2011 static void
2012 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2013                                struct intel_ring_buffer *ring)
2014 {
2015         struct drm_device *dev = obj->base.dev;
2016         struct drm_i915_private *dev_priv = dev->dev_private;
2017         u32 seqno = intel_ring_get_seqno(ring);
2018
2019         BUG_ON(ring == NULL);
2020         if (obj->ring != ring && obj->last_write_seqno) {
2021                 /* Keep the seqno relative to the current ring */
2022                 obj->last_write_seqno = seqno;
2023         }
2024         obj->ring = ring;
2025
2026         /* Add a reference if we're newly entering the active list. */
2027         if (!obj->active) {
2028                 drm_gem_object_reference(&obj->base);
2029                 obj->active = 1;
2030         }
2031
2032         list_move_tail(&obj->ring_list, &ring->active_list);
2033
2034         obj->last_read_seqno = seqno;
2035
2036         if (obj->fenced_gpu_access) {
2037                 obj->last_fenced_seqno = seqno;
2038
2039                 /* Bump MRU to take account of the delayed flush */
2040                 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2041                         struct drm_i915_fence_reg *reg;
2042
2043                         reg = &dev_priv->fence_regs[obj->fence_reg];
2044                         list_move_tail(&reg->lru_list,
2045                                        &dev_priv->mm.fence_list);
2046                 }
2047         }
2048 }
2049
2050 void i915_vma_move_to_active(struct i915_vma *vma,
2051                              struct intel_ring_buffer *ring)
2052 {
2053         list_move_tail(&vma->mm_list, &vma->vm->active_list);
2054         return i915_gem_object_move_to_active(vma->obj, ring);
2055 }
2056
2057 static void
2058 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2059 {
2060         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2061         struct i915_address_space *vm;
2062         struct i915_vma *vma;
2063
2064         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2065         BUG_ON(!obj->active);
2066
2067         list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2068                 vma = i915_gem_obj_to_vma(obj, vm);
2069                 if (vma && !list_empty(&vma->mm_list))
2070                         list_move_tail(&vma->mm_list, &vm->inactive_list);
2071         }
2072
2073         list_del_init(&obj->ring_list);
2074         obj->ring = NULL;
2075
2076         obj->last_read_seqno = 0;
2077         obj->last_write_seqno = 0;
2078         obj->base.write_domain = 0;
2079
2080         obj->last_fenced_seqno = 0;
2081         obj->fenced_gpu_access = false;
2082
2083         obj->active = 0;
2084         drm_gem_object_unreference(&obj->base);
2085
2086         WARN_ON(i915_verify_lists(dev));
2087 }
2088
2089 static void
2090 i915_gem_object_retire(struct drm_i915_gem_object *obj)
2091 {
2092         struct intel_ring_buffer *ring = obj->ring;
2093
2094         if (ring == NULL)
2095                 return;
2096
2097         if (i915_seqno_passed(ring->get_seqno(ring, true),
2098                               obj->last_read_seqno))
2099                 i915_gem_object_move_to_inactive(obj);
2100 }
2101
2102 static int
2103 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2104 {
2105         struct drm_i915_private *dev_priv = dev->dev_private;
2106         struct intel_ring_buffer *ring;
2107         int ret, i, j;
2108
2109         /* Carefully retire all requests without writing to the rings */
2110         for_each_ring(ring, dev_priv, i) {
2111                 ret = intel_ring_idle(ring);
2112                 if (ret)
2113                         return ret;
2114         }
2115         i915_gem_retire_requests(dev);
2116
2117         /* Finally reset hw state */
2118         for_each_ring(ring, dev_priv, i) {
2119                 intel_ring_init_seqno(ring, seqno);
2120
2121                 for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
2122                         ring->semaphore.sync_seqno[j] = 0;
2123         }
2124
2125         return 0;
2126 }
2127
2128 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2129 {
2130         struct drm_i915_private *dev_priv = dev->dev_private;
2131         int ret;
2132
2133         if (seqno == 0)
2134                 return -EINVAL;
2135
2136         /* HWS page needs to be set less than what we
2137          * will inject to ring
2138          */
2139         ret = i915_gem_init_seqno(dev, seqno - 1);
2140         if (ret)
2141                 return ret;
2142
2143         /* Carefully set the last_seqno value so that wrap
2144          * detection still works
2145          */
2146         dev_priv->next_seqno = seqno;
2147         dev_priv->last_seqno = seqno - 1;
2148         if (dev_priv->last_seqno == 0)
2149                 dev_priv->last_seqno--;
2150
2151         return 0;
2152 }
2153
2154 int
2155 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2156 {
2157         struct drm_i915_private *dev_priv = dev->dev_private;
2158
2159         /* reserve 0 for non-seqno */
2160         if (dev_priv->next_seqno == 0) {
2161                 int ret = i915_gem_init_seqno(dev, 0);
2162                 if (ret)
2163                         return ret;
2164
2165                 dev_priv->next_seqno = 1;
2166         }
2167
2168         *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
2169         return 0;
2170 }
2171
2172 int __i915_add_request(struct intel_ring_buffer *ring,
2173                        struct drm_file *file,
2174                        struct drm_i915_gem_object *obj,
2175                        u32 *out_seqno)
2176 {
2177         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2178         struct drm_i915_gem_request *request;
2179         u32 request_ring_position, request_start;
2180         int ret;
2181
2182         request_start = intel_ring_get_tail(ring);
2183         /*
2184          * Emit any outstanding flushes - execbuf can fail to emit the flush
2185          * after having emitted the batchbuffer command. Hence we need to fix
2186          * things up similar to emitting the lazy request. The difference here
2187          * is that the flush _must_ happen before the next request, no matter
2188          * what.
2189          */
2190         ret = intel_ring_flush_all_caches(ring);
2191         if (ret)
2192                 return ret;
2193
2194         request = ring->preallocated_lazy_request;
2195         if (WARN_ON(request == NULL))
2196                 return -ENOMEM;
2197
2198         /* Record the position of the start of the request so that
2199          * should we detect the updated seqno part-way through the
2200          * GPU processing the request, we never over-estimate the
2201          * position of the head.
2202          */
2203         request_ring_position = intel_ring_get_tail(ring);
2204
2205         ret = ring->add_request(ring);
2206         if (ret)
2207                 return ret;
2208
2209         request->seqno = intel_ring_get_seqno(ring);
2210         request->ring = ring;
2211         request->head = request_start;
2212         request->tail = request_ring_position;
2213
2214         /* Whilst this request exists, batch_obj will be on the
2215          * active_list, and so will hold the active reference. Only when this
2216          * request is retired will the the batch_obj be moved onto the
2217          * inactive_list and lose its active reference. Hence we do not need
2218          * to explicitly hold another reference here.
2219          */
2220         request->batch_obj = obj;
2221
2222         /* Hold a reference to the current context so that we can inspect
2223          * it later in case a hangcheck error event fires.
2224          */
2225         request->ctx = ring->last_context;
2226         if (request->ctx)
2227                 i915_gem_context_reference(request->ctx);
2228
2229         request->emitted_jiffies = jiffies;
2230         list_add_tail(&request->list, &ring->request_list);
2231         request->file_priv = NULL;
2232
2233         if (file) {
2234                 struct drm_i915_file_private *file_priv = file->driver_priv;
2235
2236                 spin_lock(&file_priv->mm.lock);
2237                 request->file_priv = file_priv;
2238                 list_add_tail(&request->client_list,
2239                               &file_priv->mm.request_list);
2240                 spin_unlock(&file_priv->mm.lock);
2241         }
2242
2243         trace_i915_gem_request_add(ring, request->seqno);
2244         ring->outstanding_lazy_seqno = 0;
2245         ring->preallocated_lazy_request = NULL;
2246
2247         if (!dev_priv->ums.mm_suspended) {
2248                 i915_queue_hangcheck(ring->dev);
2249
2250                 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2251                 queue_delayed_work(dev_priv->wq,
2252                                    &dev_priv->mm.retire_work,
2253                                    round_jiffies_up_relative(HZ));
2254                 intel_mark_busy(dev_priv->dev);
2255         }
2256
2257         if (out_seqno)
2258                 *out_seqno = request->seqno;
2259         return 0;
2260 }
2261
2262 static inline void
2263 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2264 {
2265         struct drm_i915_file_private *file_priv = request->file_priv;
2266
2267         if (!file_priv)
2268                 return;
2269
2270         spin_lock(&file_priv->mm.lock);
2271         list_del(&request->client_list);
2272         request->file_priv = NULL;
2273         spin_unlock(&file_priv->mm.lock);
2274 }
2275
2276 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2277                                    const struct i915_hw_context *ctx)
2278 {
2279         unsigned long elapsed;
2280
2281         elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2282
2283         if (ctx->hang_stats.banned)
2284                 return true;
2285
2286         if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
2287                 if (!i915_gem_context_is_default(ctx)) {
2288                         DRM_DEBUG("context hanging too fast, banning!\n");
2289                         return true;
2290                 } else if (i915_stop_ring_allow_ban(dev_priv)) {
2291                         if (i915_stop_ring_allow_warn(dev_priv))
2292                                 DRM_ERROR("gpu hanging too fast, banning!\n");
2293                         return true;
2294                 }
2295         }
2296
2297         return false;
2298 }
2299
2300 static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2301                                   struct i915_hw_context *ctx,
2302                                   const bool guilty)
2303 {
2304         struct i915_ctx_hang_stats *hs;
2305
2306         if (WARN_ON(!ctx))
2307                 return;
2308
2309         hs = &ctx->hang_stats;
2310
2311         if (guilty) {
2312                 hs->banned = i915_context_is_banned(dev_priv, ctx);
2313                 hs->batch_active++;
2314                 hs->guilty_ts = get_seconds();
2315         } else {
2316                 hs->batch_pending++;
2317         }
2318 }
2319
2320 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2321 {
2322         list_del(&request->list);
2323         i915_gem_request_remove_from_client(request);
2324
2325         if (request->ctx)
2326                 i915_gem_context_unreference(request->ctx);
2327
2328         kfree(request);
2329 }
2330
2331 struct drm_i915_gem_request *
2332 i915_gem_find_active_request(struct intel_ring_buffer *ring)
2333 {
2334         struct drm_i915_gem_request *request;
2335         u32 completed_seqno;
2336
2337         completed_seqno = ring->get_seqno(ring, false);
2338
2339         list_for_each_entry(request, &ring->request_list, list) {
2340                 if (i915_seqno_passed(completed_seqno, request->seqno))
2341                         continue;
2342
2343                 return request;
2344         }
2345
2346         return NULL;
2347 }
2348
2349 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2350                                        struct intel_ring_buffer *ring)
2351 {
2352         struct drm_i915_gem_request *request;
2353         bool ring_hung;
2354
2355         request = i915_gem_find_active_request(ring);
2356
2357         if (request == NULL)
2358                 return;
2359
2360         ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2361
2362         i915_set_reset_status(dev_priv, request->ctx, ring_hung);
2363
2364         list_for_each_entry_continue(request, &ring->request_list, list)
2365                 i915_set_reset_status(dev_priv, request->ctx, false);
2366 }
2367
2368 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2369                                         struct intel_ring_buffer *ring)
2370 {
2371         while (!list_empty(&ring->active_list)) {
2372                 struct drm_i915_gem_object *obj;
2373
2374                 obj = list_first_entry(&ring->active_list,
2375                                        struct drm_i915_gem_object,
2376                                        ring_list);
2377
2378                 i915_gem_object_move_to_inactive(obj);
2379         }
2380
2381         /*
2382          * We must free the requests after all the corresponding objects have
2383          * been moved off active lists. Which is the same order as the normal
2384          * retire_requests function does. This is important if object hold
2385          * implicit references on things like e.g. ppgtt address spaces through
2386          * the request.
2387          */
2388         while (!list_empty(&ring->request_list)) {
2389                 struct drm_i915_gem_request *request;
2390
2391                 request = list_first_entry(&ring->request_list,
2392                                            struct drm_i915_gem_request,
2393                                            list);
2394
2395                 i915_gem_free_request(request);
2396         }
2397
2398         /* These may not have been flush before the reset, do so now */
2399         kfree(ring->preallocated_lazy_request);
2400         ring->preallocated_lazy_request = NULL;
2401         ring->outstanding_lazy_seqno = 0;
2402 }
2403
2404 void i915_gem_restore_fences(struct drm_device *dev)
2405 {
2406         struct drm_i915_private *dev_priv = dev->dev_private;
2407         int i;
2408
2409         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2410                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2411
2412                 /*
2413                  * Commit delayed tiling changes if we have an object still
2414                  * attached to the fence, otherwise just clear the fence.
2415                  */
2416                 if (reg->obj) {
2417                         i915_gem_object_update_fence(reg->obj, reg,
2418                                                      reg->obj->tiling_mode);
2419                 } else {
2420                         i915_gem_write_fence(dev, i, NULL);
2421                 }
2422         }
2423 }
2424
2425 void i915_gem_reset(struct drm_device *dev)
2426 {
2427         struct drm_i915_private *dev_priv = dev->dev_private;
2428         struct intel_ring_buffer *ring;
2429         int i;
2430
2431         /*
2432          * Before we free the objects from the requests, we need to inspect
2433          * them for finding the guilty party. As the requests only borrow
2434          * their reference to the objects, the inspection must be done first.
2435          */
2436         for_each_ring(ring, dev_priv, i)
2437                 i915_gem_reset_ring_status(dev_priv, ring);
2438
2439         for_each_ring(ring, dev_priv, i)
2440                 i915_gem_reset_ring_cleanup(dev_priv, ring);
2441
2442         i915_gem_context_reset(dev);
2443
2444         i915_gem_restore_fences(dev);
2445 }
2446
2447 /**
2448  * This function clears the request list as sequence numbers are passed.
2449  */
2450 void
2451 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2452 {
2453         uint32_t seqno;
2454
2455         if (list_empty(&ring->request_list))
2456                 return;
2457
2458         WARN_ON(i915_verify_lists(ring->dev));
2459
2460         seqno = ring->get_seqno(ring, true);
2461
2462         /* Move any buffers on the active list that are no longer referenced
2463          * by the ringbuffer to the flushing/inactive lists as appropriate,
2464          * before we free the context associated with the requests.
2465          */
2466         while (!list_empty(&ring->active_list)) {
2467                 struct drm_i915_gem_object *obj;
2468
2469                 obj = list_first_entry(&ring->active_list,
2470                                       struct drm_i915_gem_object,
2471                                       ring_list);
2472
2473                 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2474                         break;
2475
2476                 i915_gem_object_move_to_inactive(obj);
2477         }
2478
2479
2480         while (!list_empty(&ring->request_list)) {
2481                 struct drm_i915_gem_request *request;
2482
2483                 request = list_first_entry(&ring->request_list,
2484                                            struct drm_i915_gem_request,
2485                                            list);
2486
2487                 if (!i915_seqno_passed(seqno, request->seqno))
2488                         break;
2489
2490                 trace_i915_gem_request_retire(ring, request->seqno);
2491                 /* We know the GPU must have read the request to have
2492                  * sent us the seqno + interrupt, so use the position
2493                  * of tail of the request to update the last known position
2494                  * of the GPU head.
2495                  */
2496                 ring->last_retired_head = request->tail;
2497
2498                 i915_gem_free_request(request);
2499         }
2500
2501         if (unlikely(ring->trace_irq_seqno &&
2502                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2503                 ring->irq_put(ring);
2504                 ring->trace_irq_seqno = 0;
2505         }
2506
2507         WARN_ON(i915_verify_lists(ring->dev));
2508 }
2509
2510 bool
2511 i915_gem_retire_requests(struct drm_device *dev)
2512 {
2513         struct drm_i915_private *dev_priv = dev->dev_private;
2514         struct intel_ring_buffer *ring;
2515         bool idle = true;
2516         int i;
2517
2518         for_each_ring(ring, dev_priv, i) {
2519                 i915_gem_retire_requests_ring(ring);
2520                 idle &= list_empty(&ring->request_list);
2521         }
2522
2523         if (idle)
2524                 mod_delayed_work(dev_priv->wq,
2525                                    &dev_priv->mm.idle_work,
2526                                    msecs_to_jiffies(100));
2527
2528         return idle;
2529 }
2530
2531 static void
2532 i915_gem_retire_work_handler(struct work_struct *work)
2533 {
2534         struct drm_i915_private *dev_priv =
2535                 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2536         struct drm_device *dev = dev_priv->dev;
2537         bool idle;
2538
2539         /* Come back later if the device is busy... */
2540         idle = false;
2541         if (mutex_trylock(&dev->struct_mutex)) {
2542                 idle = i915_gem_retire_requests(dev);
2543                 mutex_unlock(&dev->struct_mutex);
2544         }
2545         if (!idle)
2546                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2547                                    round_jiffies_up_relative(HZ));
2548 }
2549
2550 static void
2551 i915_gem_idle_work_handler(struct work_struct *work)
2552 {
2553         struct drm_i915_private *dev_priv =
2554                 container_of(work, typeof(*dev_priv), mm.idle_work.work);
2555
2556         intel_mark_idle(dev_priv->dev);
2557 }
2558
2559 /**
2560  * Ensures that an object will eventually get non-busy by flushing any required
2561  * write domains, emitting any outstanding lazy request and retiring and
2562  * completed requests.
2563  */
2564 static int
2565 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2566 {
2567         int ret;
2568
2569         if (obj->active) {
2570                 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2571                 if (ret)
2572                         return ret;
2573
2574                 i915_gem_retire_requests_ring(obj->ring);
2575         }
2576
2577         return 0;
2578 }
2579
2580 /**
2581  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2582  * @DRM_IOCTL_ARGS: standard ioctl arguments
2583  *
2584  * Returns 0 if successful, else an error is returned with the remaining time in
2585  * the timeout parameter.
2586  *  -ETIME: object is still busy after timeout
2587  *  -ERESTARTSYS: signal interrupted the wait
2588  *  -ENONENT: object doesn't exist
2589  * Also possible, but rare:
2590  *  -EAGAIN: GPU wedged
2591  *  -ENOMEM: damn
2592  *  -ENODEV: Internal IRQ fail
2593  *  -E?: The add request failed
2594  *
2595  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2596  * non-zero timeout parameter the wait ioctl will wait for the given number of
2597  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2598  * without holding struct_mutex the object may become re-busied before this
2599  * function completes. A similar but shorter * race condition exists in the busy
2600  * ioctl
2601  */
2602 int
2603 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2604 {
2605         struct drm_i915_private *dev_priv = dev->dev_private;
2606         struct drm_i915_gem_wait *args = data;
2607         struct drm_i915_gem_object *obj;
2608         struct intel_ring_buffer *ring = NULL;
2609         struct timespec timeout_stack, *timeout = NULL;
2610         unsigned reset_counter;
2611         u32 seqno = 0;
2612         int ret = 0;
2613
2614         if (args->timeout_ns >= 0) {
2615                 timeout_stack = ns_to_timespec(args->timeout_ns);
2616                 timeout = &timeout_stack;
2617         }
2618
2619         ret = i915_mutex_lock_interruptible(dev);
2620         if (ret)
2621                 return ret;
2622
2623         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2624         if (&obj->base == NULL) {
2625                 mutex_unlock(&dev->struct_mutex);
2626                 return -ENOENT;
2627         }
2628
2629         /* Need to make sure the object gets inactive eventually. */
2630         ret = i915_gem_object_flush_active(obj);
2631         if (ret)
2632                 goto out;
2633
2634         if (obj->active) {
2635                 seqno = obj->last_read_seqno;
2636                 ring = obj->ring;
2637         }
2638
2639         if (seqno == 0)
2640                  goto out;
2641
2642         /* Do this after OLR check to make sure we make forward progress polling
2643          * on this IOCTL with a 0 timeout (like busy ioctl)
2644          */
2645         if (!args->timeout_ns) {
2646                 ret = -ETIME;
2647                 goto out;
2648         }
2649
2650         drm_gem_object_unreference(&obj->base);
2651         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2652         mutex_unlock(&dev->struct_mutex);
2653
2654         ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
2655         if (timeout)
2656                 args->timeout_ns = timespec_to_ns(timeout);
2657         return ret;
2658
2659 out:
2660         drm_gem_object_unreference(&obj->base);
2661         mutex_unlock(&dev->struct_mutex);
2662         return ret;
2663 }
2664
2665 /**
2666  * i915_gem_object_sync - sync an object to a ring.
2667  *
2668  * @obj: object which may be in use on another ring.
2669  * @to: ring we wish to use the object on. May be NULL.
2670  *
2671  * This code is meant to abstract object synchronization with the GPU.
2672  * Calling with NULL implies synchronizing the object with the CPU
2673  * rather than a particular GPU ring.
2674  *
2675  * Returns 0 if successful, else propagates up the lower layer error.
2676  */
2677 int
2678 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2679                      struct intel_ring_buffer *to)
2680 {
2681         struct intel_ring_buffer *from = obj->ring;
2682         u32 seqno;
2683         int ret, idx;
2684
2685         if (from == NULL || to == from)
2686                 return 0;
2687
2688         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2689                 return i915_gem_object_wait_rendering(obj, false);
2690
2691         idx = intel_ring_sync_index(from, to);
2692
2693         seqno = obj->last_read_seqno;
2694         if (seqno <= from->semaphore.sync_seqno[idx])
2695                 return 0;
2696
2697         ret = i915_gem_check_olr(obj->ring, seqno);
2698         if (ret)
2699                 return ret;
2700
2701         trace_i915_gem_ring_sync_to(from, to, seqno);
2702         ret = to->semaphore.sync_to(to, from, seqno);
2703         if (!ret)
2704                 /* We use last_read_seqno because sync_to()
2705                  * might have just caused seqno wrap under
2706                  * the radar.
2707                  */
2708                 from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
2709
2710         return ret;
2711 }
2712
2713 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2714 {
2715         u32 old_write_domain, old_read_domains;
2716
2717         /* Force a pagefault for domain tracking on next user access */
2718         i915_gem_release_mmap(obj);
2719
2720         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2721                 return;
2722
2723         /* Wait for any direct GTT access to complete */
2724         mb();
2725
2726         old_read_domains = obj->base.read_domains;
2727         old_write_domain = obj->base.write_domain;
2728
2729         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2730         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2731
2732         trace_i915_gem_object_change_domain(obj,
2733                                             old_read_domains,
2734                                             old_write_domain);
2735 }
2736
2737 int i915_vma_unbind(struct i915_vma *vma)
2738 {
2739         struct drm_i915_gem_object *obj = vma->obj;
2740         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2741         int ret;
2742
2743         if (list_empty(&vma->vma_link))
2744                 return 0;
2745
2746         if (!drm_mm_node_allocated(&vma->node)) {
2747                 i915_gem_vma_destroy(vma);
2748                 return 0;
2749         }
2750
2751         if (vma->pin_count)
2752                 return -EBUSY;
2753
2754         BUG_ON(obj->pages == NULL);
2755
2756         ret = i915_gem_object_finish_gpu(obj);
2757         if (ret)
2758                 return ret;
2759         /* Continue on if we fail due to EIO, the GPU is hung so we
2760          * should be safe and we need to cleanup or else we might
2761          * cause memory corruption through use-after-free.
2762          */
2763
2764         i915_gem_object_finish_gtt(obj);
2765
2766         /* release the fence reg _after_ flushing */
2767         ret = i915_gem_object_put_fence(obj);
2768         if (ret)
2769                 return ret;
2770
2771         trace_i915_vma_unbind(vma);
2772
2773         vma->unbind_vma(vma);
2774
2775         i915_gem_gtt_finish_object(obj);
2776
2777         list_del_init(&vma->mm_list);
2778         /* Avoid an unnecessary call to unbind on rebind. */
2779         if (i915_is_ggtt(vma->vm))
2780                 obj->map_and_fenceable = true;
2781
2782         drm_mm_remove_node(&vma->node);
2783         i915_gem_vma_destroy(vma);
2784
2785         /* Since the unbound list is global, only move to that list if
2786          * no more VMAs exist. */
2787         if (list_empty(&obj->vma_list))
2788                 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2789
2790         /* And finally now the object is completely decoupled from this vma,
2791          * we can drop its hold on the backing storage and allow it to be
2792          * reaped by the shrinker.
2793          */
2794         i915_gem_object_unpin_pages(obj);
2795
2796         return 0;
2797 }
2798
2799 int i915_gpu_idle(struct drm_device *dev)
2800 {
2801         struct drm_i915_private *dev_priv = dev->dev_private;
2802         struct intel_ring_buffer *ring;
2803         int ret, i;
2804
2805         /* Flush everything onto the inactive list. */
2806         for_each_ring(ring, dev_priv, i) {
2807                 ret = i915_switch_context(ring, ring->default_context);
2808                 if (ret)
2809                         return ret;
2810
2811                 ret = intel_ring_idle(ring);
2812                 if (ret)
2813                         return ret;
2814         }
2815
2816         return 0;
2817 }
2818
2819 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2820                                  struct drm_i915_gem_object *obj)
2821 {
2822         struct drm_i915_private *dev_priv = dev->dev_private;
2823         int fence_reg;
2824         int fence_pitch_shift;
2825
2826         if (INTEL_INFO(dev)->gen >= 6) {
2827                 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2828                 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2829         } else {
2830                 fence_reg = FENCE_REG_965_0;
2831                 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2832         }
2833
2834         fence_reg += reg * 8;
2835
2836         /* To w/a incoherency with non-atomic 64-bit register updates,
2837          * we split the 64-bit update into two 32-bit writes. In order
2838          * for a partial fence not to be evaluated between writes, we
2839          * precede the update with write to turn off the fence register,
2840          * and only enable the fence as the last step.
2841          *
2842          * For extra levels of paranoia, we make sure each step lands
2843          * before applying the next step.
2844          */
2845         I915_WRITE(fence_reg, 0);
2846         POSTING_READ(fence_reg);
2847
2848         if (obj) {
2849                 u32 size = i915_gem_obj_ggtt_size(obj);
2850                 uint64_t val;
2851
2852                 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
2853                                  0xfffff000) << 32;
2854                 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
2855                 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2856                 if (obj->tiling_mode == I915_TILING_Y)
2857                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2858                 val |= I965_FENCE_REG_VALID;
2859
2860                 I915_WRITE(fence_reg + 4, val >> 32);
2861                 POSTING_READ(fence_reg + 4);
2862
2863                 I915_WRITE(fence_reg + 0, val);
2864                 POSTING_READ(fence_reg);
2865         } else {
2866                 I915_WRITE(fence_reg + 4, 0);
2867                 POSTING_READ(fence_reg + 4);
2868         }
2869 }
2870
2871 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2872                                  struct drm_i915_gem_object *obj)
2873 {
2874         struct drm_i915_private *dev_priv = dev->dev_private;
2875         u32 val;
2876
2877         if (obj) {
2878                 u32 size = i915_gem_obj_ggtt_size(obj);
2879                 int pitch_val;
2880                 int tile_width;
2881
2882                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
2883                      (size & -size) != size ||
2884                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2885                      "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2886                      i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
2887
2888                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2889                         tile_width = 128;
2890                 else
2891                         tile_width = 512;
2892
2893                 /* Note: pitch better be a power of two tile widths */
2894                 pitch_val = obj->stride / tile_width;
2895                 pitch_val = ffs(pitch_val) - 1;
2896
2897                 val = i915_gem_obj_ggtt_offset(obj);
2898                 if (obj->tiling_mode == I915_TILING_Y)
2899                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2900                 val |= I915_FENCE_SIZE_BITS(size);
2901                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2902                 val |= I830_FENCE_REG_VALID;
2903         } else
2904                 val = 0;
2905
2906         if (reg < 8)
2907                 reg = FENCE_REG_830_0 + reg * 4;
2908         else
2909                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2910
2911         I915_WRITE(reg, val);
2912         POSTING_READ(reg);
2913 }
2914
2915 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2916                                 struct drm_i915_gem_object *obj)
2917 {
2918         struct drm_i915_private *dev_priv = dev->dev_private;
2919         uint32_t val;
2920
2921         if (obj) {
2922                 u32 size = i915_gem_obj_ggtt_size(obj);
2923                 uint32_t pitch_val;
2924
2925                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
2926                      (size & -size) != size ||
2927                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2928                      "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2929                      i915_gem_obj_ggtt_offset(obj), size);
2930
2931                 pitch_val = obj->stride / 128;
2932                 pitch_val = ffs(pitch_val) - 1;
2933
2934                 val = i915_gem_obj_ggtt_offset(obj);
2935                 if (obj->tiling_mode == I915_TILING_Y)
2936                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2937                 val |= I830_FENCE_SIZE_BITS(size);
2938                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2939                 val |= I830_FENCE_REG_VALID;
2940         } else
2941                 val = 0;
2942
2943         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2944         POSTING_READ(FENCE_REG_830_0 + reg * 4);
2945 }
2946
2947 inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2948 {
2949         return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2950 }
2951
2952 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2953                                  struct drm_i915_gem_object *obj)
2954 {
2955         struct drm_i915_private *dev_priv = dev->dev_private;
2956
2957         /* Ensure that all CPU reads are completed before installing a fence
2958          * and all writes before removing the fence.
2959          */
2960         if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2961                 mb();
2962
2963         WARN(obj && (!obj->stride || !obj->tiling_mode),
2964              "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2965              obj->stride, obj->tiling_mode);
2966
2967         switch (INTEL_INFO(dev)->gen) {
2968         case 8:
2969         case 7:
2970         case 6:
2971         case 5:
2972         case 4: i965_write_fence_reg(dev, reg, obj); break;
2973         case 3: i915_write_fence_reg(dev, reg, obj); break;
2974         case 2: i830_write_fence_reg(dev, reg, obj); break;
2975         default: BUG();
2976         }
2977
2978         /* And similarly be paranoid that no direct access to this region
2979          * is reordered to before the fence is installed.
2980          */
2981         if (i915_gem_object_needs_mb(obj))
2982                 mb();
2983 }
2984
2985 static inline int fence_number(struct drm_i915_private *dev_priv,
2986                                struct drm_i915_fence_reg *fence)
2987 {
2988         return fence - dev_priv->fence_regs;
2989 }
2990
2991 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2992                                          struct drm_i915_fence_reg *fence,
2993                                          bool enable)
2994 {
2995         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2996         int reg = fence_number(dev_priv, fence);
2997
2998         i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2999
3000         if (enable) {
3001                 obj->fence_reg = reg;
3002                 fence->obj = obj;
3003                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3004         } else {
3005                 obj->fence_reg = I915_FENCE_REG_NONE;
3006                 fence->obj = NULL;
3007                 list_del_init(&fence->lru_list);
3008         }
3009         obj->fence_dirty = false;
3010 }
3011
3012 static int
3013 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
3014 {
3015         if (obj->last_fenced_seqno) {
3016                 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
3017                 if (ret)
3018                         return ret;
3019
3020                 obj->last_fenced_seqno = 0;
3021         }
3022
3023         obj->fenced_gpu_access = false;
3024         return 0;
3025 }
3026
3027 int
3028 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3029 {
3030         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3031         struct drm_i915_fence_reg *fence;
3032         int ret;
3033
3034         ret = i915_gem_object_wait_fence(obj);
3035         if (ret)
3036                 return ret;
3037
3038         if (obj->fence_reg == I915_FENCE_REG_NONE)
3039                 return 0;
3040
3041         fence = &dev_priv->fence_regs[obj->fence_reg];
3042
3043         i915_gem_object_fence_lost(obj);
3044         i915_gem_object_update_fence(obj, fence, false);
3045
3046         return 0;
3047 }
3048
3049 static struct drm_i915_fence_reg *
3050 i915_find_fence_reg(struct drm_device *dev)
3051 {
3052         struct drm_i915_private *dev_priv = dev->dev_private;
3053         struct drm_i915_fence_reg *reg, *avail;
3054         int i;
3055
3056         /* First try to find a free reg */
3057         avail = NULL;
3058         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3059                 reg = &dev_priv->fence_regs[i];
3060                 if (!reg->obj)
3061                         return reg;
3062
3063                 if (!reg->pin_count)
3064                         avail = reg;
3065         }
3066
3067         if (avail == NULL)
3068                 goto deadlock;
3069
3070         /* None available, try to steal one or wait for a user to finish */
3071         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3072                 if (reg->pin_count)
3073                         continue;
3074
3075                 return reg;
3076         }
3077
3078 deadlock:
3079         /* Wait for completion of pending flips which consume fences */
3080         if (intel_has_pending_fb_unpin(dev))
3081                 return ERR_PTR(-EAGAIN);
3082
3083         return ERR_PTR(-EDEADLK);
3084 }
3085
3086 /**
3087  * i915_gem_object_get_fence - set up fencing for an object
3088  * @obj: object to map through a fence reg
3089  *
3090  * When mapping objects through the GTT, userspace wants to be able to write
3091  * to them without having to worry about swizzling if the object is tiled.
3092  * This function walks the fence regs looking for a free one for @obj,
3093  * stealing one if it can't find any.
3094  *
3095  * It then sets up the reg based on the object's properties: address, pitch
3096  * and tiling format.
3097  *
3098  * For an untiled surface, this removes any existing fence.
3099  */
3100 int
3101 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3102 {
3103         struct drm_device *dev = obj->base.dev;
3104         struct drm_i915_private *dev_priv = dev->dev_private;
3105         bool enable = obj->tiling_mode != I915_TILING_NONE;
3106         struct drm_i915_fence_reg *reg;
3107         int ret;
3108
3109         /* Have we updated the tiling parameters upon the object and so
3110          * will need to serialise the write to the associated fence register?
3111          */
3112         if (obj->fence_dirty) {
3113                 ret = i915_gem_object_wait_fence(obj);
3114                 if (ret)
3115                         return ret;
3116         }
3117
3118         /* Just update our place in the LRU if our fence is getting reused. */
3119         if (obj->fence_reg != I915_FENCE_REG_NONE) {
3120                 reg = &dev_priv->fence_regs[obj->fence_reg];
3121                 if (!obj->fence_dirty) {
3122                         list_move_tail(&reg->lru_list,
3123                                        &dev_priv->mm.fence_list);
3124                         return 0;
3125                 }
3126         } else if (enable) {
3127                 reg = i915_find_fence_reg(dev);
3128                 if (IS_ERR(reg))
3129                         return PTR_ERR(reg);
3130
3131                 if (reg->obj) {
3132                         struct drm_i915_gem_object *old = reg->obj;
3133
3134                         ret = i915_gem_object_wait_fence(old);
3135                         if (ret)
3136                                 return ret;
3137
3138                         i915_gem_object_fence_lost(old);
3139                 }
3140         } else
3141                 return 0;
3142
3143         i915_gem_object_update_fence(obj, reg, enable);
3144
3145         return 0;
3146 }
3147
3148 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3149                                      struct drm_mm_node *gtt_space,
3150                                      unsigned long cache_level)
3151 {
3152         struct drm_mm_node *other;
3153
3154         /* On non-LLC machines we have to be careful when putting differing
3155          * types of snoopable memory together to avoid the prefetcher
3156          * crossing memory domains and dying.
3157          */
3158         if (HAS_LLC(dev))
3159                 return true;
3160
3161         if (!drm_mm_node_allocated(gtt_space))
3162                 return true;
3163
3164         if (list_empty(&gtt_space->node_list))
3165                 return true;
3166
3167         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3168         if (other->allocated && !other->hole_follows && other->color != cache_level)
3169                 return false;
3170
3171         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3172         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3173                 return false;
3174
3175         return true;
3176 }
3177
3178 static void i915_gem_verify_gtt(struct drm_device *dev)
3179 {
3180 #if WATCH_GTT
3181         struct drm_i915_private *dev_priv = dev->dev_private;
3182         struct drm_i915_gem_object *obj;
3183         int err = 0;
3184
3185         list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
3186                 if (obj->gtt_space == NULL) {
3187                         printk(KERN_ERR "object found on GTT list with no space reserved\n");
3188                         err++;
3189                         continue;
3190                 }
3191
3192                 if (obj->cache_level != obj->gtt_space->color) {
3193                         printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3194                                i915_gem_obj_ggtt_offset(obj),
3195                                i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3196                                obj->cache_level,
3197                                obj->gtt_space->color);
3198                         err++;
3199                         continue;
3200                 }
3201
3202                 if (!i915_gem_valid_gtt_space(dev,
3203                                               obj->gtt_space,
3204                                               obj->cache_level)) {
3205                         printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3206                                i915_gem_obj_ggtt_offset(obj),
3207                                i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3208                                obj->cache_level);
3209                         err++;
3210                         continue;
3211                 }
3212         }
3213
3214         WARN_ON(err);
3215 #endif
3216 }
3217
3218 /**
3219  * Finds free space in the GTT aperture and binds the object there.
3220  */
3221 static struct i915_vma *
3222 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3223                            struct i915_address_space *vm,
3224                            unsigned alignment,
3225                            unsigned flags)
3226 {
3227         struct drm_device *dev = obj->base.dev;
3228         struct drm_i915_private *dev_priv = dev->dev_private;
3229         u32 size, fence_size, fence_alignment, unfenced_alignment;
3230         size_t gtt_max =
3231                 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
3232         struct i915_vma *vma;
3233         int ret;
3234
3235         fence_size = i915_gem_get_gtt_size(dev,
3236                                            obj->base.size,
3237                                            obj->tiling_mode);
3238         fence_alignment = i915_gem_get_gtt_alignment(dev,
3239                                                      obj->base.size,
3240                                                      obj->tiling_mode, true);
3241         unfenced_alignment =
3242                 i915_gem_get_gtt_alignment(dev,
3243                                            obj->base.size,
3244                                            obj->tiling_mode, false);
3245
3246         if (alignment == 0)
3247                 alignment = flags & PIN_MAPPABLE ? fence_alignment :
3248                                                 unfenced_alignment;
3249         if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3250                 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
3251                 return ERR_PTR(-EINVAL);
3252         }
3253
3254         size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3255
3256         /* If the object is bigger than the entire aperture, reject it early
3257          * before evicting everything in a vain attempt to find space.
3258          */
3259         if (obj->base.size > gtt_max) {
3260                 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
3261                           obj->base.size,
3262                           flags & PIN_MAPPABLE ? "mappable" : "total",
3263                           gtt_max);
3264                 return ERR_PTR(-E2BIG);
3265         }
3266
3267         ret = i915_gem_object_get_pages(obj);
3268         if (ret)
3269                 return ERR_PTR(ret);
3270
3271         i915_gem_object_pin_pages(obj);
3272
3273         vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3274         if (IS_ERR(vma))
3275                 goto err_unpin;
3276
3277 search_free:
3278         ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3279                                                   size, alignment,
3280                                                   obj->cache_level, 0, gtt_max,
3281                                                   DRM_MM_SEARCH_DEFAULT,
3282                                                   DRM_MM_CREATE_DEFAULT);
3283         if (ret) {
3284                 ret = i915_gem_evict_something(dev, vm, size, alignment,
3285                                                obj->cache_level, flags);
3286                 if (ret == 0)
3287                         goto search_free;
3288
3289                 goto err_free_vma;
3290         }
3291         if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
3292                                               obj->cache_level))) {
3293                 ret = -EINVAL;
3294                 goto err_remove_node;
3295         }
3296
3297         ret = i915_gem_gtt_prepare_object(obj);
3298         if (ret)
3299                 goto err_remove_node;
3300
3301         list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3302         list_add_tail(&vma->mm_list, &vm->inactive_list);
3303
3304         if (i915_is_ggtt(vm)) {
3305                 bool mappable, fenceable;
3306
3307                 fenceable = (vma->node.size == fence_size &&
3308                              (vma->node.start & (fence_alignment - 1)) == 0);
3309
3310                 mappable = (vma->node.start + obj->base.size <=
3311                             dev_priv->gtt.mappable_end);
3312
3313                 obj->map_and_fenceable = mappable && fenceable;
3314         }
3315
3316         WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
3317
3318         trace_i915_vma_bind(vma, flags);
3319         vma->bind_vma(vma, obj->cache_level,
3320                       flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
3321
3322         i915_gem_verify_gtt(dev);
3323         return vma;
3324
3325 err_remove_node:
3326         drm_mm_remove_node(&vma->node);
3327 err_free_vma:
3328         i915_gem_vma_destroy(vma);
3329         vma = ERR_PTR(ret);
3330 err_unpin:
3331         i915_gem_object_unpin_pages(obj);
3332         return vma;
3333 }
3334
3335 bool
3336 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3337                         bool force)
3338 {
3339         /* If we don't have a page list set up, then we're not pinned
3340          * to GPU, and we can ignore the cache flush because it'll happen
3341          * again at bind time.
3342          */
3343         if (obj->pages == NULL)
3344                 return false;
3345
3346         /*
3347          * Stolen memory is always coherent with the GPU as it is explicitly
3348          * marked as wc by the system, or the system is cache-coherent.
3349          */
3350         if (obj->stolen)
3351                 return false;
3352
3353         /* If the GPU is snooping the contents of the CPU cache,
3354          * we do not need to manually clear the CPU cache lines.  However,
3355          * the caches are only snooped when the render cache is
3356          * flushed/invalidated.  As we always have to emit invalidations
3357          * and flushes when moving into and out of the RENDER domain, correct
3358          * snooping behaviour occurs naturally as the result of our domain
3359          * tracking.
3360          */
3361         if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3362                 return false;
3363
3364         trace_i915_gem_object_clflush(obj);
3365         drm_clflush_sg(obj->pages);
3366
3367         return true;
3368 }
3369
3370 /** Flushes the GTT write domain for the object if it's dirty. */
3371 static void
3372 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3373 {
3374         uint32_t old_write_domain;
3375
3376         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3377                 return;
3378
3379         /* No actual flushing is required for the GTT write domain.  Writes
3380          * to it immediately go to main memory as far as we know, so there's
3381          * no chipset flush.  It also doesn't land in render cache.
3382          *
3383          * However, we do have to enforce the order so that all writes through
3384          * the GTT land before any writes to the device, such as updates to
3385          * the GATT itself.
3386          */
3387         wmb();
3388
3389         old_write_domain = obj->base.write_domain;
3390         obj->base.write_domain = 0;
3391
3392         trace_i915_gem_object_change_domain(obj,
3393                                             obj->base.read_domains,
3394                                             old_write_domain);
3395 }
3396
3397 /** Flushes the CPU write domain for the object if it's dirty. */
3398 static void
3399 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3400                                        bool force)
3401 {
3402         uint32_t old_write_domain;
3403
3404         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3405                 return;
3406
3407         if (i915_gem_clflush_object(obj, force))
3408                 i915_gem_chipset_flush(obj->base.dev);
3409
3410         old_write_domain = obj->base.write_domain;
3411         obj->base.write_domain = 0;
3412
3413         trace_i915_gem_object_change_domain(obj,
3414                                             obj->base.read_domains,
3415                                             old_write_domain);
3416 }
3417
3418 /**
3419  * Moves a single object to the GTT read, and possibly write domain.
3420  *
3421  * This function returns when the move is complete, including waiting on
3422  * flushes to occur.
3423  */
3424 int
3425 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3426 {
3427         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3428         uint32_t old_write_domain, old_read_domains;
3429         int ret;
3430
3431         /* Not valid to be called on unbound objects. */
3432         if (!i915_gem_obj_bound_any(obj))
3433                 return -EINVAL;
3434
3435         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3436                 return 0;
3437
3438         ret = i915_gem_object_wait_rendering(obj, !write);
3439         if (ret)
3440                 return ret;
3441
3442         i915_gem_object_retire(obj);
3443         i915_gem_object_flush_cpu_write_domain(obj, false);
3444
3445         /* Serialise direct access to this object with the barriers for
3446          * coherent writes from the GPU, by effectively invalidating the
3447          * GTT domain upon first access.
3448          */
3449         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3450                 mb();
3451
3452         old_write_domain = obj->base.write_domain;
3453         old_read_domains = obj->base.read_domains;
3454
3455         /* It should now be out of any other write domains, and we can update
3456          * the domain values for our changes.
3457          */
3458         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3459         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3460         if (write) {
3461                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3462                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3463                 obj->dirty = 1;
3464         }
3465
3466         trace_i915_gem_object_change_domain(obj,
3467                                             old_read_domains,
3468                                             old_write_domain);
3469
3470         /* And bump the LRU for this access */
3471         if (i915_gem_object_is_inactive(obj)) {
3472                 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3473                 if (vma)
3474                         list_move_tail(&vma->mm_list,
3475                                        &dev_priv->gtt.base.inactive_list);
3476
3477         }
3478
3479         return 0;
3480 }
3481
3482 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3483                                     enum i915_cache_level cache_level)
3484 {
3485         struct drm_device *dev = obj->base.dev;
3486         struct i915_vma *vma, *next;
3487         int ret;
3488
3489         if (obj->cache_level == cache_level)
3490                 return 0;
3491
3492         if (i915_gem_obj_is_pinned(obj)) {
3493                 DRM_DEBUG("can not change the cache level of pinned objects\n");
3494                 return -EBUSY;
3495         }
3496
3497         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
3498                 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
3499                         ret = i915_vma_unbind(vma);
3500                         if (ret)
3501                                 return ret;
3502                 }
3503         }
3504
3505         if (i915_gem_obj_bound_any(obj)) {
3506                 ret = i915_gem_object_finish_gpu(obj);
3507                 if (ret)
3508                         return ret;
3509
3510                 i915_gem_object_finish_gtt(obj);
3511
3512                 /* Before SandyBridge, you could not use tiling or fence
3513                  * registers with snooped memory, so relinquish any fences
3514                  * currently pointing to our region in the aperture.
3515                  */
3516                 if (INTEL_INFO(dev)->gen < 6) {
3517                         ret = i915_gem_object_put_fence(obj);
3518                         if (ret)
3519                                 return ret;
3520                 }
3521
3522                 list_for_each_entry(vma, &obj->vma_list, vma_link)
3523                         if (drm_mm_node_allocated(&vma->node))
3524                                 vma->bind_vma(vma, cache_level,
3525                                               obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
3526         }
3527
3528         list_for_each_entry(vma, &obj->vma_list, vma_link)
3529                 vma->node.color = cache_level;
3530         obj->cache_level = cache_level;
3531
3532         if (cpu_write_needs_clflush(obj)) {
3533                 u32 old_read_domains, old_write_domain;
3534
3535                 /* If we're coming from LLC cached, then we haven't
3536                  * actually been tracking whether the data is in the
3537                  * CPU cache or not, since we only allow one bit set
3538                  * in obj->write_domain and have been skipping the clflushes.
3539                  * Just set it to the CPU cache for now.
3540                  */
3541                 i915_gem_object_retire(obj);
3542                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3543
3544                 old_read_domains = obj->base.read_domains;
3545                 old_write_domain = obj->base.write_domain;
3546
3547                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3548                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3549
3550                 trace_i915_gem_object_change_domain(obj,
3551                                                     old_read_domains,
3552                                                     old_write_domain);
3553         }
3554
3555         i915_gem_verify_gtt(dev);
3556         return 0;
3557 }
3558
3559 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3560                                struct drm_file *file)
3561 {
3562         struct drm_i915_gem_caching *args = data;
3563         struct drm_i915_gem_object *obj;
3564         int ret;
3565
3566         ret = i915_mutex_lock_interruptible(dev);
3567         if (ret)
3568                 return ret;
3569
3570         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3571         if (&obj->base == NULL) {
3572                 ret = -ENOENT;
3573                 goto unlock;
3574         }
3575
3576         switch (obj->cache_level) {
3577         case I915_CACHE_LLC:
3578         case I915_CACHE_L3_LLC:
3579                 args->caching = I915_CACHING_CACHED;
3580                 break;
3581
3582         case I915_CACHE_WT:
3583                 args->caching = I915_CACHING_DISPLAY;
3584                 break;
3585
3586         default:
3587                 args->caching = I915_CACHING_NONE;
3588                 break;
3589         }
3590
3591         drm_gem_object_unreference(&obj->base);
3592 unlock:
3593         mutex_unlock(&dev->struct_mutex);
3594         return ret;
3595 }
3596
3597 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3598                                struct drm_file *file)
3599 {
3600         struct drm_i915_gem_caching *args = data;
3601         struct drm_i915_gem_object *obj;
3602         enum i915_cache_level level;
3603         int ret;
3604
3605         switch (args->caching) {
3606         case I915_CACHING_NONE:
3607                 level = I915_CACHE_NONE;
3608                 break;
3609         case I915_CACHING_CACHED:
3610                 level = I915_CACHE_LLC;
3611                 break;
3612         case I915_CACHING_DISPLAY:
3613                 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3614                 break;
3615         default:
3616                 return -EINVAL;
3617         }
3618
3619         ret = i915_mutex_lock_interruptible(dev);
3620         if (ret)
3621                 return ret;
3622
3623         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3624         if (&obj->base == NULL) {
3625                 ret = -ENOENT;
3626                 goto unlock;
3627         }
3628
3629         ret = i915_gem_object_set_cache_level(obj, level);
3630
3631         drm_gem_object_unreference(&obj->base);
3632 unlock:
3633         mutex_unlock(&dev->struct_mutex);
3634         return ret;
3635 }
3636
3637 static bool is_pin_display(struct drm_i915_gem_object *obj)
3638 {
3639         /* There are 3 sources that pin objects:
3640          *   1. The display engine (scanouts, sprites, cursors);
3641          *   2. Reservations for execbuffer;
3642          *   3. The user.
3643          *
3644          * We can ignore reservations as we hold the struct_mutex and
3645          * are only called outside of the reservation path.  The user
3646          * can only increment pin_count once, and so if after
3647          * subtracting the potential reference by the user, any pin_count
3648          * remains, it must be due to another use by the display engine.
3649          */
3650         return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
3651 }
3652
3653 /*
3654  * Prepare buffer for display plane (scanout, cursors, etc).
3655  * Can be called from an uninterruptible phase (modesetting) and allows
3656  * any flushes to be pipelined (for pageflips).
3657  */
3658 int
3659 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3660                                      u32 alignment,
3661                                      struct intel_ring_buffer *pipelined)
3662 {
3663         u32 old_read_domains, old_write_domain;
3664         int ret;
3665
3666         if (pipelined != obj->ring) {
3667                 ret = i915_gem_object_sync(obj, pipelined);
3668                 if (ret)
3669                         return ret;
3670         }
3671
3672         /* Mark the pin_display early so that we account for the
3673          * display coherency whilst setting up the cache domains.
3674          */
3675         obj->pin_display = true;
3676
3677         /* The display engine is not coherent with the LLC cache on gen6.  As
3678          * a result, we make sure that the pinning that is about to occur is
3679          * done with uncached PTEs. This is lowest common denominator for all
3680          * chipsets.
3681          *
3682          * However for gen6+, we could do better by using the GFDT bit instead
3683          * of uncaching, which would allow us to flush all the LLC-cached data
3684          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3685          */
3686         ret = i915_gem_object_set_cache_level(obj,
3687                                               HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3688         if (ret)
3689                 goto err_unpin_display;
3690
3691         /* As the user may map the buffer once pinned in the display plane
3692          * (e.g. libkms for the bootup splash), we have to ensure that we
3693          * always use map_and_fenceable for all scanout buffers.
3694          */
3695         ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
3696         if (ret)
3697                 goto err_unpin_display;
3698
3699         i915_gem_object_flush_cpu_write_domain(obj, true);
3700
3701         old_write_domain = obj->base.write_domain;
3702         old_read_domains = obj->base.read_domains;
3703
3704         /* It should now be out of any other write domains, and we can update
3705          * the domain values for our changes.
3706          */
3707         obj->base.write_domain = 0;
3708         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3709
3710         trace_i915_gem_object_change_domain(obj,
3711                                             old_read_domains,
3712                                             old_write_domain);
3713
3714         return 0;
3715
3716 err_unpin_display:
3717         obj->pin_display = is_pin_display(obj);
3718         return ret;
3719 }
3720
3721 void
3722 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3723 {
3724         i915_gem_object_ggtt_unpin(obj);
3725         obj->pin_display = is_pin_display(obj);
3726 }
3727
3728 int
3729 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3730 {
3731         int ret;
3732
3733         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3734                 return 0;
3735
3736         ret = i915_gem_object_wait_rendering(obj, false);
3737         if (ret)
3738                 return ret;
3739
3740         /* Ensure that we invalidate the GPU's caches and TLBs. */
3741         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3742         return 0;
3743 }
3744
3745 /**
3746  * Moves a single object to the CPU read, and possibly write domain.
3747  *
3748  * This function returns when the move is complete, including waiting on
3749  * flushes to occur.
3750  */
3751 int
3752 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3753 {
3754         uint32_t old_write_domain, old_read_domains;
3755         int ret;
3756
3757         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3758                 return 0;
3759
3760         ret = i915_gem_object_wait_rendering(obj, !write);
3761         if (ret)
3762                 return ret;
3763
3764         i915_gem_object_retire(obj);
3765         i915_gem_object_flush_gtt_write_domain(obj);
3766
3767         old_write_domain = obj->base.write_domain;
3768         old_read_domains = obj->base.read_domains;
3769
3770         /* Flush the CPU cache if it's still invalid. */
3771         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3772                 i915_gem_clflush_object(obj, false);
3773
3774                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3775         }
3776
3777         /* It should now be out of any other write domains, and we can update
3778          * the domain values for our changes.
3779          */
3780         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3781
3782         /* If we're writing through the CPU, then the GPU read domains will
3783          * need to be invalidated at next use.
3784          */
3785         if (write) {
3786                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3787                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3788         }
3789
3790         trace_i915_gem_object_change_domain(obj,
3791                                             old_read_domains,
3792                                             old_write_domain);
3793
3794         return 0;
3795 }
3796
3797 /* Throttle our rendering by waiting until the ring has completed our requests
3798  * emitted over 20 msec ago.
3799  *
3800  * Note that if we were to use the current jiffies each time around the loop,
3801  * we wouldn't escape the function with any frames outstanding if the time to
3802  * render a frame was over 20ms.
3803  *
3804  * This should get us reasonable parallelism between CPU and GPU but also
3805  * relatively low latency when blocking on a particular request to finish.
3806  */
3807 static int
3808 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3809 {
3810         struct drm_i915_private *dev_priv = dev->dev_private;
3811         struct drm_i915_file_private *file_priv = file->driver_priv;
3812         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3813         struct drm_i915_gem_request *request;
3814         struct intel_ring_buffer *ring = NULL;
3815         unsigned reset_counter;
3816         u32 seqno = 0;
3817         int ret;
3818
3819         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3820         if (ret)
3821                 return ret;
3822
3823         ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3824         if (ret)
3825                 return ret;
3826
3827         spin_lock(&file_priv->mm.lock);
3828         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3829                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3830                         break;
3831
3832                 ring = request->ring;
3833                 seqno = request->seqno;
3834         }
3835         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3836         spin_unlock(&file_priv->mm.lock);
3837
3838         if (seqno == 0)
3839                 return 0;
3840
3841         ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
3842         if (ret == 0)
3843                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3844
3845         return ret;
3846 }
3847
3848 int
3849 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3850                     struct i915_address_space *vm,
3851                     uint32_t alignment,
3852                     unsigned flags)
3853 {
3854         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3855         struct i915_vma *vma;
3856         int ret;
3857
3858         if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
3859                 return -ENODEV;
3860
3861         if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
3862                 return -EINVAL;
3863
3864         vma = i915_gem_obj_to_vma(obj, vm);
3865         if (vma) {
3866                 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3867                         return -EBUSY;
3868
3869                 if ((alignment &&
3870                      vma->node.start & (alignment - 1)) ||
3871                     (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
3872                         WARN(vma->pin_count,
3873                              "bo is already pinned with incorrect alignment:"
3874                              " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
3875                              " obj->map_and_fenceable=%d\n",
3876                              i915_gem_obj_offset(obj, vm), alignment,
3877                              flags & PIN_MAPPABLE,
3878                              obj->map_and_fenceable);
3879                         ret = i915_vma_unbind(vma);
3880                         if (ret)
3881                                 return ret;
3882
3883                         vma = NULL;
3884                 }
3885         }
3886
3887         if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
3888                 vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
3889                 if (IS_ERR(vma))
3890                         return PTR_ERR(vma);
3891         }
3892
3893         if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
3894                 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
3895
3896         vma->pin_count++;
3897         if (flags & PIN_MAPPABLE)
3898                 obj->pin_mappable |= true;
3899
3900         return 0;
3901 }
3902
3903 void
3904 i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
3905 {
3906         struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3907
3908         BUG_ON(!vma);
3909         BUG_ON(vma->pin_count == 0);
3910         BUG_ON(!i915_gem_obj_ggtt_bound(obj));
3911
3912         if (--vma->pin_count == 0)
3913                 obj->pin_mappable = false;
3914 }
3915
3916 int
3917 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3918                    struct drm_file *file)
3919 {
3920         struct drm_i915_gem_pin *args = data;
3921         struct drm_i915_gem_object *obj;
3922         int ret;
3923
3924         if (INTEL_INFO(dev)->gen >= 6)
3925                 return -ENODEV;
3926
3927         ret = i915_mutex_lock_interruptible(dev);
3928         if (ret)
3929                 return ret;
3930
3931         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3932         if (&obj->base == NULL) {
3933                 ret = -ENOENT;
3934                 goto unlock;
3935         }
3936
3937         if (obj->madv != I915_MADV_WILLNEED) {
3938                 DRM_DEBUG("Attempting to pin a purgeable buffer\n");
3939                 ret = -EFAULT;
3940                 goto out;
3941         }
3942
3943         if (obj->pin_filp != NULL && obj->pin_filp != file) {
3944                 DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
3945                           args->handle);
3946                 ret = -EINVAL;
3947                 goto out;
3948         }
3949
3950         if (obj->user_pin_count == ULONG_MAX) {
3951                 ret = -EBUSY;
3952                 goto out;
3953         }
3954
3955         if (obj->user_pin_count == 0) {
3956                 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
3957                 if (ret)
3958                         goto out;
3959         }
3960
3961         obj->user_pin_count++;
3962         obj->pin_filp = file;
3963
3964         args->offset = i915_gem_obj_ggtt_offset(obj);
3965 out:
3966         drm_gem_object_unreference(&obj->base);
3967 unlock:
3968         mutex_unlock(&dev->struct_mutex);
3969         return ret;
3970 }
3971
3972 int
3973 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3974                      struct drm_file *file)
3975 {
3976         struct drm_i915_gem_pin *args = data;
3977         struct drm_i915_gem_object *obj;
3978         int ret;
3979
3980         ret = i915_mutex_lock_interruptible(dev);
3981         if (ret)
3982                 return ret;
3983
3984         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3985         if (&obj->base == NULL) {
3986                 ret = -ENOENT;
3987                 goto unlock;
3988         }
3989
3990         if (obj->pin_filp != file) {
3991                 DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3992                           args->handle);
3993                 ret = -EINVAL;
3994                 goto out;
3995         }
3996         obj->user_pin_count--;
3997         if (obj->user_pin_count == 0) {
3998                 obj->pin_filp = NULL;
3999                 i915_gem_object_ggtt_unpin(obj);
4000         }
4001
4002 out:
4003         drm_gem_object_unreference(&obj->base);
4004 unlock:
4005         mutex_unlock(&dev->struct_mutex);
4006         return ret;
4007 }
4008
4009 int
4010 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4011                     struct drm_file *file)
4012 {
4013         struct drm_i915_gem_busy *args = data;
4014         struct drm_i915_gem_object *obj;
4015         int ret;
4016
4017         ret = i915_mutex_lock_interruptible(dev);
4018         if (ret)
4019                 return ret;
4020
4021         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4022         if (&obj->base == NULL) {
4023                 ret = -ENOENT;
4024                 goto unlock;
4025         }
4026
4027         /* Count all active objects as busy, even if they are currently not used
4028          * by the gpu. Users of this interface expect objects to eventually
4029          * become non-busy without any further actions, therefore emit any
4030          * necessary flushes here.
4031          */
4032         ret = i915_gem_object_flush_active(obj);
4033
4034         args->busy = obj->active;
4035         if (obj->ring) {
4036                 BUILD_BUG_ON(I915_NUM_RINGS > 16);
4037                 args->busy |= intel_ring_flag(obj->ring) << 16;
4038         }
4039
4040         drm_gem_object_unreference(&obj->base);
4041 unlock:
4042         mutex_unlock(&dev->struct_mutex);
4043         return ret;
4044 }
4045
4046 int
4047 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4048                         struct drm_file *file_priv)
4049 {
4050         return i915_gem_ring_throttle(dev, file_priv);
4051 }
4052
4053 int
4054 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4055                        struct drm_file *file_priv)
4056 {
4057         struct drm_i915_gem_madvise *args = data;
4058         struct drm_i915_gem_object *obj;
4059         int ret;
4060
4061         switch (args->madv) {
4062         case I915_MADV_DONTNEED:
4063         case I915_MADV_WILLNEED:
4064             break;
4065         default:
4066             return -EINVAL;
4067         }
4068
4069         ret = i915_mutex_lock_interruptible(dev);
4070         if (ret)
4071                 return ret;
4072
4073         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
4074         if (&obj->base == NULL) {
4075                 ret = -ENOENT;
4076                 goto unlock;
4077         }
4078
4079         if (i915_gem_obj_is_pinned(obj)) {
4080                 ret = -EINVAL;
4081                 goto out;
4082         }
4083
4084         if (obj->madv != __I915_MADV_PURGED)
4085                 obj->madv = args->madv;
4086
4087         /* if the object is no longer attached, discard its backing storage */
4088         if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
4089                 i915_gem_object_truncate(obj);
4090
4091         args->retained = obj->madv != __I915_MADV_PURGED;
4092
4093 out:
4094         drm_gem_object_unreference(&obj->base);
4095 unlock:
4096         mutex_unlock(&dev->struct_mutex);
4097         return ret;
4098 }
4099
4100 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4101                           const struct drm_i915_gem_object_ops *ops)
4102 {
4103         INIT_LIST_HEAD(&obj->global_list);
4104         INIT_LIST_HEAD(&obj->ring_list);
4105         INIT_LIST_HEAD(&obj->obj_exec_link);
4106         INIT_LIST_HEAD(&obj->vma_list);
4107
4108         obj->ops = ops;
4109
4110         obj->fence_reg = I915_FENCE_REG_NONE;
4111         obj->madv = I915_MADV_WILLNEED;
4112         /* Avoid an unnecessary call to unbind on the first bind. */
4113         obj->map_and_fenceable = true;
4114
4115         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4116 }
4117
4118 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4119         .get_pages = i915_gem_object_get_pages_gtt,
4120         .put_pages = i915_gem_object_put_pages_gtt,
4121 };
4122
4123 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4124                                                   size_t size)
4125 {
4126         struct drm_i915_gem_object *obj;
4127         struct address_space *mapping;
4128         gfp_t mask;
4129
4130         obj = i915_gem_object_alloc(dev);
4131         if (obj == NULL)
4132                 return NULL;
4133
4134         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4135                 i915_gem_object_free(obj);
4136                 return NULL;
4137         }
4138
4139         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4140         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4141                 /* 965gm cannot relocate objects above 4GiB. */
4142                 mask &= ~__GFP_HIGHMEM;
4143                 mask |= __GFP_DMA32;
4144         }
4145
4146         mapping = file_inode(obj->base.filp)->i_mapping;
4147         mapping_set_gfp_mask(mapping, mask);
4148
4149         i915_gem_object_init(obj, &i915_gem_object_ops);
4150
4151         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4152         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4153
4154         if (HAS_LLC(dev)) {
4155                 /* On some devices, we can have the GPU use the LLC (the CPU
4156                  * cache) for about a 10% performance improvement
4157                  * compared to uncached.  Graphics requests other than
4158                  * display scanout are coherent with the CPU in
4159                  * accessing this cache.  This means in this mode we
4160                  * don't need to clflush on the CPU side, and on the
4161                  * GPU side we only need to flush internal caches to
4162                  * get data visible to the CPU.
4163                  *
4164                  * However, we maintain the display planes as UC, and so
4165                  * need to rebind when first used as such.
4166                  */
4167                 obj->cache_level = I915_CACHE_LLC;
4168         } else
4169                 obj->cache_level = I915_CACHE_NONE;
4170
4171         trace_i915_gem_object_create(obj);
4172
4173         return obj;
4174 }
4175
4176 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4177 {
4178         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4179         struct drm_device *dev = obj->base.dev;
4180         struct drm_i915_private *dev_priv = dev->dev_private;
4181         struct i915_vma *vma, *next;
4182
4183         intel_runtime_pm_get(dev_priv);
4184
4185         trace_i915_gem_object_destroy(obj);
4186
4187         if (obj->phys_obj)
4188                 i915_gem_detach_phys_object(dev, obj);
4189
4190         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4191                 int ret;
4192
4193                 vma->pin_count = 0;
4194                 ret = i915_vma_unbind(vma);
4195                 if (WARN_ON(ret == -ERESTARTSYS)) {
4196                         bool was_interruptible;
4197
4198                         was_interruptible = dev_priv->mm.interruptible;
4199                         dev_priv->mm.interruptible = false;
4200
4201                         WARN_ON(i915_vma_unbind(vma));
4202
4203                         dev_priv->mm.interruptible = was_interruptible;
4204                 }
4205         }
4206
4207         /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4208          * before progressing. */
4209         if (obj->stolen)
4210                 i915_gem_object_unpin_pages(obj);
4211
4212         if (WARN_ON(obj->pages_pin_count))
4213                 obj->pages_pin_count = 0;
4214         i915_gem_object_put_pages(obj);
4215         i915_gem_object_free_mmap_offset(obj);
4216         i915_gem_object_release_stolen(obj);
4217
4218         BUG_ON(obj->pages);
4219
4220         if (obj->base.import_attach)
4221                 drm_prime_gem_destroy(&obj->base, NULL);
4222
4223         drm_gem_object_release(&obj->base);
4224         i915_gem_info_remove_obj(dev_priv, obj->base.size);
4225
4226         kfree(obj->bit_17);
4227         i915_gem_object_free(obj);
4228
4229         intel_runtime_pm_put(dev_priv);
4230 }
4231
4232 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4233                                      struct i915_address_space *vm)
4234 {
4235         struct i915_vma *vma;
4236         list_for_each_entry(vma, &obj->vma_list, vma_link)
4237                 if (vma->vm == vm)
4238                         return vma;
4239
4240         return NULL;
4241 }
4242
4243 void i915_gem_vma_destroy(struct i915_vma *vma)
4244 {
4245         WARN_ON(vma->node.allocated);
4246
4247         /* Keep the vma as a placeholder in the execbuffer reservation lists */
4248         if (!list_empty(&vma->exec_list))
4249                 return;
4250
4251         list_del(&vma->vma_link);
4252
4253         kfree(vma);
4254 }
4255
4256 static void
4257 i915_gem_stop_ringbuffers(struct drm_device *dev)
4258 {
4259         struct drm_i915_private *dev_priv = dev->dev_private;
4260         struct intel_ring_buffer *ring;
4261         int i;
4262
4263         for_each_ring(ring, dev_priv, i)
4264                 intel_stop_ring_buffer(ring);
4265 }
4266
4267 int
4268 i915_gem_suspend(struct drm_device *dev)
4269 {
4270         struct drm_i915_private *dev_priv = dev->dev_private;
4271         int ret = 0;
4272
4273         mutex_lock(&dev->struct_mutex);
4274         if (dev_priv->ums.mm_suspended)
4275                 goto err;
4276
4277         ret = i915_gpu_idle(dev);
4278         if (ret)
4279                 goto err;
4280
4281         i915_gem_retire_requests(dev);
4282
4283         /* Under UMS, be paranoid and evict. */
4284         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4285                 i915_gem_evict_everything(dev);
4286
4287         i915_kernel_lost_context(dev);
4288         i915_gem_stop_ringbuffers(dev);
4289
4290         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
4291          * We need to replace this with a semaphore, or something.
4292          * And not confound ums.mm_suspended!
4293          */
4294         dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4295                                                              DRIVER_MODESET);
4296         mutex_unlock(&dev->struct_mutex);
4297
4298         del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4299         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4300         cancel_delayed_work_sync(&dev_priv->mm.idle_work);
4301
4302         return 0;
4303
4304 err:
4305         mutex_unlock(&dev->struct_mutex);
4306         return ret;
4307 }
4308
4309 int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
4310 {
4311         struct drm_device *dev = ring->dev;
4312         struct drm_i915_private *dev_priv = dev->dev_private;
4313         u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4314         u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4315         int i, ret;
4316
4317         if (!HAS_L3_DPF(dev) || !remap_info)
4318                 return 0;
4319
4320         ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4321         if (ret)
4322                 return ret;
4323
4324         /*
4325          * Note: We do not worry about the concurrent register cacheline hang
4326          * here because no other code should access these registers other than
4327          * at initialization time.
4328          */
4329         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4330                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4331                 intel_ring_emit(ring, reg_base + i);
4332                 intel_ring_emit(ring, remap_info[i/4]);
4333         }
4334
4335         intel_ring_advance(ring);
4336
4337         return ret;
4338 }
4339
4340 void i915_gem_init_swizzling(struct drm_device *dev)
4341 {
4342         struct drm_i915_private *dev_priv = dev->dev_private;
4343
4344         if (INTEL_INFO(dev)->gen < 5 ||
4345             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4346                 return;
4347
4348         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4349                                  DISP_TILE_SURFACE_SWIZZLING);
4350
4351         if (IS_GEN5(dev))
4352                 return;
4353
4354         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4355         if (IS_GEN6(dev))
4356                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4357         else if (IS_GEN7(dev))
4358                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4359         else if (IS_GEN8(dev))
4360                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4361         else
4362                 BUG();
4363 }
4364
4365 static bool
4366 intel_enable_blt(struct drm_device *dev)
4367 {
4368         if (!HAS_BLT(dev))
4369                 return false;
4370
4371         /* The blitter was dysfunctional on early prototypes */
4372         if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4373                 DRM_INFO("BLT not supported on this pre-production hardware;"
4374                          " graphics performance will be degraded.\n");
4375                 return false;
4376         }
4377
4378         return true;
4379 }
4380
4381 static int i915_gem_init_rings(struct drm_device *dev)
4382 {
4383         struct drm_i915_private *dev_priv = dev->dev_private;
4384         int ret;
4385
4386         ret = intel_init_render_ring_buffer(dev);
4387         if (ret)
4388                 return ret;
4389
4390         if (HAS_BSD(dev)) {
4391                 ret = intel_init_bsd_ring_buffer(dev);
4392                 if (ret)
4393                         goto cleanup_render_ring;
4394         }
4395
4396         if (intel_enable_blt(dev)) {
4397                 ret = intel_init_blt_ring_buffer(dev);
4398                 if (ret)
4399                         goto cleanup_bsd_ring;
4400         }
4401
4402         if (HAS_VEBOX(dev)) {
4403                 ret = intel_init_vebox_ring_buffer(dev);
4404                 if (ret)
4405                         goto cleanup_blt_ring;
4406         }
4407
4408         if (HAS_BSD2(dev)) {
4409                 ret = intel_init_bsd2_ring_buffer(dev);
4410                 if (ret)
4411                         goto cleanup_vebox_ring;
4412         }
4413
4414         ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4415         if (ret)
4416                 goto cleanup_bsd2_ring;
4417
4418         return 0;
4419
4420 cleanup_bsd2_ring:
4421         intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
4422 cleanup_vebox_ring:
4423         intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4424 cleanup_blt_ring:
4425         intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4426 cleanup_bsd_ring:
4427         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4428 cleanup_render_ring:
4429         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4430
4431         return ret;
4432 }
4433
4434 int
4435 i915_gem_init_hw(struct drm_device *dev)
4436 {
4437         struct drm_i915_private *dev_priv = dev->dev_private;
4438         int ret, i;
4439
4440         if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4441                 return -EIO;
4442
4443         if (dev_priv->ellc_size)
4444                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4445
4446         if (IS_HASWELL(dev))
4447                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4448                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4449
4450         if (HAS_PCH_NOP(dev)) {
4451                 if (IS_IVYBRIDGE(dev)) {
4452                         u32 temp = I915_READ(GEN7_MSG_CTL);
4453                         temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4454                         I915_WRITE(GEN7_MSG_CTL, temp);
4455                 } else if (INTEL_INFO(dev)->gen >= 7) {
4456                         u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4457                         temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4458                         I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4459                 }
4460         }
4461
4462         i915_gem_init_swizzling(dev);
4463
4464         ret = i915_gem_init_rings(dev);
4465         if (ret)
4466                 return ret;
4467
4468         for (i = 0; i < NUM_L3_SLICES(dev); i++)
4469                 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4470
4471         /*
4472          * XXX: Contexts should only be initialized once. Doing a switch to the
4473          * default context switch however is something we'd like to do after
4474          * reset or thaw (the latter may not actually be necessary for HW, but
4475          * goes with our code better). Context switching requires rings (for
4476          * the do_switch), but before enabling PPGTT. So don't move this.
4477          */
4478         ret = i915_gem_context_enable(dev_priv);
4479         if (ret && ret != -EIO) {
4480                 DRM_ERROR("Context enable failed %d\n", ret);
4481                 i915_gem_cleanup_ringbuffer(dev);
4482         }
4483
4484         return ret;
4485 }
4486
4487 int i915_gem_init(struct drm_device *dev)
4488 {
4489         struct drm_i915_private *dev_priv = dev->dev_private;
4490         int ret;
4491
4492         mutex_lock(&dev->struct_mutex);
4493
4494         if (IS_VALLEYVIEW(dev)) {
4495                 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4496                 I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
4497                 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
4498                               VLV_GTLC_ALLOWWAKEACK), 10))
4499                         DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4500         }
4501
4502         i915_gem_init_global_gtt(dev);
4503
4504         ret = i915_gem_context_init(dev);
4505         if (ret) {
4506                 mutex_unlock(&dev->struct_mutex);
4507                 return ret;
4508         }
4509
4510         ret = i915_gem_init_hw(dev);
4511         if (ret == -EIO) {
4512                 /* Allow ring initialisation to fail by marking the GPU as
4513                  * wedged. But we only want to do this where the GPU is angry,
4514                  * for all other failure, such as an allocation failure, bail.
4515                  */
4516                 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4517                 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
4518                 ret = 0;
4519         }
4520         mutex_unlock(&dev->struct_mutex);
4521
4522         /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4523         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4524                 dev_priv->dri1.allow_batchbuffer = 1;
4525         return ret;
4526 }
4527
4528 void
4529 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4530 {
4531         struct drm_i915_private *dev_priv = dev->dev_private;
4532         struct intel_ring_buffer *ring;
4533         int i;
4534
4535         for_each_ring(ring, dev_priv, i)
4536                 intel_cleanup_ring_buffer(ring);
4537 }
4538
4539 int
4540 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4541                        struct drm_file *file_priv)
4542 {
4543         struct drm_i915_private *dev_priv = dev->dev_private;
4544         int ret;
4545
4546         if (drm_core_check_feature(dev, DRIVER_MODESET))
4547                 return 0;
4548
4549         if (i915_reset_in_progress(&dev_priv->gpu_error)) {
4550                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4551                 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
4552         }
4553
4554         mutex_lock(&dev->struct_mutex);
4555         dev_priv->ums.mm_suspended = 0;
4556
4557         ret = i915_gem_init_hw(dev);
4558         if (ret != 0) {
4559                 mutex_unlock(&dev->struct_mutex);
4560                 return ret;
4561         }
4562
4563         BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
4564
4565         ret = drm_irq_install(dev, dev->pdev->irq);
4566         if (ret)
4567                 goto cleanup_ringbuffer;
4568         mutex_unlock(&dev->struct_mutex);
4569
4570         return 0;
4571
4572 cleanup_ringbuffer:
4573         i915_gem_cleanup_ringbuffer(dev);
4574         dev_priv->ums.mm_suspended = 1;
4575         mutex_unlock(&dev->struct_mutex);
4576
4577         return ret;
4578 }
4579
4580 int
4581 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4582                        struct drm_file *file_priv)
4583 {
4584         if (drm_core_check_feature(dev, DRIVER_MODESET))
4585                 return 0;
4586
4587         mutex_lock(&dev->struct_mutex);
4588         drm_irq_uninstall(dev);
4589         mutex_unlock(&dev->struct_mutex);
4590
4591         return i915_gem_suspend(dev);
4592 }
4593
4594 void
4595 i915_gem_lastclose(struct drm_device *dev)
4596 {
4597         int ret;
4598
4599         if (drm_core_check_feature(dev, DRIVER_MODESET))
4600                 return;
4601
4602         ret = i915_gem_suspend(dev);
4603         if (ret)
4604                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4605 }
4606
4607 static void
4608 init_ring_lists(struct intel_ring_buffer *ring)
4609 {
4610         INIT_LIST_HEAD(&ring->active_list);
4611         INIT_LIST_HEAD(&ring->request_list);
4612 }
4613
4614 void i915_init_vm(struct drm_i915_private *dev_priv,
4615                   struct i915_address_space *vm)
4616 {
4617         if (!i915_is_ggtt(vm))
4618                 drm_mm_init(&vm->mm, vm->start, vm->total);
4619         vm->dev = dev_priv->dev;
4620         INIT_LIST_HEAD(&vm->active_list);
4621         INIT_LIST_HEAD(&vm->inactive_list);
4622         INIT_LIST_HEAD(&vm->global_link);
4623         list_add_tail(&vm->global_link, &dev_priv->vm_list);
4624 }
4625
4626 void
4627 i915_gem_load(struct drm_device *dev)
4628 {
4629         struct drm_i915_private *dev_priv = dev->dev_private;
4630         int i;
4631
4632         dev_priv->slab =
4633                 kmem_cache_create("i915_gem_object",
4634                                   sizeof(struct drm_i915_gem_object), 0,
4635                                   SLAB_HWCACHE_ALIGN,
4636                                   NULL);
4637
4638         INIT_LIST_HEAD(&dev_priv->vm_list);
4639         i915_init_vm(dev_priv, &dev_priv->gtt.base);
4640
4641         INIT_LIST_HEAD(&dev_priv->context_list);
4642         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4643         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4644         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4645         for (i = 0; i < I915_NUM_RINGS; i++)
4646                 init_ring_lists(&dev_priv->ring[i]);
4647         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4648                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4649         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4650                           i915_gem_retire_work_handler);
4651         INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
4652                           i915_gem_idle_work_handler);
4653         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4654
4655         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4656         if (IS_GEN3(dev)) {
4657                 I915_WRITE(MI_ARB_STATE,
4658                            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4659         }
4660
4661         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4662
4663         /* Old X drivers will take 0-2 for front, back, depth buffers */
4664         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4665                 dev_priv->fence_reg_start = 3;
4666
4667         if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4668                 dev_priv->num_fence_regs = 32;
4669         else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4670                 dev_priv->num_fence_regs = 16;
4671         else
4672                 dev_priv->num_fence_regs = 8;
4673
4674         /* Initialize fence registers to zero */
4675         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4676         i915_gem_restore_fences(dev);
4677
4678         i915_gem_detect_bit_6_swizzle(dev);
4679         init_waitqueue_head(&dev_priv->pending_flip_queue);
4680
4681         dev_priv->mm.interruptible = true;
4682
4683         dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
4684         dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
4685         dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4686         register_shrinker(&dev_priv->mm.inactive_shrinker);
4687 }
4688
4689 /*
4690  * Create a physically contiguous memory object for this object
4691  * e.g. for cursor + overlay regs
4692  */
4693 static int i915_gem_init_phys_object(struct drm_device *dev,
4694                                      int id, int size, int align)
4695 {
4696         struct drm_i915_private *dev_priv = dev->dev_private;
4697         struct drm_i915_gem_phys_object *phys_obj;
4698         int ret;
4699
4700         if (dev_priv->mm.phys_objs[id - 1] || !size)
4701                 return 0;
4702
4703         phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
4704         if (!phys_obj)
4705                 return -ENOMEM;
4706
4707         phys_obj->id = id;
4708
4709         phys_obj->handle = drm_pci_alloc(dev, size, align);
4710         if (!phys_obj->handle) {
4711                 ret = -ENOMEM;
4712                 goto kfree_obj;
4713         }
4714 #ifdef CONFIG_X86
4715         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4716 #endif
4717
4718         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4719
4720         return 0;
4721 kfree_obj:
4722         kfree(phys_obj);
4723         return ret;
4724 }
4725
4726 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4727 {
4728         struct drm_i915_private *dev_priv = dev->dev_private;
4729         struct drm_i915_gem_phys_object *phys_obj;
4730
4731         if (!dev_priv->mm.phys_objs[id - 1])
4732                 return;
4733
4734         phys_obj = dev_priv->mm.phys_objs[id - 1];
4735         if (phys_obj->cur_obj) {
4736                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4737         }
4738
4739 #ifdef CONFIG_X86
4740         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4741 #endif
4742         drm_pci_free(dev, phys_obj->handle);
4743         kfree(phys_obj);
4744         dev_priv->mm.phys_objs[id - 1] = NULL;
4745 }
4746
4747 void i915_gem_free_all_phys_object(struct drm_device *dev)
4748 {
4749         int i;
4750
4751         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4752                 i915_gem_free_phys_object(dev, i);
4753 }
4754
4755 void i915_gem_detach_phys_object(struct drm_device *dev,
4756                                  struct drm_i915_gem_object *obj)
4757 {
4758         struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4759         char *vaddr;
4760         int i;
4761         int page_count;
4762
4763         if (!obj->phys_obj)
4764                 return;
4765         vaddr = obj->phys_obj->handle->vaddr;
4766
4767         page_count = obj->base.size / PAGE_SIZE;
4768         for (i = 0; i < page_count; i++) {
4769                 struct page *page = shmem_read_mapping_page(mapping, i);
4770                 if (!IS_ERR(page)) {
4771                         char *dst = kmap_atomic(page);
4772                         memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4773                         kunmap_atomic(dst);
4774
4775                         drm_clflush_pages(&page, 1);
4776
4777                         set_page_dirty(page);
4778                         mark_page_accessed(page);
4779                         page_cache_release(page);
4780                 }
4781         }
4782         i915_gem_chipset_flush(dev);
4783
4784         obj->phys_obj->cur_obj = NULL;
4785         obj->phys_obj = NULL;
4786 }
4787
4788 int
4789 i915_gem_attach_phys_object(struct drm_device *dev,
4790                             struct drm_i915_gem_object *obj,
4791                             int id,
4792                             int align)
4793 {
4794         struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4795         struct drm_i915_private *dev_priv = dev->dev_private;
4796         int ret = 0;
4797         int page_count;
4798         int i;
4799
4800         if (id > I915_MAX_PHYS_OBJECT)
4801                 return -EINVAL;
4802
4803         if (obj->phys_obj) {
4804                 if (obj->phys_obj->id == id)
4805                         return 0;
4806                 i915_gem_detach_phys_object(dev, obj);
4807         }
4808
4809         /* create a new object */
4810         if (!dev_priv->mm.phys_objs[id - 1]) {
4811                 ret = i915_gem_init_phys_object(dev, id,
4812                                                 obj->base.size, align);
4813                 if (ret) {
4814                         DRM_ERROR("failed to init phys object %d size: %zu\n",
4815                                   id, obj->base.size);
4816                         return ret;
4817                 }
4818         }
4819
4820         /* bind to the object */
4821         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4822         obj->phys_obj->cur_obj = obj;
4823
4824         page_count = obj->base.size / PAGE_SIZE;
4825
4826         for (i = 0; i < page_count; i++) {
4827                 struct page *page;
4828                 char *dst, *src;
4829
4830                 page = shmem_read_mapping_page(mapping, i);
4831                 if (IS_ERR(page))
4832                         return PTR_ERR(page);
4833
4834                 src = kmap_atomic(page);
4835                 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4836                 memcpy(dst, src, PAGE_SIZE);
4837                 kunmap_atomic(src);
4838
4839                 mark_page_accessed(page);
4840                 page_cache_release(page);
4841         }
4842
4843         return 0;
4844 }
4845
4846 static int
4847 i915_gem_phys_pwrite(struct drm_device *dev,
4848                      struct drm_i915_gem_object *obj,
4849                      struct drm_i915_gem_pwrite *args,
4850                      struct drm_file *file_priv)
4851 {
4852         void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4853         char __user *user_data = to_user_ptr(args->data_ptr);
4854
4855         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4856                 unsigned long unwritten;
4857
4858                 /* The physical object once assigned is fixed for the lifetime
4859                  * of the obj, so we can safely drop the lock and continue
4860                  * to access vaddr.
4861                  */
4862                 mutex_unlock(&dev->struct_mutex);
4863                 unwritten = copy_from_user(vaddr, user_data, args->size);
4864                 mutex_lock(&dev->struct_mutex);
4865                 if (unwritten)
4866                         return -EFAULT;
4867         }
4868
4869         i915_gem_chipset_flush(dev);
4870         return 0;
4871 }
4872
4873 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4874 {
4875         struct drm_i915_file_private *file_priv = file->driver_priv;
4876
4877         cancel_delayed_work_sync(&file_priv->mm.idle_work);
4878
4879         /* Clean up our request list when the client is going away, so that
4880          * later retire_requests won't dereference our soon-to-be-gone
4881          * file_priv.
4882          */
4883         spin_lock(&file_priv->mm.lock);
4884         while (!list_empty(&file_priv->mm.request_list)) {
4885                 struct drm_i915_gem_request *request;
4886
4887                 request = list_first_entry(&file_priv->mm.request_list,
4888                                            struct drm_i915_gem_request,
4889                                            client_list);
4890                 list_del(&request->client_list);
4891                 request->file_priv = NULL;
4892         }
4893         spin_unlock(&file_priv->mm.lock);
4894 }
4895
4896 static void
4897 i915_gem_file_idle_work_handler(struct work_struct *work)
4898 {
4899         struct drm_i915_file_private *file_priv =
4900                 container_of(work, typeof(*file_priv), mm.idle_work.work);
4901
4902         atomic_set(&file_priv->rps_wait_boost, false);
4903 }
4904
4905 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4906 {
4907         struct drm_i915_file_private *file_priv;
4908         int ret;
4909
4910         DRM_DEBUG_DRIVER("\n");
4911
4912         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4913         if (!file_priv)
4914                 return -ENOMEM;
4915
4916         file->driver_priv = file_priv;
4917         file_priv->dev_priv = dev->dev_private;
4918         file_priv->file = file;
4919
4920         spin_lock_init(&file_priv->mm.lock);
4921         INIT_LIST_HEAD(&file_priv->mm.request_list);
4922         INIT_DELAYED_WORK(&file_priv->mm.idle_work,
4923                           i915_gem_file_idle_work_handler);
4924
4925         ret = i915_gem_context_open(dev, file);
4926         if (ret)
4927                 kfree(file_priv);
4928
4929         return ret;
4930 }
4931
4932 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4933 {
4934         if (!mutex_is_locked(mutex))
4935                 return false;
4936
4937 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4938         return mutex->owner == task;
4939 #else
4940         /* Since UP may be pre-empted, we cannot assume that we own the lock */
4941         return false;
4942 #endif
4943 }
4944
4945 static unsigned long
4946 i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
4947 {
4948         struct drm_i915_private *dev_priv =
4949                 container_of(shrinker,
4950                              struct drm_i915_private,
4951                              mm.inactive_shrinker);
4952         struct drm_device *dev = dev_priv->dev;
4953         struct drm_i915_gem_object *obj;
4954         bool unlock = true;
4955         unsigned long count;
4956
4957         if (!mutex_trylock(&dev->struct_mutex)) {
4958                 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4959                         return 0;
4960
4961                 if (dev_priv->mm.shrinker_no_lock_stealing)
4962                         return 0;
4963
4964                 unlock = false;
4965         }
4966
4967         count = 0;
4968         list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
4969                 if (obj->pages_pin_count == 0)
4970                         count += obj->base.size >> PAGE_SHIFT;
4971
4972         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4973                 if (obj->active)
4974                         continue;
4975
4976                 if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
4977                         count += obj->base.size >> PAGE_SHIFT;
4978         }
4979
4980         if (unlock)
4981                 mutex_unlock(&dev->struct_mutex);
4982
4983         return count;
4984 }
4985
4986 /* All the new VM stuff */
4987 unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
4988                                   struct i915_address_space *vm)
4989 {
4990         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4991         struct i915_vma *vma;
4992
4993         if (!dev_priv->mm.aliasing_ppgtt ||
4994             vm == &dev_priv->mm.aliasing_ppgtt->base)
4995                 vm = &dev_priv->gtt.base;
4996
4997         BUG_ON(list_empty(&o->vma_list));
4998         list_for_each_entry(vma, &o->vma_list, vma_link) {
4999                 if (vma->vm == vm)
5000                         return vma->node.start;
5001
5002         }
5003         return -1;
5004 }
5005
5006 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
5007                         struct i915_address_space *vm)
5008 {
5009         struct i915_vma *vma;
5010
5011         list_for_each_entry(vma, &o->vma_list, vma_link)
5012                 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
5013                         return true;
5014
5015         return false;
5016 }
5017
5018 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
5019 {
5020         struct i915_vma *vma;
5021
5022         list_for_each_entry(vma, &o->vma_list, vma_link)
5023                 if (drm_mm_node_allocated(&vma->node))
5024                         return true;
5025
5026         return false;
5027 }
5028
5029 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
5030                                 struct i915_address_space *vm)
5031 {
5032         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5033         struct i915_vma *vma;
5034
5035         if (!dev_priv->mm.aliasing_ppgtt ||
5036             vm == &dev_priv->mm.aliasing_ppgtt->base)
5037                 vm = &dev_priv->gtt.base;
5038
5039         BUG_ON(list_empty(&o->vma_list));
5040
5041         list_for_each_entry(vma, &o->vma_list, vma_link)
5042                 if (vma->vm == vm)
5043                         return vma->node.size;
5044
5045         return 0;
5046 }
5047
5048 static unsigned long
5049 i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
5050 {
5051         struct drm_i915_private *dev_priv =
5052                 container_of(shrinker,
5053                              struct drm_i915_private,
5054                              mm.inactive_shrinker);
5055         struct drm_device *dev = dev_priv->dev;
5056         unsigned long freed;
5057         bool unlock = true;
5058
5059         if (!mutex_trylock(&dev->struct_mutex)) {
5060                 if (!mutex_is_locked_by(&dev->struct_mutex, current))
5061                         return SHRINK_STOP;
5062
5063                 if (dev_priv->mm.shrinker_no_lock_stealing)
5064                         return SHRINK_STOP;
5065
5066                 unlock = false;
5067         }
5068
5069         freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
5070         if (freed < sc->nr_to_scan)
5071                 freed += __i915_gem_shrink(dev_priv,
5072                                            sc->nr_to_scan - freed,
5073                                            false);
5074         if (freed < sc->nr_to_scan)
5075                 freed += i915_gem_shrink_all(dev_priv);
5076
5077         if (unlock)
5078                 mutex_unlock(&dev->struct_mutex);
5079
5080         return freed;
5081 }
5082
5083 struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
5084 {
5085         struct i915_vma *vma;
5086
5087         if (WARN_ON(list_empty(&obj->vma_list)))
5088                 return NULL;
5089
5090         vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
5091         if (vma->vm != obj_to_ggtt(obj))
5092                 return NULL;
5093
5094         return vma;
5095 }