drm/i915: Update DRIVER_DATE to 20150327 (drivers/gpu/drm/i915/i915_gem.c, firefly-linux-kernel-4.4.55)
/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                               bool readonly);
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                         struct drm_i915_fence_reg *fence,
                                         bool enable);

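/*
 * Cache coherency helpers: on LLC platforms the CPU and GPU share the
 * last-level cache, so every cache level is coherent; on non-LLC
 * platforms only snooped objects (cache_level != I915_CACHE_NONE) are,
 * and anything else must be clflushed by hand. Scanout (pin_display)
 * reads bypass the CPU caches, hence the extra check below.
 */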
static bool cpu_cache_is_coherent(struct drm_device *dev,
                                  enum i915_cache_level level)
{
        return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
        if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
                return true;

        return obj->pin_display;
}

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
        if (obj->tiling_mode)
                i915_gem_release_mmap(obj);

        /* As we do not have an associated fence register, we will force
         * a tiling change if we ever need to acquire one.
         */
        obj->fence_dirty = false;
        obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
                                  size_t size)
{
        spin_lock(&dev_priv->mm.object_stat_lock);
        dev_priv->mm.object_count++;
        dev_priv->mm.object_memory += size;
        spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
                                     size_t size)
{
        spin_lock(&dev_priv->mm.object_stat_lock);
        dev_priv->mm.object_count--;
        dev_priv->mm.object_memory -= size;
        spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
        int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
                   i915_terminally_wedged(error))
        if (EXIT_COND)
                return 0;

        /*
         * Only wait 10 seconds for the gpu reset to complete to avoid hanging
         * userspace. If it takes that long something really bad is going on and
         * we should simply try to bail out and fail as gracefully as possible.
         */
        ret = wait_event_interruptible_timeout(error->reset_queue,
                                               EXIT_COND,
                                               10*HZ);
        if (ret == 0) {
                DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
                return -EIO;
        } else if (ret < 0) {
                return ret;
        }
#undef EXIT_COND

        return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
        if (ret)
                return ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        WARN_ON(i915_verify_lists(dev));
        return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_get_aperture *args = data;
        struct drm_i915_gem_object *obj;
        size_t pinned;

        pinned = 0;
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
                if (i915_gem_obj_is_pinned(obj))
                        pinned += i915_gem_obj_ggtt_size(obj);
        mutex_unlock(&dev->struct_mutex);

        args->aper_size = dev_priv->gtt.base.total;
        args->aper_available_size = args->aper_size - pinned;

        return 0;
}

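/*
 * "phys" objects: the shmem backing pages are copied into one
 * physically contiguous DMA buffer (obj->phys_handle) and a
 * single-entry sg_table is pointed at it, for hardware that needs a
 * contiguous allocation (historically cursors and overlay registers
 * on old chipsets).
 */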
static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
        struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
        char *vaddr = obj->phys_handle->vaddr;
        struct sg_table *st;
        struct scatterlist *sg;
        int i;

        if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
                return -EINVAL;

        for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                struct page *page;
                char *src;

                page = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(page))
                        return PTR_ERR(page);

                src = kmap_atomic(page);
                memcpy(vaddr, src, PAGE_SIZE);
                drm_clflush_virt_range(vaddr, PAGE_SIZE);
                kunmap_atomic(src);

                page_cache_release(page);
                vaddr += PAGE_SIZE;
        }

        i915_gem_chipset_flush(obj->base.dev);

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
                return -ENOMEM;

        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
                return -ENOMEM;
        }

        sg = st->sgl;
        sg->offset = 0;
        sg->length = obj->base.size;

        sg_dma_address(sg) = obj->phys_handle->busaddr;
        sg_dma_len(sg) = obj->base.size;

        obj->pages = st;
        obj->has_dma_mapping = true;
        return 0;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
        int ret;

        BUG_ON(obj->madv == __I915_MADV_PURGED);

        ret = i915_gem_object_set_to_cpu_domain(obj, true);
        if (ret) {
                /* In the event of a disaster, abandon all caches and
                 * hope for the best.
                 */
                WARN_ON(ret != -EIO);
                obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        }

        if (obj->madv == I915_MADV_DONTNEED)
                obj->dirty = 0;

        if (obj->dirty) {
                struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
                char *vaddr = obj->phys_handle->vaddr;
                int i;

                for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                        struct page *page;
                        char *dst;

                        page = shmem_read_mapping_page(mapping, i);
                        if (IS_ERR(page))
                                continue;

                        dst = kmap_atomic(page);
                        drm_clflush_virt_range(vaddr, PAGE_SIZE);
                        memcpy(dst, vaddr, PAGE_SIZE);
                        kunmap_atomic(dst);

                        set_page_dirty(page);
                        if (obj->madv == I915_MADV_WILLNEED)
                                mark_page_accessed(page);
                        page_cache_release(page);
                        vaddr += PAGE_SIZE;
                }
                obj->dirty = 0;
        }

        sg_free_table(obj->pages);
        kfree(obj->pages);

        obj->has_dma_mapping = false;
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
        drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
        .get_pages = i915_gem_object_get_pages_phys,
        .put_pages = i915_gem_object_put_pages_phys,
        .release = i915_gem_object_release_phys,
};

static int
drop_pages(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma, *next;
        int ret;

        drm_gem_object_reference(&obj->base);
        list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
                if (i915_vma_unbind(vma))
                        break;

        ret = i915_gem_object_put_pages(obj);
        drm_gem_object_unreference(&obj->base);

        return ret;
}

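/*
 * Switch an object over to the contiguous phys backing store. align
 * is assumed to be a power of two; an already attached handle is
 * reused if it satisfies the requested alignment, otherwise we fail
 * with -EBUSY rather than relocate the buffer.
 */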
int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
                            int align)
{
        drm_dma_handle_t *phys;
        int ret;

        if (obj->phys_handle) {
                if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
                        return -EBUSY;

                return 0;
        }

        if (obj->madv != I915_MADV_WILLNEED)
                return -EFAULT;

        if (obj->base.filp == NULL)
                return -EINVAL;

        ret = drop_pages(obj);
        if (ret)
                return ret;

        /* create a new object */
        phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
        if (!phys)
                return -ENOMEM;

        obj->phys_handle = phys;
        obj->ops = &i915_gem_phys_ops;

        return i915_gem_object_get_pages(obj);
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pwrite *args,
                     struct drm_file *file_priv)
{
        struct drm_device *dev = obj->base.dev;
        void *vaddr = obj->phys_handle->vaddr + args->offset;
        char __user *user_data = to_user_ptr(args->data_ptr);
        int ret = 0;

        /* We manually control the domain here and pretend that it
         * remains coherent i.e. in the GTT domain, like shmem_pwrite.
         */
        ret = i915_gem_object_wait_rendering(obj, false);
        if (ret)
                return ret;

        intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
        if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
                unsigned long unwritten;

                /* The physical object once assigned is fixed for the lifetime
                 * of the obj, so we can safely drop the lock and continue
                 * to access vaddr.
                 */
                mutex_unlock(&dev->struct_mutex);
                unwritten = copy_from_user(vaddr, user_data, args->size);
                mutex_lock(&dev->struct_mutex);
                if (unwritten) {
                        ret = -EFAULT;
                        goto out;
                }
        }

        drm_clflush_virt_range(vaddr, args->size);
        i915_gem_chipset_flush(dev);

out:
        intel_fb_obj_flush(obj, false);
        return ret;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        kmem_cache_free(dev_priv->slab, obj);
}

static int
i915_gem_create(struct drm_file *file,
                struct drm_device *dev,
                uint64_t size,
                uint32_t *handle_p)
{
        struct drm_i915_gem_object *obj;
        int ret;
        u32 handle;

        size = roundup(size, PAGE_SIZE);
        if (size == 0)
                return -EINVAL;

        /* Allocate the new object */
        obj = i915_gem_alloc_object(dev, size);
        if (obj == NULL)
                return -ENOMEM;

        ret = drm_gem_handle_create(file, &obj->base, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(&obj->base);
        if (ret)
                return ret;

        *handle_p = handle;
        return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
                     struct drm_device *dev,
                     struct drm_mode_create_dumb *args)
{
        /* have to work out size/pitch and return them */
        args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
        args->size = args->pitch * args->height;
        return i915_gem_create(file, dev,
                               args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_gem_create *args = data;

        return i915_gem_create(file, dev,
                               args->size, &args->handle);
}

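/*
 * Bit-17 swizzle helpers: on affected chipsets the memory controller
 * folds physical address bit 17 into the tiled layout, which the CPU
 * view does not see. The software fixup is to swap the two 64-byte
 * halves of each 128-byte span (hence gpu_offset ^ 64), copying at
 * most up to the next cacheline boundary per iteration.
 */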
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
                        const char *gpu_vaddr, int gpu_offset,
                        int length)
{
        int ret, cpu_offset = 0;

        while (length > 0) {
                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                ret = __copy_to_user(cpu_vaddr + cpu_offset,
                                     gpu_vaddr + swizzled_gpu_offset,
                                     this_length);
                if (ret)
                        return ret + length;

                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
        }

        return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
                          const char __user *cpu_vaddr,
                          int length)
{
        int ret, cpu_offset = 0;

        while (length > 0) {
                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
                                       cpu_vaddr + cpu_offset,
                                       this_length);
                if (ret)
                        return ret + length;

                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
        }

        return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
                                    int *needs_clflush)
{
        int ret;

        *needs_clflush = 0;

        if (!obj->base.filp)
                return -EINVAL;

        if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
                /* If we're not in the cpu read domain, set ourself into the gtt
                 * read domain and manually flush cachelines (if required). This
                 * optimizes for the case when the gpu will dirty the data
                 * anyway again before the next pread happens. */
                *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
                                                        obj->cache_level);
                ret = i915_gem_object_wait_rendering(obj, true);
                if (ret)
                        return ret;

                i915_gem_object_retire(obj);
        }

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                return ret;

        i915_gem_object_pin_pages(obj);

        return ret;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
                 char __user *user_data,
                 bool page_do_bit17_swizzling, bool needs_clflush)
{
        char *vaddr;
        int ret;

        if (unlikely(page_do_bit17_swizzling))
                return -EINVAL;

        vaddr = kmap_atomic(page);
        if (needs_clflush)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
                                       page_length);
        ret = __copy_to_user_inatomic(user_data,
                                      vaddr + shmem_page_offset,
                                      page_length);
        kunmap_atomic(vaddr);

        return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
                             bool swizzled)
{
        if (unlikely(swizzled)) {
                unsigned long start = (unsigned long) addr;
                unsigned long end = (unsigned long) addr + length;

                /* For swizzling simply ensure that we always flush both
                 * channels. Lame, but simple and it works. Swizzled
                 * pwrite/pread is far from a hotpath - current userspace
                 * doesn't use it at all. */
                start = round_down(start, 128);
                end = round_up(end, 128);

                drm_clflush_virt_range((void *)start, end - start);
        } else {
                drm_clflush_virt_range(addr, length);
        }
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
                 char __user *user_data,
                 bool page_do_bit17_swizzling, bool needs_clflush)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);
        if (needs_clflush)
                shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                                             page_length,
                                             page_do_bit17_swizzling);

        if (page_do_bit17_swizzling)
                ret = __copy_to_user_swizzled(user_data,
                                              vaddr, shmem_page_offset,
                                              page_length);
        else
                ret = __copy_to_user(user_data,
                                     vaddr + shmem_page_offset,
                                     page_length);
        kunmap(page);

        return ret ? -EFAULT : 0;
}

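/*
 * shmem pread: try the atomic per-page fast path first; if it faults
 * (or the page needs bit-17 swizzling), drop struct_mutex, prefault
 * the user buffer once, and retry via the sleeping slow path.
 */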
static int
i915_gem_shmem_pread(struct drm_device *dev,
                     struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pread *args,
                     struct drm_file *file)
{
        char __user *user_data;
        ssize_t remain;
        loff_t offset;
        int shmem_page_offset, page_length, ret = 0;
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
        int prefaulted = 0;
        int needs_clflush = 0;
        struct sg_page_iter sg_iter;

        user_data = to_user_ptr(args->data_ptr);
        remain = args->size;

        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

        ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
        if (ret)
                return ret;

        offset = args->offset;

        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
                         offset >> PAGE_SHIFT) {
                struct page *page = sg_page_iter_page(&sg_iter);

                if (remain <= 0)
                        break;

                /* Operation in this page
                 *
                 * shmem_page_offset = offset within page in shmem file
                 * page_length = bytes to copy for this page
                 */
                shmem_page_offset = offset_in_page(offset);
                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;

                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                        (page_to_phys(page) & (1 << 17)) != 0;

                ret = shmem_pread_fast(page, shmem_page_offset, page_length,
                                       user_data, page_do_bit17_swizzling,
                                       needs_clflush);
                if (ret == 0)
                        goto next_page;

                mutex_unlock(&dev->struct_mutex);

                if (likely(!i915.prefault_disable) && !prefaulted) {
                        ret = fault_in_multipages_writeable(user_data, remain);
                        /* Userspace is tricking us, but we've already clobbered
                         * its pages with the prefault and promised to write the
                         * data up to the first fault. Hence ignore any errors
                         * and just continue. */
                        (void)ret;
                        prefaulted = 1;
                }

                ret = shmem_pread_slow(page, shmem_page_offset, page_length,
                                       user_data, page_do_bit17_swizzling,
                                       needs_clflush);

                mutex_lock(&dev->struct_mutex);

                if (ret)
                        goto out;

next_page:
                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

out:
        i915_gem_object_unpin_pages(obj);

        return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_gem_pread *args = data;
        struct drm_i915_gem_object *obj;
        int ret = 0;

        if (args->size == 0)
                return 0;

        if (!access_ok(VERIFY_WRITE,
                       to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        /* Bounds check source.  */
        if (args->offset > obj->base.size ||
            args->size > obj->base.size - args->offset) {
                ret = -EINVAL;
                goto out;
        }

        /* prime objects have no backing filp to GEM pread/pwrite
         * pages from.
         */
        if (!obj->base.filp) {
                ret = -EINVAL;
                goto out;
        }

        trace_i915_gem_object_pread(obj, args->offset, args->size);

        ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
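 * (the copy runs inside an atomic io-mapping section, so it must not
 * take faults or sleep).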
 */

static inline int
fast_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
                int length)
{
        void __iomem *vaddr_atomic;
        void *vaddr;
        unsigned long unwritten;

        vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
        /* We can use the cpu mem copy function because this is X86. */
        vaddr = (void __force*)vaddr_atomic + page_offset;
        unwritten = __copy_from_user_inatomic_nocache(vaddr,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr_atomic);
        return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
                         struct drm_i915_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length, ret;

        ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
        if (ret)
                goto out;

        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
                goto out_unpin;

        ret = i915_gem_object_put_fence(obj);
        if (ret)
                goto out_unpin;

        user_data = to_user_ptr(args->data_ptr);
        remain = args->size;

        offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

        intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                page_base = offset & PAGE_MASK;
                page_offset = offset_in_page(offset);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;

                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available.  Return the error and we'll
                 * retry in the slow path.
                 */
                if (fast_user_write(dev_priv->gtt.mappable, page_base,
                                    page_offset, user_data, page_length)) {
                        ret = -EFAULT;
                        goto out_flush;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

out_flush:
        intel_fb_obj_flush(obj, false);
out_unpin:
        i915_gem_object_ggtt_unpin(obj);
out:
        return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush_after is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
                  char __user *user_data,
                  bool page_do_bit17_swizzling,
                  bool needs_clflush_before,
                  bool needs_clflush_after)
{
        char *vaddr;
        int ret;

        if (unlikely(page_do_bit17_swizzling))
                return -EINVAL;

        vaddr = kmap_atomic(page);
        if (needs_clflush_before)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
                                       page_length);
        ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
                                        user_data, page_length);
        if (needs_clflush_after)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
                                       page_length);
        kunmap_atomic(vaddr);

        return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
                  char __user *user_data,
                  bool page_do_bit17_swizzling,
                  bool needs_clflush_before,
                  bool needs_clflush_after)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);
        if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
                shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                                             page_length,
                                             page_do_bit17_swizzling);
        if (page_do_bit17_swizzling)
                ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
                                                user_data,
                                                page_length);
        else
                ret = __copy_from_user(vaddr + shmem_page_offset,
                                       user_data,
                                       page_length);
        if (needs_clflush_after)
                shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                                             page_length,
                                             page_do_bit17_swizzling);
        kunmap(page);

        return ret ? -EFAULT : 0;
}

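/*
 * shmem pwrite: mirror image of the pread loop above. Cachelines are
 * flushed before the copy when the write covers them only partially
 * (to avoid mixing in stale data) and after the copy when the GPU
 * will read the data through a non-coherent path.
 */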
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
                      struct drm_i915_gem_object *obj,
                      struct drm_i915_gem_pwrite *args,
                      struct drm_file *file)
{
        ssize_t remain;
        loff_t offset;
        char __user *user_data;
        int shmem_page_offset, page_length, ret = 0;
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
        int hit_slowpath = 0;
        int needs_clflush_after = 0;
        int needs_clflush_before = 0;
        struct sg_page_iter sg_iter;

        user_data = to_user_ptr(args->data_ptr);
        remain = args->size;

        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

        if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                /* If we're not in the cpu write domain, set ourself into the gtt
                 * write domain and manually flush cachelines (if required). This
                 * optimizes for the case when the gpu will use the data
                 * right away and we therefore have to clflush anyway. */
                needs_clflush_after = cpu_write_needs_clflush(obj);
                ret = i915_gem_object_wait_rendering(obj, false);
                if (ret)
                        return ret;

                i915_gem_object_retire(obj);
        }
        /* Same trick applies to invalidate partially written cachelines read
         * before writing. */
        if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
                needs_clflush_before =
                        !cpu_cache_is_coherent(dev, obj->cache_level);

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                return ret;

        intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);

        i915_gem_object_pin_pages(obj);

        offset = args->offset;
        obj->dirty = 1;

        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
                         offset >> PAGE_SHIFT) {
                struct page *page = sg_page_iter_page(&sg_iter);
                int partial_cacheline_write;

                if (remain <= 0)
                        break;

                /* Operation in this page
                 *
                 * shmem_page_offset = offset within page in shmem file
                 * page_length = bytes to copy for this page
                 */
                shmem_page_offset = offset_in_page(offset);

                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;

                /* If we don't overwrite a cacheline completely we need to be
                 * careful to have up-to-date data by first clflushing. Don't
                 * overcomplicate things and flush the whole range being written. */
                partial_cacheline_write = needs_clflush_before &&
                        ((shmem_page_offset | page_length)
                                & (boot_cpu_data.x86_clflush_size - 1));

                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                        (page_to_phys(page) & (1 << 17)) != 0;

                ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
                                        user_data, page_do_bit17_swizzling,
                                        partial_cacheline_write,
                                        needs_clflush_after);
                if (ret == 0)
                        goto next_page;

                hit_slowpath = 1;
                mutex_unlock(&dev->struct_mutex);
                ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
                                        user_data, page_do_bit17_swizzling,
                                        partial_cacheline_write,
                                        needs_clflush_after);

                mutex_lock(&dev->struct_mutex);

                if (ret)
                        goto out;

next_page:
                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

out:
        i915_gem_object_unpin_pages(obj);

        if (hit_slowpath) {
                /*
                 * Fixup: Flush cpu caches in case we didn't flush the dirty
                 * cachelines in-line while writing and the object moved
                 * out of the cpu write domain while we've dropped the lock.
                 */
                if (!needs_clflush_after &&
                    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                        if (i915_gem_clflush_object(obj, obj->pin_display))
                                i915_gem_chipset_flush(dev);
                }
        }

        if (needs_clflush_after)
                i915_gem_chipset_flush(dev);

        intel_fb_obj_flush(obj, false);
        return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_pwrite *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        if (args->size == 0)
                return 0;

        if (!access_ok(VERIFY_READ,
                       to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;

        if (likely(!i915.prefault_disable)) {
                ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
                                                   args->size);
                if (ret)
                        return -EFAULT;
        }

        intel_runtime_pm_get(dev_priv);

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                goto put_rpm;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        /* Bounds check destination. */
        if (args->offset > obj->base.size ||
            args->size > obj->base.size - args->offset) {
                ret = -EINVAL;
                goto out;
        }

        /* prime objects have no backing filp to GEM pread/pwrite
         * pages from.
         */
        if (!obj->base.filp) {
                ret = -EINVAL;
                goto out;
        }

        trace_i915_gem_object_pwrite(obj, args->offset, args->size);

        ret = -EFAULT;
        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (obj->tiling_mode == I915_TILING_NONE &&
            obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
            cpu_write_needs_clflush(obj)) {
                ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
                /* Note that the gtt paths might fail with non-page-backed user
                 * pointers (e.g. gtt mappings when moving data between
                 * textures). Fallback to the shmem path in that case. */
        }

        if (ret == -EFAULT || ret == -ENOSPC) {
                if (obj->phys_handle)
                        ret = i915_gem_phys_pwrite(obj, args, file);
                else
                        ret = i915_gem_shmem_pwrite(dev, obj, args, file);
        }

out:
        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
put_rpm:
        intel_runtime_pm_put(dev_priv);

        return ret;
}

int
i915_gem_check_wedge(struct i915_gpu_error *error,
                     bool interruptible)
{
        if (i915_reset_in_progress(error)) {
                /* Non-interruptible callers can't handle -EAGAIN, hence return
                 * -EIO unconditionally for these. */
                if (!interruptible)
                        return -EIO;

                /* Recovery complete, but the reset failed ... */
                if (i915_terminally_wedged(error))
                        return -EIO;

                /*
                 * Check if GPU Reset is in progress - we need intel_ring_begin
                 * to work properly to reinit the hw state while the gpu is
                 * still marked as reset-in-progress. Handle this with a flag.
                 */
                if (!error->reload_in_reset)
                        return -EAGAIN;
        }

        return 0;
}

/*
 * Compare arbitrary request against outstanding lazy request. Emit on match.
 */
int
i915_gem_check_olr(struct drm_i915_gem_request *req)
{
        int ret;

        WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));

        ret = 0;
        if (req == req->ring->outstanding_lazy_request)
                ret = i915_add_request(req->ring);

        return ret;
}

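/*
 * Waiters sleep on ring->irq_queue and are normally woken by the
 * ring's user interrupt. On rings with known-unreliable interrupt
 * delivery (see missed_irq()) a one-jiffy timer wakes the task so it
 * can re-check the seqno by polling instead.
 */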
static void fake_irq(unsigned long data)
{
        wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
                       struct intel_engine_cs *ring)
{
        return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

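/*
 * RPS wait-boost: the first waiter acting for a given file may bump
 * the GPU frequency so its wait completes sooner; the atomic_xchg
 * allows only one boost per file until the idle worker resets it.
 */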
static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
        if (file_priv == NULL)
                return true;

        return !atomic_xchg(&file_priv->rps_wait_boost, true);
}

/**
 * __i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @reset_counter: reset sequence associated with the given request
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the request was found within the allotted time. Otherwise
 * returns the errno, with the remaining time filled into the timeout argument.
 */
int __i915_wait_request(struct drm_i915_gem_request *req,
                        unsigned reset_counter,
                        bool interruptible,
                        s64 *timeout,
                        struct drm_i915_file_private *file_priv)
{
        struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const bool irq_test_in_progress =
                ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
        DEFINE_WAIT(wait);
        unsigned long timeout_expire;
        s64 before, now;
        int ret;

        WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");

        if (i915_gem_request_completed(req, true))
                return 0;

        timeout_expire = timeout ?
                jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;

        if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
                gen6_rps_boost(dev_priv);
                if (file_priv)
                        mod_delayed_work(dev_priv->wq,
                                         &file_priv->mm.idle_work,
                                         msecs_to_jiffies(100));
        }

        if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
                return -ENODEV;

        /* Record current time in case interrupted by signal, or wedged */
        trace_i915_gem_request_wait_begin(req);
        before = ktime_get_raw_ns();
        for (;;) {
                struct timer_list timer;

                prepare_to_wait(&ring->irq_queue, &wait,
                                interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

                /* We need to check whether any gpu reset happened in between
                 * the caller grabbing the seqno and now ... */
                if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
                        /* ... but upgrade the -EAGAIN to an -EIO if the gpu
                         * is truly gone. */
                        ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
                        if (ret == 0)
                                ret = -EAGAIN;
                        break;
                }

                if (i915_gem_request_completed(req, false)) {
                        ret = 0;
                        break;
                }

                if (interruptible && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                if (timeout && time_after_eq(jiffies, timeout_expire)) {
                        ret = -ETIME;
                        break;
                }

                timer.function = NULL;
                if (timeout || missed_irq(dev_priv, ring)) {
                        unsigned long expire;

                        setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
                        expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
                        mod_timer(&timer, expire);
                }

                io_schedule();

                if (timer.function) {
                        del_singleshot_timer_sync(&timer);
                        destroy_timer_on_stack(&timer);
                }
        }
        now = ktime_get_raw_ns();
        trace_i915_gem_request_wait_end(req);

        if (!irq_test_in_progress)
                ring->irq_put(ring);

        finish_wait(&ring->irq_queue, &wait);

        if (timeout) {
                s64 tres = *timeout - (now - before);

                *timeout = tres < 0 ? 0 : tres;

                /*
                 * Apparently ktime isn't accurate enough and occasionally has a
                 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
                 * things up to make the test happy. We allow up to 1 jiffy.
                 *
                 * This is a regression from the timespec->ktime conversion.
                 */
                if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
                        *timeout = 0;
        }

        return ret;
}

/**
 * Waits for a request to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_request(struct drm_i915_gem_request *req)
{
        struct drm_device *dev;
        struct drm_i915_private *dev_priv;
        bool interruptible;
        unsigned reset_counter;
        int ret;

        BUG_ON(req == NULL);

        dev = req->ring->dev;
        dev_priv = dev->dev_private;
        interruptible = dev_priv->mm.interruptible;

        BUG_ON(!mutex_is_locked(&dev->struct_mutex));

        ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
        if (ret)
                return ret;

        ret = i915_gem_check_olr(req);
        if (ret)
                return ret;

        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        i915_gem_request_reference(req);
        ret = __i915_wait_request(req, reset_counter,
                                  interruptible, NULL, NULL);
        i915_gem_request_unreference(req);
        return ret;
}

static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
{
        if (!obj->active)
                return 0;

        /* Manually manage the write flush as we may have not yet
         * retired the buffer.
         *
         * Note that the last_write_req is always the earlier of
         * the two (read/write) requests, so if we have successfully waited,
         * we know we have passed the last write.
         */
        i915_gem_request_assign(&obj->last_write_req, NULL);

        return 0;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                               bool readonly)
{
        struct drm_i915_gem_request *req;
        int ret;

        req = readonly ? obj->last_write_req : obj->last_read_req;
        if (!req)
                return 0;

        ret = i915_wait_request(req);
        if (ret)
                return ret;

        return i915_gem_object_wait_rendering__tail(obj);
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
                                            struct drm_i915_file_private *file_priv,
                                            bool readonly)
{
        struct drm_i915_gem_request *req;
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned reset_counter;
        int ret;

        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
        BUG_ON(!dev_priv->mm.interruptible);

        req = readonly ? obj->last_write_req : obj->last_read_req;
        if (!req)
                return 0;

        ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
        if (ret)
                return ret;

        ret = i915_gem_check_olr(req);
        if (ret)
                return ret;

        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        i915_gem_request_reference(req);
        mutex_unlock(&dev->struct_mutex);
        ret = __i915_wait_request(req, reset_counter, true, NULL, file_priv);
        mutex_lock(&dev->struct_mutex);
        i915_gem_request_unreference(req);
        if (ret)
                return ret;

        return i915_gem_object_wait_rendering__tail(obj);
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file)
{
        struct drm_i915_gem_set_domain *args = data;
        struct drm_i915_gem_object *obj;
        uint32_t read_domains = args->read_domains;
        uint32_t write_domain = args->write_domain;
        int ret;

        /* Only handle setting domains to types used by the CPU. */
        if (write_domain & I915_GEM_GPU_DOMAINS)
                return -EINVAL;

        if (read_domains & I915_GEM_GPU_DOMAINS)
                return -EINVAL;

        /* Having something in the write domain implies it's in the read
         * domain, and only that read domain.  Enforce that in the request.
         */
        if (write_domain != 0 && read_domains != write_domain)
                return -EINVAL;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        /* Try to flush the object off the GPU without holding the lock.
         * We will repeat the flush holding the lock in the normal manner
         * to catch cases where we are gazumped.
         */
        ret = i915_gem_object_wait_rendering__nonblocking(obj,
                                                          file->driver_priv,
                                                          !write_domain);
        if (ret)
                goto unref;

        if (read_domains & I915_GEM_DOMAIN_GTT)
                ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
        else
                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);

unref:
        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file)
{
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_i915_gem_object *obj;
        int ret = 0;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        /* Pinned buffers may be scanout, so flush the cache */
        if (obj->pin_display)
                i915_gem_object_flush_cpu_write_domain(obj);

        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
1539  * DRM driver writers who look at this function as an example for how to do GEM
1540  * mmap support, please don't implement mmap support like this. The modern way
1541  * to implement DRM mmap support is with an mmap offset ioctl (like
1542  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1543  * That way debug tooling like valgrind will understand what's going on; hiding
1544  * the mmap call in a driver-private ioctl will break that. The i915 driver only
1545  * does cpu mmaps this way because we didn't know better.
1546  */
1547 int
1548 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1549                     struct drm_file *file)
1550 {
1551         struct drm_i915_gem_mmap *args = data;
1552         struct drm_gem_object *obj;
1553         unsigned long addr;
1554
1555         if (args->flags & ~(I915_MMAP_WC))
1556                 return -EINVAL;
1557
1558         if (args->flags & I915_MMAP_WC && !cpu_has_pat)
1559                 return -ENODEV;
1560
1561         obj = drm_gem_object_lookup(dev, file, args->handle);
1562         if (obj == NULL)
1563                 return -ENOENT;
1564
1565         /* prime objects have no backing filp to GEM mmap
1566          * pages from.
1567          */
1568         if (!obj->filp) {
1569                 drm_gem_object_unreference_unlocked(obj);
1570                 return -EINVAL;
1571         }
1572
1573         addr = vm_mmap(obj->filp, 0, args->size,
1574                        PROT_READ | PROT_WRITE, MAP_SHARED,
1575                        args->offset);
1576         if (args->flags & I915_MMAP_WC) {
1577                 struct mm_struct *mm = current->mm;
1578                 struct vm_area_struct *vma;
1579
1580                 down_write(&mm->mmap_sem);
1581                 vma = find_vma(mm, addr);
1582                 if (vma)
1583                         vma->vm_page_prot =
1584                                 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1585                 else
1586                         addr = -ENOMEM;
1587                 up_write(&mm->mmap_sem);
1588         }
1589         drm_gem_object_unreference_unlocked(obj);
1590         if (IS_ERR((void *)addr))
1591                 return addr;
1592
1593         args->addr_ptr = (uint64_t) addr;
1594
1595         return 0;
1596 }
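/*
 * Illustrative sketch of the modern flow recommended above (not part of this
 * file): userspace fetches the fake offset with the mmap_gtt ioctl and then
 * calls the plain mmap syscall on the DRM fd, which lands in drm_gem_mmap()
 * and the fault handler below. drmIoctl() is the libdrm helper; error
 * handling is omitted.
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 */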
1597
1598 /**
1599  * i915_gem_fault - fault a page into the GTT
1600  * @vma: VMA in question
1601  * @vmf: fault info
1602  *
1603  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1604  * from userspace.  The fault handler takes care of binding the object to
1605  * the GTT (if needed), allocating and programming a fence register (again,
1606  * only if needed based on whether the old reg is still valid or the object
1607  * is tiled) and inserting a new PTE into the faulting process.
1608  *
1609  * Note that the faulting process may involve evicting existing objects
1610  * from the GTT and/or fence registers to make room.  So performance may
1611  * suffer if the GTT working set is large or there are few fence registers
1612  * left.
1613  */
1614 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1615 {
1616         struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1617         struct drm_device *dev = obj->base.dev;
1618         struct drm_i915_private *dev_priv = dev->dev_private;
1619         pgoff_t page_offset;
1620         unsigned long pfn;
1621         int ret = 0;
1622         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1623
1624         intel_runtime_pm_get(dev_priv);
1625
1626         /* We don't use vmf->pgoff since that has the fake offset */
1627         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1628                 PAGE_SHIFT;
1629
1630         ret = i915_mutex_lock_interruptible(dev);
1631         if (ret)
1632                 goto out;
1633
1634         trace_i915_gem_object_fault(obj, page_offset, true, write);
1635
1636         /* Try to flush the object off the GPU first without holding the lock.
1637          * Upon reacquiring the lock, we will perform our sanity checks and then
1638          * repeat the flush holding the lock in the normal manner to catch cases
1639          * where we are gazumped.
1640          */
1641         ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1642         if (ret)
1643                 goto unlock;
1644
1645         /* Access to snoopable pages through the GTT is incoherent. */
1646         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1647                 ret = -EFAULT;
1648                 goto unlock;
1649         }
1650
1651         /* Now bind it into the GTT if needed */
1652         ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
1653         if (ret)
1654                 goto unlock;
1655
1656         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1657         if (ret)
1658                 goto unpin;
1659
1660         ret = i915_gem_object_get_fence(obj);
1661         if (ret)
1662                 goto unpin;
1663
1664         /* Finally, remap it using the new GTT offset */
1665         pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1666         pfn >>= PAGE_SHIFT;
1667
1668         if (!obj->fault_mappable) {
1669                 unsigned long size = min_t(unsigned long,
1670                                            vma->vm_end - vma->vm_start,
1671                                            obj->base.size);
1672                 int i;
1673
1674                 for (i = 0; i < size >> PAGE_SHIFT; i++) {
1675                         ret = vm_insert_pfn(vma,
1676                                             (unsigned long)vma->vm_start + i * PAGE_SIZE,
1677                                             pfn + i);
1678                         if (ret)
1679                                 break;
1680                 }
1681
1682                 obj->fault_mappable = true;
1683         } else
1684                 ret = vm_insert_pfn(vma,
1685                                     (unsigned long)vmf->virtual_address,
1686                                     pfn + page_offset);
1687 unpin:
1688         i915_gem_object_ggtt_unpin(obj);
1689 unlock:
1690         mutex_unlock(&dev->struct_mutex);
1691 out:
1692         switch (ret) {
1693         case -EIO:
1694                 /*
1695                  * We eat errors when the gpu is terminally wedged to avoid
1696                  * userspace unduly crashing (gl has no provisions for mmaps to
1697                  * fail). But any other -EIO isn't ours (e.g. swap in failure)
1698                  * and so needs to be reported.
1699                  */
1700                 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1701                         ret = VM_FAULT_SIGBUS;
1702                         break;
1703                 }
1704         case -EAGAIN:
1705                 /*
1706                  * EAGAIN means the gpu is hung and we'll wait for the error
1707                  * handler to reset everything when re-faulting in
1708                  * i915_mutex_lock_interruptible.
1709                  */
1710         case 0:
1711         case -ERESTARTSYS:
1712         case -EINTR:
1713         case -EBUSY:
1714                 /*
1715                  * EBUSY is ok: this just means that another thread
1716                  * already did the job.
1717                  */
1718                 ret = VM_FAULT_NOPAGE;
1719                 break;
1720         case -ENOMEM:
1721                 ret = VM_FAULT_OOM;
1722                 break;
1723         case -ENOSPC:
1724         case -EFAULT:
1725                 ret = VM_FAULT_SIGBUS;
1726                 break;
1727         default:
1728                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1729                 ret = VM_FAULT_SIGBUS;
1730                 break;
1731         }
1732
1733         intel_runtime_pm_put(dev_priv);
1734         return ret;
1735 }
1736
1737 /**
1738  * i915_gem_release_mmap - remove physical page mappings
1739  * @obj: obj in question
1740  *
1741  * Preserve the reservation of the mmapping with the DRM core code, but
1742  * relinquish ownership of the pages back to the system.
1743  *
1744  * It is vital that we remove the page mapping if we have mapped a tiled
1745  * object through the GTT and then lose the fence register due to
1746  * resource pressure. Similarly if the object has been moved out of the
1747  * aperture, then pages mapped into userspace must be revoked. Removing the
1748  * mapping will then trigger a page fault on the next user access, allowing
1749  * fixup by i915_gem_fault().
1750  */
1751 void
1752 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1753 {
1754         if (!obj->fault_mappable)
1755                 return;
1756
1757         drm_vma_node_unmap(&obj->base.vma_node,
1758                            obj->base.dev->anon_inode->i_mapping);
1759         obj->fault_mappable = false;
1760 }
1761
1762 void
1763 i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1764 {
1765         struct drm_i915_gem_object *obj;
1766
1767         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1768                 i915_gem_release_mmap(obj);
1769 }
1770
1771 uint32_t
1772 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1773 {
1774         uint32_t gtt_size;
1775
1776         if (INTEL_INFO(dev)->gen >= 4 ||
1777             tiling_mode == I915_TILING_NONE)
1778                 return size;
1779
1780         /* Previous chips need a power-of-two fence region when tiling */
1781         if (INTEL_INFO(dev)->gen == 3)
1782                 gtt_size = 1024*1024;
1783         else
1784                 gtt_size = 512*1024;
1785
1786         while (gtt_size < size)
1787                 gtt_size <<= 1;
1788
1789         return gtt_size;
1790 }
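/*
 * Worked example (illustrative): a 600KB tiled object on gen3 fits in the
 * 1MB minimum fence region, while on gen2 the 512KB minimum is doubled once
 * to reach 1MB. On gen4+, or for untiled objects, the object size is used
 * unchanged.
 */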
1791
1792 /**
1793  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1794  * @obj: object to check
1795  *
1796  * Return the required GTT alignment for an object, taking into account
1797  * potential fence register mapping.
1798  */
1799 uint32_t
1800 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1801                            int tiling_mode, bool fenced)
1802 {
1803         /*
1804          * Minimum alignment is 4k (GTT page size), but might be greater
1805          * if a fence register is needed for the object.
1806          */
1807         if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1808             tiling_mode == I915_TILING_NONE)
1809                 return 4096;
1810
1811         /*
1812          * Previous chips need to be aligned to the size of the smallest
1813          * fence register that can contain the object.
1814          */
1815         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1816 }
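/*
 * Illustrative consequence: the 600KB gen3 object from the example above must
 * sit at a 1MB-aligned GTT offset when it needs a fence, whereas on gen4+,
 * for untiled objects, or when no fence is needed on g33, a 4096-byte
 * alignment always suffices.
 */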
1817
1818 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1819 {
1820         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1821         int ret;
1822
1823         if (drm_vma_node_has_offset(&obj->base.vma_node))
1824                 return 0;
1825
1826         dev_priv->mm.shrinker_no_lock_stealing = true;
1827
1828         ret = drm_gem_create_mmap_offset(&obj->base);
1829         if (ret != -ENOSPC)
1830                 goto out;
1831
1832         /* Badly fragmented mmap space? The only way we can recover
1833          * space is by destroying unwanted objects. We can't randomly release
1834          * mmap_offsets as userspace expects them to be persistent for the
1835          * lifetime of the objects. The closest we can get is to release the
1836          * offsets on purgeable objects by truncating them and marking them
1837          * purged, which prevents userspace from ever using those objects again.
1838          */
1839         i915_gem_shrink(dev_priv,
1840                         obj->base.size >> PAGE_SHIFT,
1841                         I915_SHRINK_BOUND |
1842                         I915_SHRINK_UNBOUND |
1843                         I915_SHRINK_PURGEABLE);
1844         ret = drm_gem_create_mmap_offset(&obj->base);
1845         if (ret != -ENOSPC)
1846                 goto out;
1847
1848         i915_gem_shrink_all(dev_priv);
1849         ret = drm_gem_create_mmap_offset(&obj->base);
1850 out:
1851         dev_priv->mm.shrinker_no_lock_stealing = false;
1852
1853         return ret;
1854 }
1855
1856 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1857 {
1858         drm_gem_free_mmap_offset(&obj->base);
1859 }
1860
1861 int
1862 i915_gem_mmap_gtt(struct drm_file *file,
1863                   struct drm_device *dev,
1864                   uint32_t handle,
1865                   uint64_t *offset)
1866 {
1867         struct drm_i915_private *dev_priv = dev->dev_private;
1868         struct drm_i915_gem_object *obj;
1869         int ret;
1870
1871         ret = i915_mutex_lock_interruptible(dev);
1872         if (ret)
1873                 return ret;
1874
1875         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1876         if (&obj->base == NULL) {
1877                 ret = -ENOENT;
1878                 goto unlock;
1879         }
1880
1881         if (obj->base.size > dev_priv->gtt.mappable_end) {
1882                 ret = -E2BIG;
1883                 goto out;
1884         }
1885
1886         if (obj->madv != I915_MADV_WILLNEED) {
1887                 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
1888                 ret = -EFAULT;
1889                 goto out;
1890         }
1891
1892         ret = i915_gem_object_create_mmap_offset(obj);
1893         if (ret)
1894                 goto out;
1895
1896         *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
1897
1898 out:
1899         drm_gem_object_unreference(&obj->base);
1900 unlock:
1901         mutex_unlock(&dev->struct_mutex);
1902         return ret;
1903 }
1904
1905 /**
1906  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1907  * @dev: DRM device
1908  * @data: GTT mapping ioctl data
1909  * @file: GEM object info
1910  *
1911  * Simply returns the fake offset to userspace so it can mmap it.
1912  * The mmap call will end up in drm_gem_mmap(), which will set things
1913  * up so we can get faults in the handler above.
1914  *
1915  * The fault handler will take care of binding the object into the GTT
1916  * (since it may have been evicted to make room for something), allocating
1917  * a fence register, and mapping the appropriate aperture address into
1918  * userspace.
1919  */
1920 int
1921 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1922                         struct drm_file *file)
1923 {
1924         struct drm_i915_gem_mmap_gtt *args = data;
1925
1926         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1927 }
1928
1929 /* Immediately discard the backing storage */
1930 static void
1931 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1932 {
1933         i915_gem_object_free_mmap_offset(obj);
1934
1935         if (obj->base.filp == NULL)
1936                 return;
1937
1938         /* Our goal here is to return as much of the memory as
1939          * possible back to the system, as we are called from OOM.
1940          * To do this we must instruct the shmfs to drop all of its
1941          * backing pages, *now*.
1942          */
1943         shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
1944         obj->madv = __I915_MADV_PURGED;
1945 }
1946
1947 /* Try to discard unwanted pages */
1948 static void
1949 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
1950 {
1951         struct address_space *mapping;
1952
1953         switch (obj->madv) {
1954         case I915_MADV_DONTNEED:
1955                 i915_gem_object_truncate(obj);
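                /* fall through - truncating has marked the object purged */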
1956         case __I915_MADV_PURGED:
1957                 return;
1958         }
1959
1960         if (obj->base.filp == NULL)
1961                 return;
1962
1963         mapping = file_inode(obj->base.filp)->i_mapping;
1964         invalidate_mapping_pages(mapping, 0, (loff_t)-1);
1965 }
1966
1967 static void
1968 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1969 {
1970         struct sg_page_iter sg_iter;
1971         int ret;
1972
1973         BUG_ON(obj->madv == __I915_MADV_PURGED);
1974
1975         ret = i915_gem_object_set_to_cpu_domain(obj, true);
1976         if (ret) {
1977                 /* In the event of a disaster, abandon all caches and
1978                  * hope for the best.
1979                  */
1980                 WARN_ON(ret != -EIO);
1981                 i915_gem_clflush_object(obj, true);
1982                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1983         }
1984
1985         if (i915_gem_object_needs_bit17_swizzle(obj))
1986                 i915_gem_object_save_bit_17_swizzle(obj);
1987
1988         if (obj->madv == I915_MADV_DONTNEED)
1989                 obj->dirty = 0;
1990
1991         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
1992                 struct page *page = sg_page_iter_page(&sg_iter);
1993
1994                 if (obj->dirty)
1995                         set_page_dirty(page);
1996
1997                 if (obj->madv == I915_MADV_WILLNEED)
1998                         mark_page_accessed(page);
1999
2000                 page_cache_release(page);
2001         }
2002         obj->dirty = 0;
2003
2004         sg_free_table(obj->pages);
2005         kfree(obj->pages);
2006 }
2007
2008 int
2009 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2010 {
2011         const struct drm_i915_gem_object_ops *ops = obj->ops;
2012
2013         if (obj->pages == NULL)
2014                 return 0;
2015
2016         if (obj->pages_pin_count)
2017                 return -EBUSY;
2018
2019         BUG_ON(i915_gem_obj_bound_any(obj));
2020
2021         /* ->put_pages might need to allocate memory for the bit17 swizzle
2022          * array, hence protect the pages from being reaped by removing the
2023          * object from the gtt lists early. */
2024         list_del(&obj->global_list);
2025
2026         ops->put_pages(obj);
2027         obj->pages = NULL;
2028
2029         i915_gem_object_invalidate(obj);
2030
2031         return 0;
2032 }
2033
2034 static int
2035 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2036 {
2037         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2038         int page_count, i;
2039         struct address_space *mapping;
2040         struct sg_table *st;
2041         struct scatterlist *sg;
2042         struct sg_page_iter sg_iter;
2043         struct page *page;
2044         unsigned long last_pfn = 0;     /* suppress gcc warning */
2045         gfp_t gfp;
2046
2047         /* Assert that the object is not currently in any GPU domain. As it
2048          * wasn't in the GTT, there shouldn't be any way it could have been in
2049          * a GPU cache
2050          */
2051         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2052         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2053
2054         st = kmalloc(sizeof(*st), GFP_KERNEL);
2055         if (st == NULL)
2056                 return -ENOMEM;
2057
2058         page_count = obj->base.size / PAGE_SIZE;
2059         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2060                 kfree(st);
2061                 return -ENOMEM;
2062         }
2063
2064         /* Get the list of pages out of our struct file.  They'll be pinned
2065          * at this point until we release them.
2066          *
2067          * Fail silently without starting the shrinker
2068          */
2069         mapping = file_inode(obj->base.filp)->i_mapping;
2070         gfp = mapping_gfp_mask(mapping);
2071         gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
2072         gfp &= ~(__GFP_IO | __GFP_WAIT);
2073         sg = st->sgl;
2074         st->nents = 0;
2075         for (i = 0; i < page_count; i++) {
2076                 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2077                 if (IS_ERR(page)) {
2078                         i915_gem_shrink(dev_priv,
2079                                         page_count,
2080                                         I915_SHRINK_BOUND |
2081                                         I915_SHRINK_UNBOUND |
2082                                         I915_SHRINK_PURGEABLE);
2083                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2084                 }
2085                 if (IS_ERR(page)) {
2086                         /* We've tried hard to allocate the memory by reaping
2087                          * our own buffer, now let the real VM do its job and
2088                          * go down in flames if truly OOM.
2089                          */
2090                         i915_gem_shrink_all(dev_priv);
2091                         page = shmem_read_mapping_page(mapping, i);
2092                         if (IS_ERR(page))
2093                                 goto err_pages;
2094                 }
2095 #ifdef CONFIG_SWIOTLB
2096                 if (swiotlb_nr_tbl()) {
2097                         st->nents++;
2098                         sg_set_page(sg, page, PAGE_SIZE, 0);
2099                         sg = sg_next(sg);
2100                         continue;
2101                 }
2102 #endif
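                /* Coalesce pages that are physically contiguous with the tail
                 * of the previous scatterlist entry; otherwise start a new
                 * entry. This keeps st->nents small when the allocator hands
                 * back contiguous pages.
                 */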
2103                 if (!i || page_to_pfn(page) != last_pfn + 1) {
2104                         if (i)
2105                                 sg = sg_next(sg);
2106                         st->nents++;
2107                         sg_set_page(sg, page, PAGE_SIZE, 0);
2108                 } else {
2109                         sg->length += PAGE_SIZE;
2110                 }
2111                 last_pfn = page_to_pfn(page);
2112
2113                 /* Check that the i965g/gm workaround works. */
2114                 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2115         }
2116 #ifdef CONFIG_SWIOTLB
2117         if (!swiotlb_nr_tbl())
2118 #endif
2119                 sg_mark_end(sg);
2120         obj->pages = st;
2121
2122         if (i915_gem_object_needs_bit17_swizzle(obj))
2123                 i915_gem_object_do_bit_17_swizzle(obj);
2124
2125         if (obj->tiling_mode != I915_TILING_NONE &&
2126             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2127                 i915_gem_object_pin_pages(obj);
2128
2129         return 0;
2130
2131 err_pages:
2132         sg_mark_end(sg);
2133         for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
2134                 page_cache_release(sg_page_iter_page(&sg_iter));
2135         sg_free_table(st);
2136         kfree(st);
2137
2138         /* shmemfs first checks if there is enough memory to allocate the page
2139          * and reports ENOSPC should there be insufficient, along with the usual
2140          * ENOMEM for a genuine allocation failure.
2141          *
2142          * We use ENOSPC in our driver to mean that we have run out of aperture
2143          * space and so want to translate the error from shmemfs back to our
2144          * usual understanding of ENOMEM.
2145          */
2146         if (PTR_ERR(page) == -ENOSPC)
2147                 return -ENOMEM;
2148         else
2149                 return PTR_ERR(page);
2150 }
2151
2152 /* Ensure that the associated pages are gathered from the backing storage
2153  * and pinned into our object. i915_gem_object_get_pages() may be called
2154  * multiple times before they are released by a single call to
2155  * i915_gem_object_put_pages() - once the pages are no longer referenced
2156  * either as a result of memory pressure (reaping pages under the shrinker)
2157  * or as the object is itself released.
2158  */
2159 int
2160 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2161 {
2162         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2163         const struct drm_i915_gem_object_ops *ops = obj->ops;
2164         int ret;
2165
2166         if (obj->pages)
2167                 return 0;
2168
2169         if (obj->madv != I915_MADV_WILLNEED) {
2170                 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2171                 return -EFAULT;
2172         }
2173
2174         BUG_ON(obj->pages_pin_count);
2175
2176         ret = ops->get_pages(obj);
2177         if (ret)
2178                 return ret;
2179
2180         list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2181         return 0;
2182 }
2183
2184 static void
2185 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2186                                struct intel_engine_cs *ring)
2187 {
2188         struct drm_i915_gem_request *req;
2189         struct intel_engine_cs *old_ring;
2190
2191         BUG_ON(ring == NULL);
2192
2193         req = intel_ring_get_request(ring);
2194         old_ring = i915_gem_request_get_ring(obj->last_read_req);
2195
2196         if (old_ring != ring && obj->last_write_req) {
2197                 /* Keep the request relative to the current ring */
2198                 i915_gem_request_assign(&obj->last_write_req, req);
2199         }
2200
2201         /* Add a reference if we're newly entering the active list. */
2202         if (!obj->active) {
2203                 drm_gem_object_reference(&obj->base);
2204                 obj->active = 1;
2205         }
2206
2207         list_move_tail(&obj->ring_list, &ring->active_list);
2208
2209         i915_gem_request_assign(&obj->last_read_req, req);
2210 }
2211
2212 void i915_vma_move_to_active(struct i915_vma *vma,
2213                              struct intel_engine_cs *ring)
2214 {
2215         list_move_tail(&vma->mm_list, &vma->vm->active_list);
2216         return i915_gem_object_move_to_active(vma->obj, ring);
2217 }
2218
2219 static void
2220 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2221 {
2222         struct i915_vma *vma;
2223
2224         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2225         BUG_ON(!obj->active);
2226
2227         list_for_each_entry(vma, &obj->vma_list, vma_link) {
2228                 if (!list_empty(&vma->mm_list))
2229                         list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
2230         }
2231
2232         intel_fb_obj_flush(obj, true);
2233
2234         list_del_init(&obj->ring_list);
2235
2236         i915_gem_request_assign(&obj->last_read_req, NULL);
2237         i915_gem_request_assign(&obj->last_write_req, NULL);
2238         obj->base.write_domain = 0;
2239
2240         i915_gem_request_assign(&obj->last_fenced_req, NULL);
2241
2242         obj->active = 0;
2243         drm_gem_object_unreference(&obj->base);
2244
2245         WARN_ON(i915_verify_lists(dev));
2246 }
2247
2248 static void
2249 i915_gem_object_retire(struct drm_i915_gem_object *obj)
2250 {
2251         if (obj->last_read_req == NULL)
2252                 return;
2253
2254         if (i915_gem_request_completed(obj->last_read_req, true))
2255                 i915_gem_object_move_to_inactive(obj);
2256 }
2257
2258 static int
2259 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2260 {
2261         struct drm_i915_private *dev_priv = dev->dev_private;
2262         struct intel_engine_cs *ring;
2263         int ret, i, j;
2264
2265         /* Carefully retire all requests without writing to the rings */
2266         for_each_ring(ring, dev_priv, i) {
2267                 ret = intel_ring_idle(ring);
2268                 if (ret)
2269                         return ret;
2270         }
2271         i915_gem_retire_requests(dev);
2272
2273         /* Finally reset hw state */
2274         for_each_ring(ring, dev_priv, i) {
2275                 intel_ring_init_seqno(ring, seqno);
2276
2277                 for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
2278                         ring->semaphore.sync_seqno[j] = 0;
2279         }
2280
2281         return 0;
2282 }
2283
2284 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2285 {
2286         struct drm_i915_private *dev_priv = dev->dev_private;
2287         int ret;
2288
2289         if (seqno == 0)
2290                 return -EINVAL;
2291
2292         /* The HWS page needs to be set to a seqno less than
2293          * what we will inject into the ring
2294          */
2295         ret = i915_gem_init_seqno(dev, seqno - 1);
2296         if (ret)
2297                 return ret;
2298
2299         /* Carefully set the last_seqno value so that wrap
2300          * detection still works
2301          */
2302         dev_priv->next_seqno = seqno;
2303         dev_priv->last_seqno = seqno - 1;
2304         if (dev_priv->last_seqno == 0)
2305                 dev_priv->last_seqno--;
2306
2307         return 0;
2308 }
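/*
 * Example (illustrative): i915_gem_set_seqno(dev, 1) idles the rings, writes
 * seqno 0 into the hardware status pages, and records last_seqno as
 * 0xffffffff rather than 0, so the wrap-aware comparisons still treat the
 * next emitted seqno (1) as newer.
 */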
2309
2310 int
2311 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2312 {
2313         struct drm_i915_private *dev_priv = dev->dev_private;
2314
2315         /* reserve 0 for non-seqno */
2316         if (dev_priv->next_seqno == 0) {
2317                 int ret = i915_gem_init_seqno(dev, 0);
2318                 if (ret)
2319                         return ret;
2320
2321                 dev_priv->next_seqno = 1;
2322         }
2323
2324         *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
2325         return 0;
2326 }
2327
2328 int __i915_add_request(struct intel_engine_cs *ring,
2329                        struct drm_file *file,
2330                        struct drm_i915_gem_object *obj)
2331 {
2332         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2333         struct drm_i915_gem_request *request;
2334         struct intel_ringbuffer *ringbuf;
2335         u32 request_start;
2336         int ret;
2337
2338         request = ring->outstanding_lazy_request;
2339         if (WARN_ON(request == NULL))
2340                 return -ENOMEM;
2341
2342         if (i915.enable_execlists)
2343                 ringbuf = request->ctx->engine[ring->id].ringbuf;
2344         else
2345                 ringbuf = ring->buffer;
2346
2347         request_start = intel_ring_get_tail(ringbuf);
2348         /*
2349          * Emit any outstanding flushes - execbuf can fail to emit the flush
2350          * after having emitted the batchbuffer command. Hence we need to fix
2351          * things up similar to emitting the lazy request. The difference here
2352          * is that the flush _must_ happen before the next request, no matter
2353          * what.
2354          */
2355         if (i915.enable_execlists) {
2356                 ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
2357                 if (ret)
2358                         return ret;
2359         } else {
2360                 ret = intel_ring_flush_all_caches(ring);
2361                 if (ret)
2362                         return ret;
2363         }
2364
2365         /* Record the position of the start of the request so that
2366          * should we detect the updated seqno part-way through the
2367          * GPU processing the request, we never over-estimate the
2368          * position of the head.
2369          */
2370         request->postfix = intel_ring_get_tail(ringbuf);
2371
2372         if (i915.enable_execlists) {
2373                 ret = ring->emit_request(ringbuf, request);
2374                 if (ret)
2375                         return ret;
2376         } else {
2377                 ret = ring->add_request(ring);
2378                 if (ret)
2379                         return ret;
2380         }
2381
2382         request->head = request_start;
2383         request->tail = intel_ring_get_tail(ringbuf);
2384
2385         /* Whilst this request exists, batch_obj will be on the
2386          * active_list, and so will hold the active reference. Only when this
2387          * request is retired will the batch_obj be moved onto the
2388          * inactive_list and lose its active reference. Hence we do not need
2389          * to explicitly hold another reference here.
2390          */
2391         request->batch_obj = obj;
2392
2393         if (!i915.enable_execlists) {
2394                 /* Hold a reference to the current context so that we can inspect
2395                  * it later in case a hangcheck error event fires.
2396                  */
2397                 request->ctx = ring->last_context;
2398                 if (request->ctx)
2399                         i915_gem_context_reference(request->ctx);
2400         }
2401
2402         request->emitted_jiffies = jiffies;
2403         list_add_tail(&request->list, &ring->request_list);
2404         request->file_priv = NULL;
2405
2406         if (file) {
2407                 struct drm_i915_file_private *file_priv = file->driver_priv;
2408
2409                 spin_lock(&file_priv->mm.lock);
2410                 request->file_priv = file_priv;
2411                 list_add_tail(&request->client_list,
2412                               &file_priv->mm.request_list);
2413                 spin_unlock(&file_priv->mm.lock);
2414
2415                 request->pid = get_pid(task_pid(current));
2416         }
2417
2418         trace_i915_gem_request_add(request);
2419         ring->outstanding_lazy_request = NULL;
2420
2421         i915_queue_hangcheck(ring->dev);
2422
2423         cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2424         queue_delayed_work(dev_priv->wq,
2425                            &dev_priv->mm.retire_work,
2426                            round_jiffies_up_relative(HZ));
2427         intel_mark_busy(dev_priv->dev);
2428
2429         return 0;
2430 }
2431
2432 static inline void
2433 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2434 {
2435         struct drm_i915_file_private *file_priv = request->file_priv;
2436
2437         if (!file_priv)
2438                 return;
2439
2440         spin_lock(&file_priv->mm.lock);
2441         list_del(&request->client_list);
2442         request->file_priv = NULL;
2443         spin_unlock(&file_priv->mm.lock);
2444 }
2445
2446 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2447                                    const struct intel_context *ctx)
2448 {
2449         unsigned long elapsed;
2450
2451         elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2452
2453         if (ctx->hang_stats.banned)
2454                 return true;
2455
2456         if (ctx->hang_stats.ban_period_seconds &&
2457             elapsed <= ctx->hang_stats.ban_period_seconds) {
2458                 if (!i915_gem_context_is_default(ctx)) {
2459                         DRM_DEBUG("context hanging too fast, banning!\n");
2460                         return true;
2461                 } else if (i915_stop_ring_allow_ban(dev_priv)) {
2462                         if (i915_stop_ring_allow_warn(dev_priv))
2463                                 DRM_ERROR("gpu hanging too fast, banning!\n");
2464                         return true;
2465                 }
2466         }
2467
2468         return false;
2469 }
2470
2471 static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2472                                   struct intel_context *ctx,
2473                                   const bool guilty)
2474 {
2475         struct i915_ctx_hang_stats *hs;
2476
2477         if (WARN_ON(!ctx))
2478                 return;
2479
2480         hs = &ctx->hang_stats;
2481
2482         if (guilty) {
2483                 hs->banned = i915_context_is_banned(dev_priv, ctx);
2484                 hs->batch_active++;
2485                 hs->guilty_ts = get_seconds();
2486         } else {
2487                 hs->batch_pending++;
2488         }
2489 }
2490
2491 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2492 {
2493         list_del(&request->list);
2494         i915_gem_request_remove_from_client(request);
2495
2496         put_pid(request->pid);
2497
2498         i915_gem_request_unreference(request);
2499 }
2500
2501 void i915_gem_request_free(struct kref *req_ref)
2502 {
2503         struct drm_i915_gem_request *req = container_of(req_ref,
2504                                                  typeof(*req), ref);
2505         struct intel_context *ctx = req->ctx;
2506
2507         if (ctx) {
2508                 if (i915.enable_execlists) {
2509                         struct intel_engine_cs *ring = req->ring;
2510
2511                         if (ctx != ring->default_context)
2512                                 intel_lr_context_unpin(ring, ctx);
2513                 }
2514
2515                 i915_gem_context_unreference(ctx);
2516         }
2517
2518         kfree(req);
2519 }
2520
2521 struct drm_i915_gem_request *
2522 i915_gem_find_active_request(struct intel_engine_cs *ring)
2523 {
2524         struct drm_i915_gem_request *request;
2525
2526         list_for_each_entry(request, &ring->request_list, list) {
2527                 if (i915_gem_request_completed(request, false))
2528                         continue;
2529
2530                 return request;
2531         }
2532
2533         return NULL;
2534 }
2535
2536 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2537                                        struct intel_engine_cs *ring)
2538 {
2539         struct drm_i915_gem_request *request;
2540         bool ring_hung;
2541
2542         request = i915_gem_find_active_request(ring);
2543
2544         if (request == NULL)
2545                 return;
2546
2547         ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2548
2549         i915_set_reset_status(dev_priv, request->ctx, ring_hung);
2550
2551         list_for_each_entry_continue(request, &ring->request_list, list)
2552                 i915_set_reset_status(dev_priv, request->ctx, false);
2553 }
2554
2555 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2556                                         struct intel_engine_cs *ring)
2557 {
2558         while (!list_empty(&ring->active_list)) {
2559                 struct drm_i915_gem_object *obj;
2560
2561                 obj = list_first_entry(&ring->active_list,
2562                                        struct drm_i915_gem_object,
2563                                        ring_list);
2564
2565                 i915_gem_object_move_to_inactive(obj);
2566         }
2567
2568         /*
2569          * Clear up the execlists queue before freeing the requests, as those
2570          * are the ones that keep the context and ringbuffer backing objects
2571          * pinned in place.
2572          */
2573         while (!list_empty(&ring->execlist_queue)) {
2574                 struct drm_i915_gem_request *submit_req;
2575
2576                 submit_req = list_first_entry(&ring->execlist_queue,
2577                                 struct drm_i915_gem_request,
2578                                 execlist_link);
2579                 list_del(&submit_req->execlist_link);
2580                 intel_runtime_pm_put(dev_priv);
2581
2582                 if (submit_req->ctx != ring->default_context)
2583                         intel_lr_context_unpin(ring, submit_req->ctx);
2584
2585                 i915_gem_request_unreference(submit_req);
2586         }
2587
2588         /*
2589          * We must free the requests after all the corresponding objects have
2590          * been moved off the active lists, which is the same order the normal
2591          * retire_requests function uses. This is important if objects hold
2592          * implicit references on things like e.g. ppgtt address spaces through
2593          * the request.
2594          */
2595         while (!list_empty(&ring->request_list)) {
2596                 struct drm_i915_gem_request *request;
2597
2598                 request = list_first_entry(&ring->request_list,
2599                                            struct drm_i915_gem_request,
2600                                            list);
2601
2602                 i915_gem_free_request(request);
2603         }
2604
2605         /* This may not have been flushed before the reset, so clean it now */
2606         i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
2607 }
2608
2609 void i915_gem_restore_fences(struct drm_device *dev)
2610 {
2611         struct drm_i915_private *dev_priv = dev->dev_private;
2612         int i;
2613
2614         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2615                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2616
2617                 /*
2618                  * Commit delayed tiling changes if we have an object still
2619                  * attached to the fence, otherwise just clear the fence.
2620                  */
2621                 if (reg->obj) {
2622                         i915_gem_object_update_fence(reg->obj, reg,
2623                                                      reg->obj->tiling_mode);
2624                 } else {
2625                         i915_gem_write_fence(dev, i, NULL);
2626                 }
2627         }
2628 }
2629
2630 void i915_gem_reset(struct drm_device *dev)
2631 {
2632         struct drm_i915_private *dev_priv = dev->dev_private;
2633         struct intel_engine_cs *ring;
2634         int i;
2635
2636         /*
2637          * Before we free the objects from the requests, we need to inspect
2638          * them for finding the guilty party. As the requests only borrow
2639          * their reference to the objects, the inspection must be done first.
2640          */
2641         for_each_ring(ring, dev_priv, i)
2642                 i915_gem_reset_ring_status(dev_priv, ring);
2643
2644         for_each_ring(ring, dev_priv, i)
2645                 i915_gem_reset_ring_cleanup(dev_priv, ring);
2646
2647         i915_gem_context_reset(dev);
2648
2649         i915_gem_restore_fences(dev);
2650 }
2651
2652 /**
2653  * This function clears the request list as sequence numbers are passed.
2654  */
2655 void
2656 i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2657 {
2658         if (list_empty(&ring->request_list))
2659                 return;
2660
2661         WARN_ON(i915_verify_lists(ring->dev));
2662
2663         /* Move any buffers on the active list that are no longer referenced
2664          * by the ringbuffer to the flushing/inactive lists as appropriate,
2665          * before we free the context associated with the requests.
2666          */
2667         while (!list_empty(&ring->active_list)) {
2668                 struct drm_i915_gem_object *obj;
2669
2670                 obj = list_first_entry(&ring->active_list,
2671                                       struct drm_i915_gem_object,
2672                                       ring_list);
2673
2674                 if (!i915_gem_request_completed(obj->last_read_req, true))
2675                         break;
2676
2677                 i915_gem_object_move_to_inactive(obj);
2678         }
2679
2681         while (!list_empty(&ring->request_list)) {
2682                 struct drm_i915_gem_request *request;
2683
2684                 request = list_first_entry(&ring->request_list,
2685                                            struct drm_i915_gem_request,
2686                                            list);
2687
2688                 if (!i915_gem_request_completed(request, true))
2689                         break;
2690
2691                 trace_i915_gem_request_retire(request);
2692
2693                 /* We know the GPU must have read the request to have
2694                  * sent us the seqno + interrupt, so use the position
2695                  * of tail of the request to update the last known position
2696                  * of the GPU head.
2697                  */
2698                 request->ringbuf->last_retired_head = request->postfix;
2699
2700                 i915_gem_free_request(request);
2701         }
2702
2703         if (unlikely(ring->trace_irq_req &&
2704                      i915_gem_request_completed(ring->trace_irq_req, true))) {
2705                 ring->irq_put(ring);
2706                 i915_gem_request_assign(&ring->trace_irq_req, NULL);
2707         }
2708
2709         WARN_ON(i915_verify_lists(ring->dev));
2710 }
2711
2712 bool
2713 i915_gem_retire_requests(struct drm_device *dev)
2714 {
2715         struct drm_i915_private *dev_priv = dev->dev_private;
2716         struct intel_engine_cs *ring;
2717         bool idle = true;
2718         int i;
2719
2720         for_each_ring(ring, dev_priv, i) {
2721                 i915_gem_retire_requests_ring(ring);
2722                 idle &= list_empty(&ring->request_list);
2723                 if (i915.enable_execlists) {
2724                         unsigned long flags;
2725
2726                         spin_lock_irqsave(&ring->execlist_lock, flags);
2727                         idle &= list_empty(&ring->execlist_queue);
2728                         spin_unlock_irqrestore(&ring->execlist_lock, flags);
2729
2730                         intel_execlists_retire_requests(ring);
2731                 }
2732         }
2733
2734         if (idle)
2735                 mod_delayed_work(dev_priv->wq,
2736                                    &dev_priv->mm.idle_work,
2737                                    msecs_to_jiffies(100));
2738
2739         return idle;
2740 }
2741
2742 static void
2743 i915_gem_retire_work_handler(struct work_struct *work)
2744 {
2745         struct drm_i915_private *dev_priv =
2746                 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2747         struct drm_device *dev = dev_priv->dev;
2748         bool idle;
2749
2750         /* Come back later if the device is busy... */
2751         idle = false;
2752         if (mutex_trylock(&dev->struct_mutex)) {
2753                 idle = i915_gem_retire_requests(dev);
2754                 mutex_unlock(&dev->struct_mutex);
2755         }
2756         if (!idle)
2757                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2758                                    round_jiffies_up_relative(HZ));
2759 }
2760
2761 static void
2762 i915_gem_idle_work_handler(struct work_struct *work)
2763 {
2764         struct drm_i915_private *dev_priv =
2765                 container_of(work, typeof(*dev_priv), mm.idle_work.work);
2766
2767         intel_mark_idle(dev_priv->dev);
2768 }
2769
2770 /**
2771  * Ensures that an object will eventually get non-busy by flushing any required
2772  * write domains, emitting any outstanding lazy request and retiring any
2773  * completed requests.
2774  */
2775 static int
2776 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2777 {
2778         struct intel_engine_cs *ring;
2779         int ret;
2780
2781         if (obj->active) {
2782                 ring = i915_gem_request_get_ring(obj->last_read_req);
2783
2784                 ret = i915_gem_check_olr(obj->last_read_req);
2785                 if (ret)
2786                         return ret;
2787
2788                 i915_gem_retire_requests_ring(ring);
2789         }
2790
2791         return 0;
2792 }
2793
2794 /**
2795  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2796  * @DRM_IOCTL_ARGS: standard ioctl arguments
2797  *
2798  * Returns 0 if successful, else an error is returned with the remaining time in
2799  * the timeout parameter.
2800  *  -ETIME: object is still busy after timeout
2801  *  -ERESTARTSYS: signal interrupted the wait
2802  *  -ENOENT: object doesn't exist
2803  * Also possible, but rare:
2804  *  -EAGAIN: GPU wedged
2805  *  -ENOMEM: damn
2806  *  -ENODEV: Internal IRQ fail
2807  *  -E?: The add request failed
2808  *
2809  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2810  * non-zero timeout parameter the wait ioctl will wait for the given number of
2811  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2812  * without holding struct_mutex the object may become re-busied before this
2813  * function completes. A similar but shorter race condition exists in the busy
2814  * ioctl.
2815  */
2816 int
2817 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2818 {
2819         struct drm_i915_private *dev_priv = dev->dev_private;
2820         struct drm_i915_gem_wait *args = data;
2821         struct drm_i915_gem_object *obj;
2822         struct drm_i915_gem_request *req;
2823         unsigned reset_counter;
2824         int ret = 0;
2825
2826         if (args->flags != 0)
2827                 return -EINVAL;
2828
2829         ret = i915_mutex_lock_interruptible(dev);
2830         if (ret)
2831                 return ret;
2832
2833         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2834         if (&obj->base == NULL) {
2835                 mutex_unlock(&dev->struct_mutex);
2836                 return -ENOENT;
2837         }
2838
2839         /* Need to make sure the object gets inactive eventually. */
2840         ret = i915_gem_object_flush_active(obj);
2841         if (ret)
2842                 goto out;
2843
2844         if (!obj->active || !obj->last_read_req)
2845                 goto out;
2846
2847         req = obj->last_read_req;
2848
2849         /* Do this after OLR check to make sure we make forward progress polling
2850          * on this IOCTL with a timeout == 0 (like busy ioctl)
2851          */
2852         if (args->timeout_ns == 0) {
2853                 ret = -ETIME;
2854                 goto out;
2855         }
2856
2857         drm_gem_object_unreference(&obj->base);
2858         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2859         i915_gem_request_reference(req);
2860         mutex_unlock(&dev->struct_mutex);
2861
2862         ret = __i915_wait_request(req, reset_counter, true,
2863                                   args->timeout_ns > 0 ? &args->timeout_ns : NULL,
2864                                   file->driver_priv);
2865         mutex_lock(&dev->struct_mutex);
2866         i915_gem_request_unreference(req);
2867         mutex_unlock(&dev->struct_mutex);
2868         return ret;
2869
2870 out:
2871         drm_gem_object_unreference(&obj->base);
2872         mutex_unlock(&dev->struct_mutex);
2873         return ret;
2874 }
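/*
 * Illustrative userspace sketch (not part of this file): polling for
 * idleness with a zero timeout, as described above. drmIoctl() is the
 * libdrm helper and returns -1 with errno set on failure.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 0,
 *	};
 *	int busy = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == -1 &&
 *		   errno == ETIME;
 */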
2875
2876 /**
2877  * i915_gem_object_sync - sync an object to a ring.
2878  *
2879  * @obj: object which may be in use on another ring.
2880  * @to: ring we wish to use the object on. May be NULL.
2881  *
2882  * This code is meant to abstract object synchronization with the GPU.
2883  * Calling with NULL implies synchronizing the object with the CPU
2884  * rather than a particular GPU ring.
2885  *
2886  * Returns 0 if successful, else propagates up the lower layer error.
2887  */
2888 int
2889 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2890                      struct intel_engine_cs *to)
2891 {
2892         struct intel_engine_cs *from;
2893         u32 seqno;
2894         int ret, idx;
2895
2896         from = i915_gem_request_get_ring(obj->last_read_req);
2897
2898         if (from == NULL || to == from)
2899                 return 0;
2900
2901         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2902                 return i915_gem_object_wait_rendering(obj, false);
2903
2904         idx = intel_ring_sync_index(from, to);
2905
2906         seqno = i915_gem_request_get_seqno(obj->last_read_req);
2907         /* Optimization: Avoid semaphore sync when we are sure we already
2908          * waited for an object with higher seqno */
2909         if (seqno <= from->semaphore.sync_seqno[idx])
2910                 return 0;
2911
2912         ret = i915_gem_check_olr(obj->last_read_req);
2913         if (ret)
2914                 return ret;
2915
2916         trace_i915_gem_ring_sync_to(from, to, obj->last_read_req);
2917         ret = to->semaphore.sync_to(to, from, seqno);
2918         if (!ret)
2919                 /* We use last_read_req because sync_to()
2920                  * might have just caused seqno wrap under
2921                  * the radar.
2922                  */
2923                 from->semaphore.sync_seqno[idx] =
2924                                 i915_gem_request_get_seqno(obj->last_read_req);
2925
2926         return ret;
2927 }
2928
2929 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2930 {
2931         u32 old_write_domain, old_read_domains;
2932
2933         /* Force a pagefault for domain tracking on next user access */
2934         i915_gem_release_mmap(obj);
2935
2936         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2937                 return;
2938
2939         /* Wait for any direct GTT access to complete */
2940         mb();
2941
2942         old_read_domains = obj->base.read_domains;
2943         old_write_domain = obj->base.write_domain;
2944
2945         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2946         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2947
2948         trace_i915_gem_object_change_domain(obj,
2949                                             old_read_domains,
2950                                             old_write_domain);
2951 }
2952
2953 int i915_vma_unbind(struct i915_vma *vma)
2954 {
2955         struct drm_i915_gem_object *obj = vma->obj;
2956         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2957         int ret;
2958
2959         if (list_empty(&vma->vma_link))
2960                 return 0;
2961
2962         if (!drm_mm_node_allocated(&vma->node)) {
2963                 i915_gem_vma_destroy(vma);
2964                 return 0;
2965         }
2966
2967         if (vma->pin_count)
2968                 return -EBUSY;
2969
2970         BUG_ON(obj->pages == NULL);
2971
2972         ret = i915_gem_object_finish_gpu(obj);
2973         if (ret)
2974                 return ret;
2975         /* Continue on if we fail due to EIO; the GPU is hung, so we
2976          * should be safe, and we need to clean up or else we might
2977          * cause memory corruption through use-after-free.
2978          */
2979
2980         if (i915_is_ggtt(vma->vm) &&
2981             vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
2982                 i915_gem_object_finish_gtt(obj);
2983
2984                 /* release the fence reg _after_ flushing */
2985                 ret = i915_gem_object_put_fence(obj);
2986                 if (ret)
2987                         return ret;
2988         }
2989
2990         trace_i915_vma_unbind(vma);
2991
2992         vma->unbind_vma(vma);
2993
2994         list_del_init(&vma->mm_list);
2995         if (i915_is_ggtt(vma->vm)) {
2996                 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
2997                         obj->map_and_fenceable = false;
2998                 } else if (vma->ggtt_view.pages) {
2999                         sg_free_table(vma->ggtt_view.pages);
3000                         kfree(vma->ggtt_view.pages);
3001                         vma->ggtt_view.pages = NULL;
3002                 }
3003         }
3004
3005         drm_mm_remove_node(&vma->node);
3006         i915_gem_vma_destroy(vma);
3007
3008         /* Since the unbound list is global, only move to that list if
3009          * no more VMAs exist. */
3010         if (list_empty(&obj->vma_list)) {
3011                 /* Throw away the active reference before
3012                  * moving to the unbound list. */
3013                 i915_gem_object_retire(obj);
3014
3015                 i915_gem_gtt_finish_object(obj);
3016                 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
3017         }
3018
3019         /* And finally now the object is completely decoupled from this vma,
3020          * we can drop its hold on the backing storage and allow it to be
3021          * reaped by the shrinker.
3022          */
3023         i915_gem_object_unpin_pages(obj);
3024
3025         return 0;
3026 }
3027
3028 int i915_gpu_idle(struct drm_device *dev)
3029 {
3030         struct drm_i915_private *dev_priv = dev->dev_private;
3031         struct intel_engine_cs *ring;
3032         int ret, i;
3033
3034         /* Flush everything onto the inactive list. */
3035         for_each_ring(ring, dev_priv, i) {
3036                 if (!i915.enable_execlists) {
3037                         ret = i915_switch_context(ring, ring->default_context);
3038                         if (ret)
3039                                 return ret;
3040                 }
3041
3042                 ret = intel_ring_idle(ring);
3043                 if (ret)
3044                         return ret;
3045         }
3046
3047         return 0;
3048 }
3049
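/*
 * Gen4+ fence registers are 64 bits wide: one dword carries the
 * (page-aligned) start of the fenced range together with the tiling and
 * valid bits, the other the end of the range, and the pitch is encoded
 * in 128-byte units, e.g. a 512-byte stride is written as
 * 512 / 128 - 1 = 3.
 */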
3050 static void i965_write_fence_reg(struct drm_device *dev, int reg,
3051                                  struct drm_i915_gem_object *obj)
3052 {
3053         struct drm_i915_private *dev_priv = dev->dev_private;
3054         int fence_reg;
3055         int fence_pitch_shift;
3056
3057         if (INTEL_INFO(dev)->gen >= 6) {
3058                 fence_reg = FENCE_REG_SANDYBRIDGE_0;
3059                 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
3060         } else {
3061                 fence_reg = FENCE_REG_965_0;
3062                 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
3063         }
3064
3065         fence_reg += reg * 8;
3066
3067         /* To work around incoherency with non-atomic 64-bit register updates,
3068          * we split the 64-bit update into two 32-bit writes. In order
3069          * for a partial fence not to be evaluated between writes, we
3070          * precede the update with write to turn off the fence register,
3071          * and only enable the fence as the last step.
3072          *
3073          * For extra levels of paranoia, we make sure each step lands
3074          * before applying the next step.
3075          */
3076         I915_WRITE(fence_reg, 0);
3077         POSTING_READ(fence_reg);
3078
3079         if (obj) {
3080                 u32 size = i915_gem_obj_ggtt_size(obj);
3081                 uint64_t val;
3082
3083                 /* Adjust fence size to match tiled area */
3084                 if (obj->tiling_mode != I915_TILING_NONE) {
3085                         uint32_t row_size = obj->stride *
3086                                 (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
3087                         size = (size / row_size) * row_size;
3088                 }
3089
3090                 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
3091                                  0xfffff000) << 32;
3092                 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
3093                 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
3094                 if (obj->tiling_mode == I915_TILING_Y)
3095                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
3096                 val |= I965_FENCE_REG_VALID;
3097
3098                 I915_WRITE(fence_reg + 4, val >> 32);
3099                 POSTING_READ(fence_reg + 4);
3100
3101                 I915_WRITE(fence_reg + 0, val);
3102                 POSTING_READ(fence_reg);
3103         } else {
3104                 I915_WRITE(fence_reg + 4, 0);
3105                 POSTING_READ(fence_reg + 4);
3106         }
3107 }
3108
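/*
 * Gen3 fences are a single 32-bit register: the region must be 1MiB
 * aligned and power-of-two sized, and the pitch is stored as the log2
 * of the number of tile widths, e.g. a 2048-byte stride over 512-byte
 * X tiles yields pitch_val = ffs(2048 / 512) - 1 = 2.
 */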
3109 static void i915_write_fence_reg(struct drm_device *dev, int reg,
3110                                  struct drm_i915_gem_object *obj)
3111 {
3112         struct drm_i915_private *dev_priv = dev->dev_private;
3113         u32 val;
3114
3115         if (obj) {
3116                 u32 size = i915_gem_obj_ggtt_size(obj);
3117                 int pitch_val;
3118                 int tile_width;
3119
3120                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
3121                      (size & -size) != size ||
3122                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3123                      "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
3124                      i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
3125
3126                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
3127                         tile_width = 128;
3128                 else
3129                         tile_width = 512;
3130
3131         /* Note: the pitch must be a power-of-two number of tile widths */
3132                 pitch_val = obj->stride / tile_width;
3133                 pitch_val = ffs(pitch_val) - 1;
3134
3135                 val = i915_gem_obj_ggtt_offset(obj);
3136                 if (obj->tiling_mode == I915_TILING_Y)
3137                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3138                 val |= I915_FENCE_SIZE_BITS(size);
3139                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3140                 val |= I830_FENCE_REG_VALID;
3141         } else
3142                 val = 0;
3143
3144         if (reg < 8)
3145                 reg = FENCE_REG_830_0 + reg * 4;
3146         else
3147                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
3148
3149         I915_WRITE(reg, val);
3150         POSTING_READ(reg);
3151 }
3152
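/*
 * Gen2 uses the same scheme as gen3, but with a 512KiB minimum
 * alignment and the pitch always derived from 128-byte units.
 */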
3153 static void i830_write_fence_reg(struct drm_device *dev, int reg,
3154                                 struct drm_i915_gem_object *obj)
3155 {
3156         struct drm_i915_private *dev_priv = dev->dev_private;
3157         uint32_t val;
3158
3159         if (obj) {
3160                 u32 size = i915_gem_obj_ggtt_size(obj);
3161                 uint32_t pitch_val;
3162
3163                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
3164                      (size & -size) != size ||
3165                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3166                      "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
3167                      i915_gem_obj_ggtt_offset(obj), size);
3168
3169                 pitch_val = obj->stride / 128;
3170                 pitch_val = ffs(pitch_val) - 1;
3171
3172                 val = i915_gem_obj_ggtt_offset(obj);
3173                 if (obj->tiling_mode == I915_TILING_Y)
3174                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3175                 val |= I830_FENCE_SIZE_BITS(size);
3176                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3177                 val |= I830_FENCE_REG_VALID;
3178         } else
3179                 val = 0;
3180
3181         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
3182         POSTING_READ(FENCE_REG_830_0 + reg * 4);
3183 }
3184
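/*
 * An object currently being read through the GTT needs memory barriers
 * around fence updates, so that no access can be reordered across a
 * half-programmed fence; see i915_gem_write_fence() below.
 */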
3185 static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
3186 {
3187         return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
3188 }
3189
3190 static void i915_gem_write_fence(struct drm_device *dev, int reg,
3191                                  struct drm_i915_gem_object *obj)
3192 {
3193         struct drm_i915_private *dev_priv = dev->dev_private;
3194
3195         /* Ensure that all CPU reads are completed before installing a fence
3196          * and all writes before removing the fence.
3197          */
3198         if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
3199                 mb();
3200
3201         WARN(obj && (!obj->stride || !obj->tiling_mode),
3202              "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
3203              obj->stride, obj->tiling_mode);
3204
3205         if (IS_GEN2(dev))
3206                 i830_write_fence_reg(dev, reg, obj);
3207         else if (IS_GEN3(dev))
3208                 i915_write_fence_reg(dev, reg, obj);
3209         else if (INTEL_INFO(dev)->gen >= 4)
3210                 i965_write_fence_reg(dev, reg, obj);
3211
3212         /* And similarly be paranoid that no direct access to this region
3213          * is reordered to before the fence is installed.
3214          */
3215         if (i915_gem_object_needs_mb(obj))
3216                 mb();
3217 }
3218
3219 static inline int fence_number(struct drm_i915_private *dev_priv,
3220                                struct drm_i915_fence_reg *fence)
3221 {
3222         return fence - dev_priv->fence_regs;
3223 }
3224
3225 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
3226                                          struct drm_i915_fence_reg *fence,
3227                                          bool enable)
3228 {
3229         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3230         int reg = fence_number(dev_priv, fence);
3231
3232         i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
3233
3234         if (enable) {
3235                 obj->fence_reg = reg;
3236                 fence->obj = obj;
3237                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3238         } else {
3239                 obj->fence_reg = I915_FENCE_REG_NONE;
3240                 fence->obj = NULL;
3241                 list_del_init(&fence->lru_list);
3242         }
3243         obj->fence_dirty = false;
3244 }
3245
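/* Wait for any outstanding fenced GPU access, then drop the request. */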
3246 static int
3247 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
3248 {
3249         if (obj->last_fenced_req) {
3250                 int ret = i915_wait_request(obj->last_fenced_req);
3251                 if (ret)
3252                         return ret;
3253
3254                 i915_gem_request_assign(&obj->last_fenced_req, NULL);
3255         }
3256
3257         return 0;
3258 }
3259
3260 int
3261 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3262 {
3263         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3264         struct drm_i915_fence_reg *fence;
3265         int ret;
3266
3267         ret = i915_gem_object_wait_fence(obj);
3268         if (ret)
3269                 return ret;
3270
3271         if (obj->fence_reg == I915_FENCE_REG_NONE)
3272                 return 0;
3273
3274         fence = &dev_priv->fence_regs[obj->fence_reg];
3275
3276         if (WARN_ON(fence->pin_count))
3277                 return -EBUSY;
3278
3279         i915_gem_object_fence_lost(obj);
3280         i915_gem_object_update_fence(obj, fence, false);
3281
3282         return 0;
3283 }
3284
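/*
 * Fence allocation strategy: prefer a completely unused register,
 * otherwise steal the least-recently-used unpinned one. If every
 * register is pinned, return -EAGAIN when a pending flip may release
 * one, or -EDEADLK if we would wait forever.
 */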
3285 static struct drm_i915_fence_reg *
3286 i915_find_fence_reg(struct drm_device *dev)
3287 {
3288         struct drm_i915_private *dev_priv = dev->dev_private;
3289         struct drm_i915_fence_reg *reg, *avail;
3290         int i;
3291
3292         /* First try to find a free reg */
3293         avail = NULL;
3294         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3295                 reg = &dev_priv->fence_regs[i];
3296                 if (!reg->obj)
3297                         return reg;
3298
3299                 if (!reg->pin_count)
3300                         avail = reg;
3301         }
3302
3303         if (avail == NULL)
3304                 goto deadlock;
3305
3306         /* None available, try to steal one or wait for a user to finish */
3307         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3308                 if (reg->pin_count)
3309                         continue;
3310
3311                 return reg;
3312         }
3313
3314 deadlock:
3315         /* Wait for completion of pending flips which consume fences */
3316         if (intel_has_pending_fb_unpin(dev))
3317                 return ERR_PTR(-EAGAIN);
3318
3319         return ERR_PTR(-EDEADLK);
3320 }
3321
3322 /**
3323  * i915_gem_object_get_fence - set up fencing for an object
3324  * @obj: object to map through a fence reg
3325  *
3326  * When mapping objects through the GTT, userspace wants to be able to write
3327  * to them without having to worry about swizzling if the object is tiled.
3328  * This function walks the fence regs looking for a free one for @obj,
3329  * stealing one if it can't find any.
3330  *
3331  * It then sets up the reg based on the object's properties: address, pitch
3332  * and tiling format.
3333  *
3334  * For an untiled surface, this removes any existing fence.
3335  */
3336 int
3337 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3338 {
3339         struct drm_device *dev = obj->base.dev;
3340         struct drm_i915_private *dev_priv = dev->dev_private;
3341         bool enable = obj->tiling_mode != I915_TILING_NONE;
3342         struct drm_i915_fence_reg *reg;
3343         int ret;
3344
3345         /* Have we updated the tiling parameters upon the object and so
3346          * will need to serialise the write to the associated fence register?
3347          */
3348         if (obj->fence_dirty) {
3349                 ret = i915_gem_object_wait_fence(obj);
3350                 if (ret)
3351                         return ret;
3352         }
3353
3354         /* Just update our place in the LRU if our fence is getting reused. */
3355         if (obj->fence_reg != I915_FENCE_REG_NONE) {
3356                 reg = &dev_priv->fence_regs[obj->fence_reg];
3357                 if (!obj->fence_dirty) {
3358                         list_move_tail(&reg->lru_list,
3359                                        &dev_priv->mm.fence_list);
3360                         return 0;
3361                 }
3362         } else if (enable) {
3363                 if (WARN_ON(!obj->map_and_fenceable))
3364                         return -EINVAL;
3365
3366                 reg = i915_find_fence_reg(dev);
3367                 if (IS_ERR(reg))
3368                         return PTR_ERR(reg);
3369
3370                 if (reg->obj) {
3371                         struct drm_i915_gem_object *old = reg->obj;
3372
3373                         ret = i915_gem_object_wait_fence(old);
3374                         if (ret)
3375                                 return ret;
3376
3377                         i915_gem_object_fence_lost(old);
3378                 }
3379         } else
3380                 return 0;
3381
3382         i915_gem_object_update_fence(obj, reg, enable);
3383
3384         return 0;
3385 }
3386
3387 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
3388                                      unsigned long cache_level)
3389 {
3390         struct drm_mm_node *gtt_space = &vma->node;
3391         struct drm_mm_node *other;
3392
3393         /*
3394          * On some machines we have to be careful when putting differing types
3395          * of snoopable memory together to avoid the prefetcher crossing memory
3396          * domains and dying. During vm initialisation, we decide whether or not
3397          * these constraints apply and set the drm_mm.color_adjust
3398          * appropriately.
3399          */
3400         if (vma->vm->mm.color_adjust == NULL)
3401                 return true;
3402
3403         if (!drm_mm_node_allocated(gtt_space))
3404                 return true;
3405
3406         if (list_empty(&gtt_space->node_list))
3407                 return true;
3408
3409         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3410         if (other->allocated && !other->hole_follows && other->color != cache_level)
3411                 return false;
3412
3413         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3414         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3415                 return false;
3416
3417         return true;
3418 }
3419
3420 /**
3421  * Finds free space in the GTT aperture and binds the object there.
3422  */
3423 static struct i915_vma *
3424 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3425                            struct i915_address_space *vm,
3426                            const struct i915_ggtt_view *ggtt_view,
3427                            unsigned alignment,
3428                            uint64_t flags)
3429 {
3430         struct drm_device *dev = obj->base.dev;
3431         struct drm_i915_private *dev_priv = dev->dev_private;
3432         u32 size, fence_size, fence_alignment, unfenced_alignment;
3433         unsigned long start =
3434                 flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3435         unsigned long end =
3436                 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
3437         struct i915_vma *vma;
3438         int ret;
3439
3440         if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
3441                 return ERR_PTR(-EINVAL);
3442
3443         fence_size = i915_gem_get_gtt_size(dev,
3444                                            obj->base.size,
3445                                            obj->tiling_mode);
3446         fence_alignment = i915_gem_get_gtt_alignment(dev,
3447                                                      obj->base.size,
3448                                                      obj->tiling_mode, true);
3449         unfenced_alignment =
3450                 i915_gem_get_gtt_alignment(dev,
3451                                            obj->base.size,
3452                                            obj->tiling_mode, false);
3453
3454         if (alignment == 0)
3455                 alignment = flags & PIN_MAPPABLE ? fence_alignment :
3456                                                 unfenced_alignment;
3457         if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3458                 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
3459                 return ERR_PTR(-EINVAL);
3460         }
3461
3462         size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3463
3464         /* If the object is bigger than the entire aperture, reject it early
3465          * before evicting everything in a vain attempt to find space.
3466          */
3467         if (obj->base.size > end) {
3468                 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zu > %s aperture=%lu\n",
3469                           obj->base.size,
3470                           flags & PIN_MAPPABLE ? "mappable" : "total",
3471                           end);
3472                 return ERR_PTR(-E2BIG);
3473         }
3474
3475         ret = i915_gem_object_get_pages(obj);
3476         if (ret)
3477                 return ERR_PTR(ret);
3478
3479         i915_gem_object_pin_pages(obj);
3480
3481         vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
3482                           i915_gem_obj_lookup_or_create_vma(obj, vm);
3483
3484         if (IS_ERR(vma))
3485                 goto err_unpin;
3486
3487 search_free:
3488         ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3489                                                   size, alignment,
3490                                                   obj->cache_level,
3491                                                   start, end,
3492                                                   DRM_MM_SEARCH_DEFAULT,
3493                                                   DRM_MM_CREATE_DEFAULT);
3494         if (ret) {
3495                 ret = i915_gem_evict_something(dev, vm, size, alignment,
3496                                                obj->cache_level,
3497                                                start, end,
3498                                                flags);
3499                 if (ret == 0)
3500                         goto search_free;
3501
3502                 goto err_free_vma;
3503         }
3504         if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
3505                 ret = -EINVAL;
3506                 goto err_remove_node;
3507         }
3508
3509         ret = i915_gem_gtt_prepare_object(obj);
3510         if (ret)
3511                 goto err_remove_node;
3512
3513         /* allocate before insert / bind */
3514         if (vma->vm->allocate_va_range) {
3515                 trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size,
3516                                 VM_TO_TRACE_NAME(vma->vm));
3517                 ret = vma->vm->allocate_va_range(vma->vm,
3518                                                 vma->node.start,
3519                                                 vma->node.size);
3520                 if (ret)
3521                         goto err_remove_node;
3522         }
3523
3524         trace_i915_vma_bind(vma, flags);
3525         ret = i915_vma_bind(vma, obj->cache_level,
3526                             flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
3527         if (ret)
3528                 goto err_finish_gtt;
3529
3530         list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3531         list_add_tail(&vma->mm_list, &vm->inactive_list);
3532
3533         return vma;
3534
3535 err_finish_gtt:
3536         i915_gem_gtt_finish_object(obj);
3537 err_remove_node:
3538         drm_mm_remove_node(&vma->node);
3539 err_free_vma:
3540         i915_gem_vma_destroy(vma);
3541         vma = ERR_PTR(ret);
3542 err_unpin:
3543         i915_gem_object_unpin_pages(obj);
3544         return vma;
3545 }
3546
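/*
 * Returns true if the cachelines were actually flushed, in which case
 * callers pair it with i915_gem_chipset_flush(); returns false for
 * objects without pages, stolen/phys objects, and snooped objects
 * (which are merely marked cache_dirty).
 */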
3547 bool
3548 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3549                         bool force)
3550 {
3551         /* If we don't have a page list set up, then we're not pinned
3552          * to GPU, and we can ignore the cache flush because it'll happen
3553          * again at bind time.
3554          */
3555         if (obj->pages == NULL)
3556                 return false;
3557
3558         /*
3559          * Stolen memory is always coherent with the GPU as it is explicitly
3560          * marked as wc by the system, or the system is cache-coherent.
3561          */
3562         if (obj->stolen || obj->phys_handle)
3563                 return false;
3564
3565         /* If the GPU is snooping the contents of the CPU cache,
3566          * we do not need to manually clear the CPU cache lines.  However,
3567          * the caches are only snooped when the render cache is
3568          * flushed/invalidated.  As we always have to emit invalidations
3569          * and flushes when moving into and out of the RENDER domain, correct
3570          * snooping behaviour occurs naturally as the result of our domain
3571          * tracking.
3572          */
3573         if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3574                 obj->cache_dirty = true;
3575                 return false;
3576         }
3577
3578         trace_i915_gem_object_clflush(obj);
3579         drm_clflush_sg(obj->pages);
3580         obj->cache_dirty = false;
3581
3582         return true;
3583 }
3584
3585 /** Flushes the GTT write domain for the object if it's dirty. */
3586 static void
3587 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3588 {
3589         uint32_t old_write_domain;
3590
3591         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3592                 return;
3593
3594         /* No actual flushing is required for the GTT write domain.  Writes
3595          * to it immediately go to main memory as far as we know, so there's
3596          * no chipset flush.  It also doesn't land in render cache.
3597          *
3598          * However, we do have to enforce the order so that all writes through
3599          * the GTT land before any writes to the device, such as updates to
3600          * the GATT itself.
3601          */
3602         wmb();
3603
3604         old_write_domain = obj->base.write_domain;
3605         obj->base.write_domain = 0;
3606
3607         intel_fb_obj_flush(obj, false);
3608
3609         trace_i915_gem_object_change_domain(obj,
3610                                             obj->base.read_domains,
3611                                             old_write_domain);
3612 }
3613
3614 /** Flushes the CPU write domain for the object if it's dirty. */
3615 static void
3616 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3617 {
3618         uint32_t old_write_domain;
3619
3620         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3621                 return;
3622
3623         if (i915_gem_clflush_object(obj, obj->pin_display))
3624                 i915_gem_chipset_flush(obj->base.dev);
3625
3626         old_write_domain = obj->base.write_domain;
3627         obj->base.write_domain = 0;
3628
3629         intel_fb_obj_flush(obj, false);
3630
3631         trace_i915_gem_object_change_domain(obj,
3632                                             obj->base.read_domains,
3633                                             old_write_domain);
3634 }
3635
3636 /**
3637  * Moves a single object to the GTT read, and possibly write domain.
3638  *
3639  * This function returns when the move is complete, including waiting on
3640  * flushes to occur.
3641  */
3642 int
3643 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3644 {
3645         uint32_t old_write_domain, old_read_domains;
3646         struct i915_vma *vma;
3647         int ret;
3648
3649         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3650                 return 0;
3651
3652         ret = i915_gem_object_wait_rendering(obj, !write);
3653         if (ret)
3654                 return ret;
3655
3656         i915_gem_object_retire(obj);
3657
3658         /* Flush and acquire obj->pages so that we are coherent through
3659          * direct access in memory with previous cached writes through
3660          * shmemfs and that our cache domain tracking remains valid.
3661          * For example, if the obj->filp was moved to swap without us
3662          * being notified and releasing the pages, we would mistakenly
3663          * continue to assume that the obj remained out of the CPU cached
3664          * domain.
3665          */
3666         ret = i915_gem_object_get_pages(obj);
3667         if (ret)
3668                 return ret;
3669
3670         i915_gem_object_flush_cpu_write_domain(obj);
3671
3672         /* Serialise direct access to this object with the barriers for
3673          * coherent writes from the GPU, by effectively invalidating the
3674          * GTT domain upon first access.
3675          */
3676         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3677                 mb();
3678
3679         old_write_domain = obj->base.write_domain;
3680         old_read_domains = obj->base.read_domains;
3681
3682         /* It should now be out of any other write domains, and we can update
3683          * the domain values for our changes.
3684          */
3685         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3686         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3687         if (write) {
3688                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3689                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3690                 obj->dirty = 1;
3691         }
3692
3693         if (write)
3694                 intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
3695
3696         trace_i915_gem_object_change_domain(obj,
3697                                             old_read_domains,
3698                                             old_write_domain);
3699
3700         /* And bump the LRU for this access */
3701         vma = i915_gem_obj_to_ggtt(obj);
3702         if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
3703                 list_move_tail(&vma->mm_list,
3704                                &to_i915(obj->base.dev)->gtt.base.inactive_list);
3705
3706         return 0;
3707 }
3708
3709 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3710                                     enum i915_cache_level cache_level)
3711 {
3712         struct drm_device *dev = obj->base.dev;
3713         struct i915_vma *vma, *next;
3714         int ret;
3715
3716         if (obj->cache_level == cache_level)
3717                 return 0;
3718
3719         if (i915_gem_obj_is_pinned(obj)) {
3720                 DRM_DEBUG("can not change the cache level of pinned objects\n");
3721                 return -EBUSY;
3722         }
3723
3724         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
3725                 if (!i915_gem_valid_gtt_space(vma, cache_level)) {
3726                         ret = i915_vma_unbind(vma);
3727                         if (ret)
3728                                 return ret;
3729                 }
3730         }
3731
3732         if (i915_gem_obj_bound_any(obj)) {
3733                 ret = i915_gem_object_finish_gpu(obj);
3734                 if (ret)
3735                         return ret;
3736
3737                 i915_gem_object_finish_gtt(obj);
3738
3739                 /* Before SandyBridge, you could not use tiling or fence
3740                  * registers with snooped memory, so relinquish any fences
3741                  * currently pointing to our region in the aperture.
3742                  */
3743                 if (INTEL_INFO(dev)->gen < 6) {
3744                         ret = i915_gem_object_put_fence(obj);
3745                         if (ret)
3746                                 return ret;
3747                 }
3748
3749                 list_for_each_entry(vma, &obj->vma_list, vma_link)
3750                         if (drm_mm_node_allocated(&vma->node)) {
3751                                 ret = i915_vma_bind(vma, cache_level,
3752                                                     vma->bound & GLOBAL_BIND);
3753                                 if (ret)
3754                                         return ret;
3755                         }
3756         }
3757
3758         list_for_each_entry(vma, &obj->vma_list, vma_link)
3759                 vma->node.color = cache_level;
3760         obj->cache_level = cache_level;
3761
3762         if (obj->cache_dirty &&
3763             obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
3764             cpu_write_needs_clflush(obj)) {
3765                 if (i915_gem_clflush_object(obj, true))
3766                         i915_gem_chipset_flush(obj->base.dev);
3767         }
3768
3769         return 0;
3770 }
3771
3772 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3773                                struct drm_file *file)
3774 {
3775         struct drm_i915_gem_caching *args = data;
3776         struct drm_i915_gem_object *obj;
3777         int ret;
3778
3779         ret = i915_mutex_lock_interruptible(dev);
3780         if (ret)
3781                 return ret;
3782
3783         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3784         if (&obj->base == NULL) {
3785                 ret = -ENOENT;
3786                 goto unlock;
3787         }
3788
3789         switch (obj->cache_level) {
3790         case I915_CACHE_LLC:
3791         case I915_CACHE_L3_LLC:
3792                 args->caching = I915_CACHING_CACHED;
3793                 break;
3794
3795         case I915_CACHE_WT:
3796                 args->caching = I915_CACHING_DISPLAY;
3797                 break;
3798
3799         default:
3800                 args->caching = I915_CACHING_NONE;
3801                 break;
3802         }
3803
3804         drm_gem_object_unreference(&obj->base);
3805 unlock:
3806         mutex_unlock(&dev->struct_mutex);
3807         return ret;
3808 }
3809
3810 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3811                                struct drm_file *file)
3812 {
3813         struct drm_i915_gem_caching *args = data;
3814         struct drm_i915_gem_object *obj;
3815         enum i915_cache_level level;
3816         int ret;
3817
3818         switch (args->caching) {
3819         case I915_CACHING_NONE:
3820                 level = I915_CACHE_NONE;
3821                 break;
3822         case I915_CACHING_CACHED:
3823                 level = I915_CACHE_LLC;
3824                 break;
3825         case I915_CACHING_DISPLAY:
3826                 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3827                 break;
3828         default:
3829                 return -EINVAL;
3830         }
3831
3832         ret = i915_mutex_lock_interruptible(dev);
3833         if (ret)
3834                 return ret;
3835
3836         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3837         if (&obj->base == NULL) {
3838                 ret = -ENOENT;
3839                 goto unlock;
3840         }
3841
3842         ret = i915_gem_object_set_cache_level(obj, level);
3843
3844         drm_gem_object_unreference(&obj->base);
3845 unlock:
3846         mutex_unlock(&dev->struct_mutex);
3847         return ret;
3848 }
3849
3850 static bool is_pin_display(struct drm_i915_gem_object *obj)
3851 {
3852         struct i915_vma *vma;
3853
3854         vma = i915_gem_obj_to_ggtt(obj);
3855         if (!vma)
3856                 return false;
3857
3858         /* There are 2 sources that pin objects:
3859          *   1. The display engine (scanouts, sprites, cursors);
3860          *   2. Reservations for execbuffer;
3861          *
3862          * We can ignore reservations as we hold the struct_mutex and
3863          * are only called outside of the reservation path.
3864          */
3865         return vma->pin_count;
3866 }
3867
3868 /*
3869  * Prepare buffer for display plane (scanout, cursors, etc).
3870  * Can be called from an uninterruptible phase (modesetting) and allows
3871  * any flushes to be pipelined (for pageflips).
3872  */
3873 int
3874 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3875                                      u32 alignment,
3876                                      struct intel_engine_cs *pipelined,
3877                                      const struct i915_ggtt_view *view)
3878 {
3879         u32 old_read_domains, old_write_domain;
3880         bool was_pin_display;
3881         int ret;
3882
3883         if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) {
3884                 ret = i915_gem_object_sync(obj, pipelined);
3885                 if (ret)
3886                         return ret;
3887         }
3888
3889         /* Mark the pin_display early so that we account for the
3890          * display coherency whilst setting up the cache domains.
3891          */
3892         was_pin_display = obj->pin_display;
3893         obj->pin_display = true;
3894
3895         /* The display engine is not coherent with the LLC cache on gen6.  As
3896          * a result, we make sure that the pinning that is about to occur is
3897          * done with uncached PTEs. This is lowest common denominator for all
3898          * chipsets.
3899          *
3900          * However for gen6+, we could do better by using the GFDT bit instead
3901          * of uncaching, which would allow us to flush all the LLC-cached data
3902          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3903          */
3904         ret = i915_gem_object_set_cache_level(obj,
3905                                               HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3906         if (ret)
3907                 goto err_unpin_display;
3908
3909         /* As the user may map the buffer once pinned in the display plane
3910          * (e.g. libkms for the bootup splash), we have to ensure that we
3911          * always use map_and_fenceable for all scanout buffers.
3912          */
3913         ret = i915_gem_object_ggtt_pin(obj, view, alignment,
3914                                        view->type == I915_GGTT_VIEW_NORMAL ?
3915                                        PIN_MAPPABLE : 0);
3916         if (ret)
3917                 goto err_unpin_display;
3918
3919         i915_gem_object_flush_cpu_write_domain(obj);
3920
3921         old_write_domain = obj->base.write_domain;
3922         old_read_domains = obj->base.read_domains;
3923
3924         /* It should now be out of any other write domains, and we can update
3925          * the domain values for our changes.
3926          */
3927         obj->base.write_domain = 0;
3928         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3929
3930         trace_i915_gem_object_change_domain(obj,
3931                                             old_read_domains,
3932                                             old_write_domain);
3933
3934         return 0;
3935
3936 err_unpin_display:
3937         WARN_ON(was_pin_display != is_pin_display(obj));
3938         obj->pin_display = was_pin_display;
3939         return ret;
3940 }
3941
3942 void
3943 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
3944                                          const struct i915_ggtt_view *view)
3945 {
3946         i915_gem_object_ggtt_unpin_view(obj, view);
3947
3948         obj->pin_display = is_pin_display(obj);
3949 }
3950
3951 int
3952 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3953 {
3954         int ret;
3955
3956         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3957                 return 0;
3958
3959         ret = i915_gem_object_wait_rendering(obj, false);
3960         if (ret)
3961                 return ret;
3962
3963         /* Ensure that we invalidate the GPU's caches and TLBs. */
3964         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3965         return 0;
3966 }
3967
3968 /**
3969  * Moves a single object to the CPU read, and possibly write domain.
3970  *
3971  * This function returns when the move is complete, including waiting on
3972  * flushes to occur.
3973  */
3974 int
3975 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3976 {
3977         uint32_t old_write_domain, old_read_domains;
3978         int ret;
3979
3980         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3981                 return 0;
3982
3983         ret = i915_gem_object_wait_rendering(obj, !write);
3984         if (ret)
3985                 return ret;
3986
3987         i915_gem_object_retire(obj);
3988         i915_gem_object_flush_gtt_write_domain(obj);
3989
3990         old_write_domain = obj->base.write_domain;
3991         old_read_domains = obj->base.read_domains;
3992
3993         /* Flush the CPU cache if it's still invalid. */
3994         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3995                 i915_gem_clflush_object(obj, false);
3996
3997                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3998         }
3999
4000         /* It should now be out of any other write domains, and we can update
4001          * the domain values for our changes.
4002          */
4003         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
4004
4005         /* If we're writing through the CPU, then the GPU read domains will
4006          * need to be invalidated at next use.
4007          */
4008         if (write) {
4009                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4010                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4011         }
4012
4013         if (write)
4014                 intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
4015
4016         trace_i915_gem_object_change_domain(obj,
4017                                             old_read_domains,
4018                                             old_write_domain);
4019
4020         return 0;
4021 }
4022
4023 /* Throttle our rendering by waiting until the ring has completed our requests
4024  * emitted over 20 msec ago.
4025  *
4026  * Note that if we were to use the current jiffies each time around the loop,
4027  * we wouldn't escape the function with any frames outstanding if the time to
4028  * render a frame was over 20ms.
4029  *
4030  * This should get us reasonable parallelism between CPU and GPU but also
4031  * relatively low latency when blocking on a particular request to finish.
4032  */
4033 static int
4034 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4035 {
4036         struct drm_i915_private *dev_priv = dev->dev_private;
4037         struct drm_i915_file_private *file_priv = file->driver_priv;
4038         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
4039         struct drm_i915_gem_request *request, *target = NULL;
4040         unsigned reset_counter;
4041         int ret;
4042
4043         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
4044         if (ret)
4045                 return ret;
4046
4047         ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
4048         if (ret)
4049                 return ret;
4050
4051         spin_lock(&file_priv->mm.lock);
4052         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
4053                 if (time_after_eq(request->emitted_jiffies, recent_enough))
4054                         break;
4055
4056                 target = request;
4057         }
4058         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
4059         if (target)
4060                 i915_gem_request_reference(target);
4061         spin_unlock(&file_priv->mm.lock);
4062
4063         if (target == NULL)
4064                 return 0;
4065
4066         ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
4067         if (ret == 0)
4068                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
4069
4070         mutex_lock(&dev->struct_mutex);
4071         i915_gem_request_unreference(target);
4072         mutex_unlock(&dev->struct_mutex);
4073
4074         return ret;
4075 }
4076
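/*
 * A VMA is misplaced if it violates the requested alignment, if
 * PIN_MAPPABLE was requested but the object is not map-and-fenceable,
 * or if it sits below the minimum offset demanded via PIN_OFFSET_BIAS.
 */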
4077 static bool
4078 i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
4079 {
4080         struct drm_i915_gem_object *obj = vma->obj;
4081
4082         if (alignment &&
4083             vma->node.start & (alignment - 1))
4084                 return true;
4085
4086         if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
4087                 return true;
4088
4089         if (flags & PIN_OFFSET_BIAS &&
4090             vma->node.start < (flags & PIN_OFFSET_MASK))
4091                 return true;
4092
4093         return false;
4094 }
4095
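/*
 * Core pinning helper: look up (or create and bind) the VMA for @vm or
 * @ggtt_view, unbinding and rebinding it first if it is misplaced for
 * the requested alignment/flags, and finally take a pin reference.
 * GGTT callers must pass a view, PPGTT callers must not.
 */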
4096 static int
4097 i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
4098                        struct i915_address_space *vm,
4099                        const struct i915_ggtt_view *ggtt_view,
4100                        uint32_t alignment,
4101                        uint64_t flags)
4102 {
4103         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4104         struct i915_vma *vma;
4105         unsigned bound;
4106         int ret;
4107
4108         if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
4109                 return -ENODEV;
4110
4111         if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
4112                 return -EINVAL;
4113
4114         if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
4115                 return -EINVAL;
4116
4117         if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
4118                 return -EINVAL;
4119
4120         vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
4121                           i915_gem_obj_to_vma(obj, vm);
4122
4123         if (IS_ERR(vma))
4124                 return PTR_ERR(vma);
4125
4126         if (vma) {
4127                 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
4128                         return -EBUSY;
4129
4130                 if (i915_vma_misplaced(vma, alignment, flags)) {
4131                         unsigned long offset;
4132                         offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) :
4133                                              i915_gem_obj_offset(obj, vm);
4134                         WARN(vma->pin_count,
4135                              "bo is already pinned in %s with incorrect alignment:"
4136                              " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
4137                              " obj->map_and_fenceable=%d\n",
4138                              ggtt_view ? "ggtt" : "ppgtt",
4139                              offset,
4140                              alignment,
4141                              !!(flags & PIN_MAPPABLE),
4142                              obj->map_and_fenceable);
4143                         ret = i915_vma_unbind(vma);
4144                         if (ret)
4145                                 return ret;
4146
4147                         vma = NULL;
4148                 }
4149         }
4150
4151         bound = vma ? vma->bound : 0;
4152         if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
4153                 /* In true PPGTT, bind has possibly changed PDEs, which
4154                  * means we must do a context switch before the GPU can
4155                  * accurately read some of the VMAs.
4156                  */
4157                 vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
4158                                                  flags);
4159                 if (IS_ERR(vma))
4160                         return PTR_ERR(vma);
4161         }
4162
4163         if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND)) {
4164                 ret = i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
4165                 if (ret)
4166                         return ret;
4167         }
4168
4169         if ((bound ^ vma->bound) & GLOBAL_BIND) {
4170                 bool mappable, fenceable;
4171                 u32 fence_size, fence_alignment;
4172
4173                 fence_size = i915_gem_get_gtt_size(obj->base.dev,
4174                                                    obj->base.size,
4175                                                    obj->tiling_mode);
4176                 fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
4177                                                              obj->base.size,
4178                                                              obj->tiling_mode,
4179                                                              true);
4180
4181                 fenceable = (vma->node.size == fence_size &&
4182                              (vma->node.start & (fence_alignment - 1)) == 0);
4183
4184                 mappable = (vma->node.start + fence_size <=
4185                             dev_priv->gtt.mappable_end);
4186
4187                 obj->map_and_fenceable = mappable && fenceable;
4188         }
4189
4190         WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
4191
4192         vma->pin_count++;
4193         if (flags & PIN_MAPPABLE)
4194                 obj->pin_mappable |= true;
4195
4196         return 0;
4197 }
4198
4199 int
4200 i915_gem_object_pin(struct drm_i915_gem_object *obj,
4201                     struct i915_address_space *vm,
4202                     uint32_t alignment,
4203                     uint64_t flags)
4204 {
4205         return i915_gem_object_do_pin(obj, vm,
4206                                       i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
4207                                       alignment, flags);
4208 }
4209
4210 int
4211 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
4212                          const struct i915_ggtt_view *view,
4213                          uint32_t alignment,
4214                          uint64_t flags)
4215 {
4216         if (WARN_ONCE(!view, "no view specified"))
4217                 return -EINVAL;
4218
4219         return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
4220                                       alignment, flags | PIN_GLOBAL);
4221 }
4222
4223 void
4224 i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
4225                                 const struct i915_ggtt_view *view)
4226 {
4227         struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
4228
4229         BUG_ON(!vma);
4230         WARN_ON(vma->pin_count == 0);
4231         WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
4232
4233         if (--vma->pin_count == 0 && view->type == I915_GGTT_VIEW_NORMAL)
4234                 obj->pin_mappable = false;
4235 }
4236
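/*
 * Pin the object's fence register, if it has one, so that it cannot be
 * stolen by i915_find_fence_reg() while the hardware still relies on
 * it. Returns true iff a fence was pinned.
 */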
4237 bool
4238 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
4239 {
4240         if (obj->fence_reg != I915_FENCE_REG_NONE) {
4241                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4242                 struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
4243
4244                 WARN_ON(!ggtt_vma ||
4245                         dev_priv->fence_regs[obj->fence_reg].pin_count >
4246                         ggtt_vma->pin_count);
4247                 dev_priv->fence_regs[obj->fence_reg].pin_count++;
4248                 return true;
4249         } else
4250                 return false;
4251 }
4252
4253 void
4254 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
4255 {
4256         if (obj->fence_reg != I915_FENCE_REG_NONE) {
4257                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4258                 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
4259                 dev_priv->fence_regs[obj->fence_reg].pin_count--;
4260         }
4261 }
4262
4263 int
4264 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4265                     struct drm_file *file)
4266 {
4267         struct drm_i915_gem_busy *args = data;
4268         struct drm_i915_gem_object *obj;
4269         int ret;
4270
4271         ret = i915_mutex_lock_interruptible(dev);
4272         if (ret)
4273                 return ret;
4274
4275         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4276         if (&obj->base == NULL) {
4277                 ret = -ENOENT;
4278                 goto unlock;
4279         }
4280
4281         /* Count all active objects as busy, even if they are currently not used
4282          * by the gpu. Users of this interface expect objects to eventually
4283          * become non-busy without any further actions, therefore emit any
4284          * necessary flushes here.
4285          */
4286         ret = i915_gem_object_flush_active(obj);
4287
4288         args->busy = obj->active;
4289         if (obj->last_read_req) {
4290                 struct intel_engine_cs *ring;
4291                 BUILD_BUG_ON(I915_NUM_RINGS > 16);
4292                 ring = i915_gem_request_get_ring(obj->last_read_req);
4293                 args->busy |= intel_ring_flag(ring) << 16;
4294         }
4295
4296         drm_gem_object_unreference(&obj->base);
4297 unlock:
4298         mutex_unlock(&dev->struct_mutex);
4299         return ret;
4300 }
4301
4302 int
4303 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4304                         struct drm_file *file_priv)
4305 {
4306         return i915_gem_ring_throttle(dev, file_priv);
4307 }
4308
4309 int
4310 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4311                        struct drm_file *file_priv)
4312 {
4313         struct drm_i915_private *dev_priv = dev->dev_private;
4314         struct drm_i915_gem_madvise *args = data;
4315         struct drm_i915_gem_object *obj;
4316         int ret;
4317
4318         switch (args->madv) {
4319         case I915_MADV_DONTNEED:
4320         case I915_MADV_WILLNEED:
4321                 break;
4322         default:
4323                 return -EINVAL;
4324         }
4325
4326         ret = i915_mutex_lock_interruptible(dev);
4327         if (ret)
4328                 return ret;
4329
4330         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
4331         if (&obj->base == NULL) {
4332                 ret = -ENOENT;
4333                 goto unlock;
4334         }
4335
4336         if (i915_gem_obj_is_pinned(obj)) {
4337                 ret = -EINVAL;
4338                 goto out;
4339         }
4340
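        /* With QUIRK_PIN_SWIZZLED_PAGES, tiled objects keep their
         * backing pages pinned (so the page-dependent bit-17 swizzle
         * state stays valid), so transfer that pin across a
         * WILLNEED <-> DONTNEED transition.
         */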
4341         if (obj->pages &&
4342             obj->tiling_mode != I915_TILING_NONE &&
4343             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4344                 if (obj->madv == I915_MADV_WILLNEED)
4345                         i915_gem_object_unpin_pages(obj);
4346                 if (args->madv == I915_MADV_WILLNEED)
4347                         i915_gem_object_pin_pages(obj);
4348         }
4349
4350         if (obj->madv != __I915_MADV_PURGED)
4351                 obj->madv = args->madv;
4352
4353         /* if the object is no longer attached, discard its backing storage */
4354         if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
4355                 i915_gem_object_truncate(obj);
4356
4357         args->retained = obj->madv != __I915_MADV_PURGED;
4358
4359 out:
4360         drm_gem_object_unreference(&obj->base);
4361 unlock:
4362         mutex_unlock(&dev->struct_mutex);
4363         return ret;
4364 }
4365
4366 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4367                           const struct drm_i915_gem_object_ops *ops)
4368 {
4369         INIT_LIST_HEAD(&obj->global_list);
4370         INIT_LIST_HEAD(&obj->ring_list);
4371         INIT_LIST_HEAD(&obj->obj_exec_link);
4372         INIT_LIST_HEAD(&obj->vma_list);
4373         INIT_LIST_HEAD(&obj->batch_pool_list);
4374
4375         obj->ops = ops;
4376
4377         obj->fence_reg = I915_FENCE_REG_NONE;
4378         obj->madv = I915_MADV_WILLNEED;
4379
4380         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4381 }
4382
4383 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4384         .get_pages = i915_gem_object_get_pages_gtt,
4385         .put_pages = i915_gem_object_put_pages_gtt,
4386 };
4387
4388 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4389                                                   size_t size)
4390 {
4391         struct drm_i915_gem_object *obj;
4392         struct address_space *mapping;
4393         gfp_t mask;
4394
4395         obj = i915_gem_object_alloc(dev);
4396         if (obj == NULL)
4397                 return NULL;
4398
4399         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4400                 i915_gem_object_free(obj);
4401                 return NULL;
4402         }
4403
4404         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4405         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4406                 /* 965gm cannot relocate objects above 4GiB. */
4407                 mask &= ~__GFP_HIGHMEM;
4408                 mask |= __GFP_DMA32;
4409         }
4410
4411         mapping = file_inode(obj->base.filp)->i_mapping;
4412         mapping_set_gfp_mask(mapping, mask);
4413
4414         i915_gem_object_init(obj, &i915_gem_object_ops);
4415
4416         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4417         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4418
4419         if (HAS_LLC(dev)) {
4420                 /* On some devices, we can have the GPU use the LLC (the CPU
4421                  * cache) for about a 10% performance improvement
4422                  * compared to uncached.  Graphics requests other than
4423                  * display scanout are coherent with the CPU in
4424                  * accessing this cache.  This means in this mode we
4425                  * don't need to clflush on the CPU side, and on the
4426                  * GPU side we only need to flush internal caches to
4427                  * get data visible to the CPU.
4428                  *
4429                  * However, we maintain the display planes as UC, and so
4430                  * need to rebind when first used as such.
4431                  */
4432                 obj->cache_level = I915_CACHE_LLC;
4433         } else
4434                 obj->cache_level = I915_CACHE_NONE;
4435
4436         trace_i915_gem_object_create(obj);
4437
4438         return obj;
4439 }
4440
4441 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4442 {
4443         /* If we are the last user of the backing storage (be it shmemfs
4444          * pages or stolen etc), we know that the pages are going to be
4445          * immediately released. In this case, we can then skip copying
4446          * back the contents from the GPU.
4447          */
4448
4449         if (obj->madv != I915_MADV_WILLNEED)
4450                 return false;
4451
4452         if (obj->base.filp == NULL)
4453                 return true;
4454
4455         /* At first glance, this looks racy, but then again so would be
4456          * userspace racing mmap against close. However, the first external
4457          * reference to the filp can only be obtained through the
4458          * i915_gem_mmap_ioctl() which safeguards us against the user
4459          * acquiring such a reference whilst we are in the middle of
4460          * freeing the object.
4461          */
4462         return atomic_long_read(&obj->base.filp->f_count) == 1;
4463 }
4464
4465 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4466 {
4467         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4468         struct drm_device *dev = obj->base.dev;
4469         struct drm_i915_private *dev_priv = dev->dev_private;
4470         struct i915_vma *vma, *next;
4471
4472         intel_runtime_pm_get(dev_priv);
4473
4474         trace_i915_gem_object_destroy(obj);
4475
4476         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4477                 int ret;
4478
4479                 vma->pin_count = 0;
4480                 ret = i915_vma_unbind(vma);
4481                 if (WARN_ON(ret == -ERESTARTSYS)) {
4482                         bool was_interruptible;
4483
4484                         was_interruptible = dev_priv->mm.interruptible;
4485                         dev_priv->mm.interruptible = false;
4486
4487                         WARN_ON(i915_vma_unbind(vma));
4488
4489                         dev_priv->mm.interruptible = was_interruptible;
4490                 }
4491         }
4492
4493         /* Stolen objects don't hold a ref, but do hold a pin count. Fix
4494          * that up before progressing. */
4495         if (obj->stolen)
4496                 i915_gem_object_unpin_pages(obj);
4497
4498         WARN_ON(obj->frontbuffer_bits);
4499
4500         if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
4501             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
4502             obj->tiling_mode != I915_TILING_NONE)
4503                 i915_gem_object_unpin_pages(obj);
4504
4505         if (WARN_ON(obj->pages_pin_count))
4506                 obj->pages_pin_count = 0;
4507         if (discard_backing_storage(obj))
4508                 obj->madv = I915_MADV_DONTNEED;
4509         i915_gem_object_put_pages(obj);
4510         i915_gem_object_free_mmap_offset(obj);
4511
4512         BUG_ON(obj->pages);
4513
4514         if (obj->base.import_attach)
4515                 drm_prime_gem_destroy(&obj->base, NULL);
4516
4517         if (obj->ops->release)
4518                 obj->ops->release(obj);
4519
4520         drm_gem_object_release(&obj->base);
4521         i915_gem_info_remove_obj(dev_priv, obj->base.size);
4522
4523         kfree(obj->bit_17);
4524         i915_gem_object_free(obj);
4525
4526         intel_runtime_pm_put(dev_priv);
4527 }
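/*
 * A condensed sketch of the retry idiom used in the unbind loop above
 * (hypothetical helper, for illustration only): an interruptible call
 * that must not fail during teardown is repeated with
 * dev_priv->mm.interruptible temporarily cleared, so that it can no
 * longer return -ERESTARTSYS.
 */
#if 0
static int example_unbind_uninterruptible(struct drm_i915_private *dev_priv,
                                          struct i915_vma *vma)
{
        bool was_interruptible = dev_priv->mm.interruptible;
        int ret;

        dev_priv->mm.interruptible = false;
        ret = i915_vma_unbind(vma);
        dev_priv->mm.interruptible = was_interruptible;

        return ret;
}
#endif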
4528
4529 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4530                                      struct i915_address_space *vm)
4531 {
4532         struct i915_vma *vma;
4533         list_for_each_entry(vma, &obj->vma_list, vma_link) {
4534                 if (i915_is_ggtt(vma->vm) &&
4535                     vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
4536                         continue;
4537                 if (vma->vm == vm)
4538                         return vma;
4539         }
4540         return NULL;
4541 }
4542
4543 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
4544                                            const struct i915_ggtt_view *view)
4545 {
4546         struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
4547         struct i915_vma *vma;
4548
4549         if (WARN_ONCE(!view, "no view specified"))
4550                 return ERR_PTR(-EINVAL);
4551
4552         list_for_each_entry(vma, &obj->vma_list, vma_link)
4553                 if (vma->vm == ggtt &&
4554                     i915_ggtt_view_equal(&vma->ggtt_view, view))
4555                         return vma;
4556         return NULL;
4557 }
4558
4559 void i915_gem_vma_destroy(struct i915_vma *vma)
4560 {
4561         struct i915_address_space *vm = NULL;
4562         WARN_ON(vma->node.allocated);
4563
4564         /* Keep the vma as a placeholder in the execbuffer reservation lists */
4565         if (!list_empty(&vma->exec_list))
4566                 return;
4567
4568         vm = vma->vm;
4569
4570         if (!i915_is_ggtt(vm))
4571                 i915_ppgtt_put(i915_vm_to_ppgtt(vm));
4572
4573         list_del(&vma->vma_link);
4574
4575         kfree(vma);
4576 }
4577
4578 static void
4579 i915_gem_stop_ringbuffers(struct drm_device *dev)
4580 {
4581         struct drm_i915_private *dev_priv = dev->dev_private;
4582         struct intel_engine_cs *ring;
4583         int i;
4584
4585         for_each_ring(ring, dev_priv, i)
4586                 dev_priv->gt.stop_ring(ring);
4587 }
4588
4589 int
4590 i915_gem_suspend(struct drm_device *dev)
4591 {
4592         struct drm_i915_private *dev_priv = dev->dev_private;
4593         int ret = 0;
4594
4595         mutex_lock(&dev->struct_mutex);
4596         ret = i915_gpu_idle(dev);
4597         if (ret)
4598                 goto err;
4599
4600         i915_gem_retire_requests(dev);
4601
4602         i915_gem_stop_ringbuffers(dev);
4603         mutex_unlock(&dev->struct_mutex);
4604
4605         cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4606         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4607         flush_delayed_work(&dev_priv->mm.idle_work);
4608
4609         /* Assert that we successfully flushed all the work and
4610          * reset the GPU back to its idle, low power state.
4611          */
4612         WARN_ON(dev_priv->mm.busy);
4613
4614         return 0;
4615
4616 err:
4617         mutex_unlock(&dev->struct_mutex);
4618         return ret;
4619 }
4620
4621 int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
4622 {
4623         struct drm_device *dev = ring->dev;
4624         struct drm_i915_private *dev_priv = dev->dev_private;
4625         u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4626         u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4627         int i, ret;
4628
4629         if (!HAS_L3_DPF(dev) || !remap_info)
4630                 return 0;
4631
4632         ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4633         if (ret)
4634                 return ret;
4635
4636         /*
4637          * Note: We do not worry about the concurrent register cacheline hang
4638          * here because no other code should access these registers other than
4639          * at initialization time.
4640          */
4641         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4642                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4643                 intel_ring_emit(ring, reg_base + i);
4644                 intel_ring_emit(ring, remap_info[i/4]);
4645         }
4646
4647         intel_ring_advance(ring);
4648
4649         return ret;
4650 }
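/*
 * For reference, each remap entry above occupies three dwords on the
 * ring (which is why intel_ring_begin() reserves GEN7_L3LOG_SIZE / 4 * 3
 * dwords): an MI_LOAD_REGISTER_IMM header sized for one register, the
 * register offset, and the immediate value. A sketch of a single such
 * write (hypothetical helper; assumes ring space was already reserved):
 */
#if 0
static void example_emit_lri(struct intel_engine_cs *ring, u32 reg, u32 val)
{
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); /* header, 1 register */
        intel_ring_emit(ring, reg);                     /* register offset */
        intel_ring_emit(ring, val);                     /* value to load */
}
#endif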
4651
4652 void i915_gem_init_swizzling(struct drm_device *dev)
4653 {
4654         struct drm_i915_private *dev_priv = dev->dev_private;
4655
4656         if (INTEL_INFO(dev)->gen < 5 ||
4657             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4658                 return;
4659
4660         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4661                                  DISP_TILE_SURFACE_SWIZZLING);
4662
4663         if (IS_GEN5(dev))
4664                 return;
4665
4666         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4667         if (IS_GEN6(dev))
4668                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4669         else if (IS_GEN7(dev))
4670                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4671         else if (IS_GEN8(dev))
4672                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4673         else
4674                 BUG();
4675 }
4676
4677 static bool
4678 intel_enable_blt(struct drm_device *dev)
4679 {
4680         if (!HAS_BLT(dev))
4681                 return false;
4682
4683         /* The blitter was dysfunctional on early prototypes */
4684         if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4685                 DRM_INFO("BLT not supported on this pre-production hardware;"
4686                          " graphics performance will be degraded.\n");
4687                 return false;
4688         }
4689
4690         return true;
4691 }
4692
4693 static void init_unused_ring(struct drm_device *dev, u32 base)
4694 {
4695         struct drm_i915_private *dev_priv = dev->dev_private;
4696
4697         I915_WRITE(RING_CTL(base), 0);
4698         I915_WRITE(RING_HEAD(base), 0);
4699         I915_WRITE(RING_TAIL(base), 0);
4700         I915_WRITE(RING_START(base), 0);
4701 }
4702
4703 static void init_unused_rings(struct drm_device *dev)
4704 {
4705         if (IS_I830(dev)) {
4706                 init_unused_ring(dev, PRB1_BASE);
4707                 init_unused_ring(dev, SRB0_BASE);
4708                 init_unused_ring(dev, SRB1_BASE);
4709                 init_unused_ring(dev, SRB2_BASE);
4710                 init_unused_ring(dev, SRB3_BASE);
4711         } else if (IS_GEN2(dev)) {
4712                 init_unused_ring(dev, SRB0_BASE);
4713                 init_unused_ring(dev, SRB1_BASE);
4714         } else if (IS_GEN3(dev)) {
4715                 init_unused_ring(dev, PRB1_BASE);
4716                 init_unused_ring(dev, PRB2_BASE);
4717         }
4718 }
4719
4720 int i915_gem_init_rings(struct drm_device *dev)
4721 {
4722         struct drm_i915_private *dev_priv = dev->dev_private;
4723         int ret;
4724
4725         ret = intel_init_render_ring_buffer(dev);
4726         if (ret)
4727                 return ret;
4728
4729         if (HAS_BSD(dev)) {
4730                 ret = intel_init_bsd_ring_buffer(dev);
4731                 if (ret)
4732                         goto cleanup_render_ring;
4733         }
4734
4735         if (intel_enable_blt(dev)) {
4736                 ret = intel_init_blt_ring_buffer(dev);
4737                 if (ret)
4738                         goto cleanup_bsd_ring;
4739         }
4740
4741         if (HAS_VEBOX(dev)) {
4742                 ret = intel_init_vebox_ring_buffer(dev);
4743                 if (ret)
4744                         goto cleanup_blt_ring;
4745         }
4746
4747         if (HAS_BSD2(dev)) {
4748                 ret = intel_init_bsd2_ring_buffer(dev);
4749                 if (ret)
4750                         goto cleanup_vebox_ring;
4751         }
4752
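        /*
         * A note on the magic below: the initial seqno is presumably set
         * just below the 32-bit wrap point so that seqno-wraparound bugs
         * surface within minutes of testing rather than after days of
         * uptime.
         */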
4753         ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4754         if (ret)
4755                 goto cleanup_bsd2_ring;
4756
4757         return 0;
4758
4759 cleanup_bsd2_ring:
4760         intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
4761 cleanup_vebox_ring:
4762         intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4763 cleanup_blt_ring:
4764         intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4765 cleanup_bsd_ring:
4766         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4767 cleanup_render_ring:
4768         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4769
4770         return ret;
4771 }
4772
4773 int
4774 i915_gem_init_hw(struct drm_device *dev)
4775 {
4776         struct drm_i915_private *dev_priv = dev->dev_private;
4777         struct intel_engine_cs *ring;
4778         int ret, i;
4779
4780         if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4781                 return -EIO;
4782
4783         /* Double layer security blanket, see i915_gem_init() */
4784         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4785
4786         if (dev_priv->ellc_size)
4787                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4788
4789         if (IS_HASWELL(dev))
4790                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4791                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4792
4793         if (HAS_PCH_NOP(dev)) {
4794                 if (IS_IVYBRIDGE(dev)) {
4795                         u32 temp = I915_READ(GEN7_MSG_CTL);
4796                         temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4797                         I915_WRITE(GEN7_MSG_CTL, temp);
4798                 } else if (INTEL_INFO(dev)->gen >= 7) {
4799                         u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4800                         temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4801                         I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4802                 }
4803         }
4804
4805         i915_gem_init_swizzling(dev);
4806
4807         /*
4808          * At least 830 can leave some of the unused rings
4809          * "active" (i.e. head != tail) after resume which
4810          * will prevent c3 entry. Make sure all unused rings
4811          * are totally idle.
4812          */
4813         init_unused_rings(dev);
4814
4815         for_each_ring(ring, dev_priv, i) {
4816                 ret = ring->init_hw(ring);
4817                 if (ret)
4818                         goto out;
4819         }
4820
4821         for (i = 0; i < NUM_L3_SLICES(dev); i++)
4822                 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4823
4824         ret = i915_ppgtt_init_hw(dev);
4825         if (ret && ret != -EIO) {
4826                 DRM_ERROR("PPGTT enable failed %d\n", ret);
4827                 i915_gem_cleanup_ringbuffer(dev);
4828                 goto out;
4829         }
4830         ret = i915_gem_context_enable(dev_priv);
4831         if (ret && ret != -EIO) {
4832                 DRM_ERROR("Context enable failed %d\n", ret);
4833                 i915_gem_cleanup_ringbuffer(dev);
4834
4835                 goto out;
4836         }
4837
4838 out:
4839         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4840         return ret;
4841 }
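/*
 * Note the forcewake bracket in i915_gem_init_hw() above: all of its
 * register traffic sits between a single FORCEWAKE_ALL get/put pair
 * instead of waking the hardware around each individual access. A
 * sketch of the pattern (hypothetical body, for illustration only):
 */
#if 0
static void example_forcewake_bracket(struct drm_i915_private *dev_priv)
{
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
        /* ... many register reads/writes without per-access wakeups ... */
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
#endif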
4842
4843 int i915_gem_init(struct drm_device *dev)
4844 {
4845         struct drm_i915_private *dev_priv = dev->dev_private;
4846         int ret;
4847
4848         i915.enable_execlists = intel_sanitize_enable_execlists(dev,
4849                         i915.enable_execlists);
4850
4851         mutex_lock(&dev->struct_mutex);
4852
4853         if (IS_VALLEYVIEW(dev)) {
4854                 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4855                 I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
4856                 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
4857                               VLV_GTLC_ALLOWWAKEACK), 10))
4858                         DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4859         }
4860
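        /*
         * Select the submission backend once, up front: either legacy
         * ringbuffer submission or logical ring (execlists) submission.
         * The rest of the driver calls through these dev_priv->gt hooks
         * and never needs to distinguish the two paths again.
         */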
4861         if (!i915.enable_execlists) {
4862                 dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
4863                 dev_priv->gt.init_rings = i915_gem_init_rings;
4864                 dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
4865                 dev_priv->gt.stop_ring = intel_stop_ring_buffer;
4866         } else {
4867                 dev_priv->gt.do_execbuf = intel_execlists_submission;
4868                 dev_priv->gt.init_rings = intel_logical_rings_init;
4869                 dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
4870                 dev_priv->gt.stop_ring = intel_logical_ring_stop;
4871         }
4872
4873         /* This is just a security blanket to placate dragons.
4874          * On some systems, we very sporadically observe that the first TLBs
4875          * used by the CS may be stale, despite us poking the TLB reset. If
4876          * we hold the forcewake during initialisation these problems
4877          * just magically go away.
4878          */
4879         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4880
4881         ret = i915_gem_init_userptr(dev);
4882         if (ret)
4883                 goto out_unlock;
4884
4885         i915_gem_init_global_gtt(dev);
4886
4887         ret = i915_gem_context_init(dev);
4888         if (ret)
4889                 goto out_unlock;
4890
4891         ret = dev_priv->gt.init_rings(dev);
4892         if (ret)
4893                 goto out_unlock;
4894
4895         ret = i915_gem_init_hw(dev);
4896         if (ret == -EIO) {
4897                 /* Allow ring initialisation to fail by marking the GPU as
4898                  * wedged. But we only want to do this where the GPU is angry,
4899                  * for all other failure, such as an allocation failure, bail.
4900                  * for any other failure, such as an allocation failure, bail.
4901                 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4902                 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
4903                 ret = 0;
4904         }
4905
4906 out_unlock:
4907         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4908         mutex_unlock(&dev->struct_mutex);
4909
4910         return ret;
4911 }
4912
4913 void
4914 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4915 {
4916         struct drm_i915_private *dev_priv = dev->dev_private;
4917         struct intel_engine_cs *ring;
4918         int i;
4919
4920         for_each_ring(ring, dev_priv, i)
4921                 dev_priv->gt.cleanup_ring(ring);
4922 }
4923
4924 static void
4925 init_ring_lists(struct intel_engine_cs *ring)
4926 {
4927         INIT_LIST_HEAD(&ring->active_list);
4928         INIT_LIST_HEAD(&ring->request_list);
4929 }
4930
4931 void i915_init_vm(struct drm_i915_private *dev_priv,
4932                   struct i915_address_space *vm)
4933 {
4934         if (!i915_is_ggtt(vm))
4935                 drm_mm_init(&vm->mm, vm->start, vm->total);
4936         vm->dev = dev_priv->dev;
4937         INIT_LIST_HEAD(&vm->active_list);
4938         INIT_LIST_HEAD(&vm->inactive_list);
4939         INIT_LIST_HEAD(&vm->global_link);
4940         list_add_tail(&vm->global_link, &dev_priv->vm_list);
4941 }
4942
4943 void
4944 i915_gem_load(struct drm_device *dev)
4945 {
4946         struct drm_i915_private *dev_priv = dev->dev_private;
4947         int i;
4948
4949         dev_priv->slab =
4950                 kmem_cache_create("i915_gem_object",
4951                                   sizeof(struct drm_i915_gem_object), 0,
4952                                   SLAB_HWCACHE_ALIGN,
4953                                   NULL);
4954
4955         INIT_LIST_HEAD(&dev_priv->vm_list);
4956         i915_init_vm(dev_priv, &dev_priv->gtt.base);
4957
4958         INIT_LIST_HEAD(&dev_priv->context_list);
4959         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4960         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4961         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4962         for (i = 0; i < I915_NUM_RINGS; i++)
4963                 init_ring_lists(&dev_priv->ring[i]);
4964         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4965                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4966         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4967                           i915_gem_retire_work_handler);
4968         INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
4969                           i915_gem_idle_work_handler);
4970         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4971
4972         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4973
4974         if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4975                 dev_priv->num_fence_regs = 32;
4976         else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4977                 dev_priv->num_fence_regs = 16;
4978         else
4979                 dev_priv->num_fence_regs = 8;
4980
4981         if (intel_vgpu_active(dev))
4982                 dev_priv->num_fence_regs =
4983                                 I915_READ(vgtif_reg(avail_rs.fence_num));
4984
4985         /* Initialize fence registers to zero */
4987         i915_gem_restore_fences(dev);
4988
4989         i915_gem_detect_bit_6_swizzle(dev);
4990         init_waitqueue_head(&dev_priv->pending_flip_queue);
4991
4992         dev_priv->mm.interruptible = true;
4993
4994         i915_gem_shrinker_init(dev_priv);
4995
4996         i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);
4997
4998         mutex_init(&dev_priv->fb_tracking.lock);
4999 }
5000
5001 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
5002 {
5003         struct drm_i915_file_private *file_priv = file->driver_priv;
5004
5005         cancel_delayed_work_sync(&file_priv->mm.idle_work);
5006
5007         /* Clean up our request list when the client is going away, so that
5008          * later retire_requests won't dereference our soon-to-be-gone
5009          * file_priv.
5010          */
5011         spin_lock(&file_priv->mm.lock);
5012         while (!list_empty(&file_priv->mm.request_list)) {
5013                 struct drm_i915_gem_request *request;
5014
5015                 request = list_first_entry(&file_priv->mm.request_list,
5016                                            struct drm_i915_gem_request,
5017                                            client_list);
5018                 list_del(&request->client_list);
5019                 request->file_priv = NULL;
5020         }
5021         spin_unlock(&file_priv->mm.lock);
5022 }
5023
5024 static void
5025 i915_gem_file_idle_work_handler(struct work_struct *work)
5026 {
5027         struct drm_i915_file_private *file_priv =
5028                 container_of(work, typeof(*file_priv), mm.idle_work.work);
5029
5030         atomic_set(&file_priv->rps_wait_boost, false);
5031 }
5032
5033 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
5034 {
5035         struct drm_i915_file_private *file_priv;
5036         int ret;
5037
5038         DRM_DEBUG_DRIVER("\n");
5039
5040         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5041         if (!file_priv)
5042                 return -ENOMEM;
5043
5044         file->driver_priv = file_priv;
5045         file_priv->dev_priv = dev->dev_private;
5046         file_priv->file = file;
5047
5048         spin_lock_init(&file_priv->mm.lock);
5049         INIT_LIST_HEAD(&file_priv->mm.request_list);
5050         INIT_DELAYED_WORK(&file_priv->mm.idle_work,
5051                           i915_gem_file_idle_work_handler);
5052
5053         ret = i915_gem_context_open(dev, file);
5054         if (ret)
5055                 kfree(file_priv);
5056
5057         return ret;
5058 }
5059
5060 /**
5061  * i915_gem_track_fb - update frontbuffer tracking
5062  * @old: current GEM buffer for the frontbuffer slots
5063  * @new: new GEM buffer for the frontbuffer slots
5064  * @frontbuffer_bits: bitmask of frontbuffer slots
5065  *
5066  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
5067  * from @old and setting them in @new. Both @old and @new can be NULL.
5068  */
5069 void i915_gem_track_fb(struct drm_i915_gem_object *old,
5070                        struct drm_i915_gem_object *new,
5071                        unsigned frontbuffer_bits)
5072 {
5073         if (old) {
5074                 WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
5075                 WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
5076                 old->frontbuffer_bits &= ~frontbuffer_bits;
5077         }
5078
5079         if (new) {
5080                 WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
5081                 WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
5082                 new->frontbuffer_bits |= frontbuffer_bits;
5083         }
5084 }
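/*
 * A condensed usage sketch (hypothetical, for illustration only): when a
 * plane flips between two buffers, the relevant frontbuffer bit is moved
 * from the outgoing to the incoming object in one call, under
 * struct_mutex. This assumes the INTEL_FRONTBUFFER_PRIMARY() encoding
 * from intel_drv.h.
 */
#if 0
static void example_flip_tracking(struct drm_i915_gem_object *old_bo,
                                  struct drm_i915_gem_object *new_bo)
{
        /* clears the PIPE_A primary bit on old_bo, sets it on new_bo */
        i915_gem_track_fb(old_bo, new_bo, INTEL_FRONTBUFFER_PRIMARY(PIPE_A));
}
#endif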
5085
5086 /* All the new VM stuff */
5087 unsigned long
5088 i915_gem_obj_offset(struct drm_i915_gem_object *o,
5089                     struct i915_address_space *vm)
5090 {
5091         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5092         struct i915_vma *vma;
5093
5094         WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5095
5096         list_for_each_entry(vma, &o->vma_list, vma_link) {
5097                 if (i915_is_ggtt(vma->vm) &&
5098                     vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5099                         continue;
5100                 if (vma->vm == vm)
5101                         return vma->node.start;
5102         }
5103
5104         WARN(1, "%s vma for this object not found.\n",
5105              i915_is_ggtt(vm) ? "global" : "ppgtt");
5106         return -1;
5107 }
5108
5109 unsigned long
5110 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
5111                               const struct i915_ggtt_view *view)
5112 {
5113         struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
5114         struct i915_vma *vma;
5115
5116         list_for_each_entry(vma, &o->vma_list, vma_link)
5117                 if (vma->vm == ggtt &&
5118                     i915_ggtt_view_equal(&vma->ggtt_view, view))
5119                         return vma->node.start;
5120
5121         WARN(1, "global vma for this object not found.\n");
5122         return -1;
5123 }
5124
5125 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
5126                         struct i915_address_space *vm)
5127 {
5128         struct i915_vma *vma;
5129
5130         list_for_each_entry(vma, &o->vma_list, vma_link) {
5131                 if (i915_is_ggtt(vma->vm) &&
5132                     vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5133                         continue;
5134                 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
5135                         return true;
5136         }
5137
5138         return false;
5139 }
5140
5141 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
5142                                   const struct i915_ggtt_view *view)
5143 {
5144         struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
5145         struct i915_vma *vma;
5146
5147         list_for_each_entry(vma, &o->vma_list, vma_link)
5148                 if (vma->vm == ggtt &&
5149                     i915_ggtt_view_equal(&vma->ggtt_view, view) &&
5150                     drm_mm_node_allocated(&vma->node))
5151                         return true;
5152
5153         return false;
5154 }
5155
5156 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
5157 {
5158         struct i915_vma *vma;
5159
5160         list_for_each_entry(vma, &o->vma_list, vma_link)
5161                 if (drm_mm_node_allocated(&vma->node))
5162                         return true;
5163
5164         return false;
5165 }
5166
5167 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
5168                                 struct i915_address_space *vm)
5169 {
5170         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5171         struct i915_vma *vma;
5172
5173         WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5174
5175         BUG_ON(list_empty(&o->vma_list));
5176
5177         list_for_each_entry(vma, &o->vma_list, vma_link) {
5178                 if (i915_is_ggtt(vma->vm) &&
5179                     vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5180                         continue;
5181                 if (vma->vm == vm)
5182                         return vma->node.size;
5183         }
5184         return 0;
5185 }
5186
5187 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
5188 {
5189         struct i915_vma *vma;
5190         list_for_each_entry(vma, &obj->vma_list, vma_link) {
5191                 if (i915_is_ggtt(vma->vm) &&
5192                     vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5193                         continue;
5194                 if (vma->pin_count > 0)
5195                         return true;
5196         }
5197         return false;
5198 }
5199