drivers/gpu/drm/ttm/ttm_bo.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
        .name = "bo_count",
        .mode = S_IRUGO
};

static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
        int i;

        for (i = 0; i <= TTM_PL_PRIV5; i++)
                if (flags & (1 << i)) {
                        *mem_type = i;
                        return 0;
                }
        return -EINVAL;
}
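
/*
 * Example (editorial note, assuming the standard TTM placement flags):
 * ttm_mem_type_from_flags() returns the lowest set memory-type bit, so a
 * placement of TTM_PL_FLAG_VRAM (bit TTM_PL_VRAM) yields
 * *mem_type == TTM_PL_VRAM, while TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_VRAM
 * yields *mem_type == TTM_PL_SYSTEM, since bit 0 is tested first.
 */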

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];

        pr_err("    has_type: %d\n", man->has_type);
        pr_err("    use_type: %d\n", man->use_type);
        pr_err("    flags: 0x%08X\n", man->flags);
        pr_err("    gpu_offset: 0x%08lX\n", man->gpu_offset);
        pr_err("    size: %llu\n", man->size);
        pr_err("    available_caching: 0x%08X\n", man->available_caching);
        pr_err("    default_caching: 0x%08X\n", man->default_caching);
        if (mem_type != TTM_PL_SYSTEM)
                (*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
                                        struct ttm_placement *placement)
{
        int i, ret, mem_type;

        pr_err("No space for %p (%lu pages, %luK, %luM)\n",
               bo, bo->mem.num_pages, bo->mem.size >> 10,
               bo->mem.size >> 20);
        for (i = 0; i < placement->num_placement; i++) {
                ret = ttm_mem_type_from_flags(placement->placement[i],
                                                &mem_type);
                if (ret)
                        return;
                pr_err("  placement[%d]=0x%08X (%d)\n",
                       i, placement->placement[i], mem_type);
                ttm_mem_type_debug(bo->bdev, mem_type);
        }
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
                                  struct attribute *attr,
                                  char *buffer)
{
        struct ttm_bo_global *glob =
                container_of(kobj, struct ttm_bo_global, kobj);

        return snprintf(buffer, PAGE_SIZE, "%lu\n",
                        (unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
        &ttm_bo_count,
        NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
        .show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
        .release = &ttm_bo_global_kobj_release,
        .sysfs_ops = &ttm_bo_global_ops,
        .default_attrs = ttm_bo_global_attrs
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
        return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
        struct ttm_buffer_object *bo =
            container_of(list_kref, struct ttm_buffer_object, list_kref);
        struct ttm_bo_device *bdev = bo->bdev;
        size_t acc_size = bo->acc_size;

        BUG_ON(atomic_read(&bo->list_kref.refcount));
        BUG_ON(atomic_read(&bo->kref.refcount));
        BUG_ON(atomic_read(&bo->cpu_writers));
        BUG_ON(bo->sync_obj != NULL);
        BUG_ON(bo->mem.mm_node != NULL);
        BUG_ON(!list_empty(&bo->lru));
        BUG_ON(!list_empty(&bo->ddestroy));

        if (bo->ttm)
                ttm_tt_destroy(bo->ttm);
        atomic_dec(&bo->glob->bo_count);
        if (bo->destroy)
                bo->destroy(bo);
        else {
                kfree(bo);
        }
        ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
                                  bool interruptible)
{
        if (interruptible) {
                return wait_event_interruptible(bo->event_queue,
                                               !ttm_bo_is_reserved(bo));
        } else {
                wait_event(bo->event_queue, !ttm_bo_is_reserved(bo));
                return 0;
        }
}

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;

        BUG_ON(!ttm_bo_is_reserved(bo));

        if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

                BUG_ON(!list_empty(&bo->lru));

                man = &bdev->man[bo->mem.mem_type];
                list_add_tail(&bo->lru, &man->lru);
                kref_get(&bo->list_kref);

                if (bo->ttm != NULL) {
                        list_add_tail(&bo->swap, &bo->glob->swap_lru);
                        kref_get(&bo->list_kref);
                }
        }
}

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
        int put_count = 0;

        if (!list_empty(&bo->swap)) {
                list_del_init(&bo->swap);
                ++put_count;
        }
        if (!list_empty(&bo->lru)) {
                list_del_init(&bo->lru);
                ++put_count;
        }

        /*
         * TODO: Add a driver hook to delete from
         * driver-specific LRUs here.
         */

        return put_count;
}

int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence)
{
        int ret;

        while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
                /*
                 * Deadlock avoidance for multi-bo reserving.
                 */
                if (use_sequence && bo->seq_valid) {
                        /*
                         * We've already reserved this one.
                         */
                        if (unlikely(sequence == bo->val_seq))
                                return -EDEADLK;
                        /*
                         * Already reserved by a thread that will not back
                         * off for us. We need to back off.
                         */
                        if (unlikely(sequence - bo->val_seq < (1 << 31)))
                                return -EAGAIN;
                }

                if (no_wait)
                        return -EBUSY;

                ret = ttm_bo_wait_unreserved(bo, interruptible);

                if (unlikely(ret))
                        return ret;
        }

        if (use_sequence) {
                bool wake_up = false;
                /*
                 * Wake up waiters that may need to recheck for deadlock,
                 * if we decreased the sequence number.
                 */
                if (unlikely((bo->val_seq - sequence < (1 << 31))
                             || !bo->seq_valid))
                        wake_up = true;

                /*
                 * In the worst case with memory ordering these values can be
                 * seen in the wrong order. However, since we call wake_up_all
                 * in that case, this will hopefully not pose a problem,
                 * and the worst case would only cause someone to accidentally
                 * hit -EAGAIN in ttm_bo_reserve when they see an old value of
                 * val_seq. However, this would only happen if seq_valid was
                 * written before val_seq was, and just means some slightly
                 * increased CPU usage.
                 */
                bo->val_seq = sequence;
                bo->seq_valid = true;
                if (wake_up)
                        wake_up_all(&bo->event_queue);
        } else {
                bo->seq_valid = false;
        }

        return 0;
}
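
/*
 * Example (editorial note): the deadlock-avoidance test above relies on
 * unsigned wraparound. With 32-bit sequence numbers,
 * "sequence - bo->val_seq < (1 << 31)" holds exactly when @sequence is
 * modularly newer than or equal to bo->val_seq. E.g. with
 * bo->val_seq == 0xfffffffe and sequence == 1, the difference is 3, so the
 * reserver holding the wrapped-around (newer) sequence backs off with
 * -EAGAIN instead of deadlocking against the older reserver.
 */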

static void ttm_bo_ref_bug(struct kref *list_kref)
{
        BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
                         bool never_free)
{
        kref_sub(&bo->list_kref, count,
                 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
                   bool interruptible,
                   bool no_wait, bool use_sequence, uint32_t sequence)
{
        struct ttm_bo_global *glob = bo->glob;
        int put_count = 0;
        int ret;

        ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
                                   sequence);
        if (likely(ret == 0)) {
                spin_lock(&glob->lru_lock);
                put_count = ttm_bo_del_from_lru(bo);
                spin_unlock(&glob->lru_lock);
                ttm_bo_list_ref_sub(bo, put_count, true);
        }

        return ret;
}
EXPORT_SYMBOL(ttm_bo_reserve);
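
/*
 * Example (editorial sketch, hypothetical driver code): the usual pattern
 * for a single-buffer operation is reserve, touch the buffer, unreserve:
 *
 *	int ret;
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (unlikely(ret != 0))
 *		return ret;	(-ERESTARTSYS if interrupted while waiting)
 *
 *	Here bo is off the LRU lists and owned by this thread; it is safe
 *	to inspect or modify bo->mem, call ttm_bo_validate(), etc.
 *
 *	ttm_bo_unreserve(bo);	(back on the LRU; waiters are woken)
 */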

int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
                                  bool interruptible, uint32_t sequence)
{
        bool wake_up = false;
        int ret;

        while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
                WARN_ON(bo->seq_valid && sequence == bo->val_seq);

                ret = ttm_bo_wait_unreserved(bo, interruptible);

                if (unlikely(ret))
                        return ret;
        }

        if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
                wake_up = true;

        /*
         * Wake up waiters that may need to recheck for deadlock,
         * if we decreased the sequence number.
         */
        bo->val_seq = sequence;
        bo->seq_valid = true;
        if (wake_up)
                wake_up_all(&bo->event_queue);

        return 0;
}

int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
                            bool interruptible, uint32_t sequence)
{
        struct ttm_bo_global *glob = bo->glob;
        int put_count, ret;

        ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
        if (likely(!ret)) {
                spin_lock(&glob->lru_lock);
                put_count = ttm_bo_del_from_lru(bo);
                spin_unlock(&glob->lru_lock);
                ttm_bo_list_ref_sub(bo, put_count, true);
        }
        return ret;
}
EXPORT_SYMBOL(ttm_bo_reserve_slowpath);

void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
{
        ttm_bo_add_to_lru(bo);
        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
        struct ttm_bo_global *glob = bo->glob;

        spin_lock(&glob->lru_lock);
        ttm_bo_unreserve_locked(bo);
        spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);

/*
 * Call with bo->mutex held.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        uint32_t page_flags = 0;

        TTM_ASSERT_LOCKED(&bo->mutex);
        bo->ttm = NULL;

        if (bdev->need_dma32)
                page_flags |= TTM_PAGE_FLAG_DMA32;

        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
                /* fall through */
        case ttm_bo_type_kernel:
                bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                                      page_flags, glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL))
                        ret = -ENOMEM;
                break;
        case ttm_bo_type_sg:
                bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                                      page_flags | TTM_PAGE_FLAG_SG,
                                                      glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL)) {
                        ret = -ENOMEM;
                        break;
                }
                bo->ttm->sg = bo->sg;
                break;
        default:
                pr_err("Illegal buffer object type\n");
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem,
                                  bool evict, bool interruptible,
                                  bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
        bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
        struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
        struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (old_is_pci || new_is_pci ||
            ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
                ret = ttm_mem_io_lock(old_man, true);
                if (unlikely(ret != 0))
                        goto out_err;
                ttm_bo_unmap_virtual_locked(bo);
                ttm_mem_io_unlock(old_man);
        }

        /*
         * Create and bind a ttm if required.
         */

        if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
                if (bo->ttm == NULL) {
                        bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
                        ret = ttm_bo_add_ttm(bo, zero);
                        if (ret)
                                goto out_err;
                }

                ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
                if (ret)
                        goto out_err;

                if (mem->mem_type != TTM_PL_SYSTEM) {
                        ret = ttm_tt_bind(bo->ttm, mem);
                        if (ret)
                                goto out_err;
                }

                if (bo->mem.mem_type == TTM_PL_SYSTEM) {
                        if (bdev->driver->move_notify)
                                bdev->driver->move_notify(bo, mem);
                        bo->mem = *mem;
                        mem->mm_node = NULL;
                        goto moved;
                }
        }

        if (bdev->driver->move_notify)
                bdev->driver->move_notify(bo, mem);

        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
                ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
                                         no_wait_gpu, mem);
        else
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

        if (ret) {
                if (bdev->driver->move_notify) {
                        struct ttm_mem_reg tmp_mem = *mem;
                        *mem = bo->mem;
                        bo->mem = tmp_mem;
                        bdev->driver->move_notify(bo, mem);
                        bo->mem = *mem;
                        *mem = tmp_mem;
                }

                goto out_err;
        }

moved:
        if (bo->evicted) {
                ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
                if (ret)
                        pr_err("Cannot flush read caches\n");
                bo->evicted = false;
        }

        if (bo->mem.mm_node) {
                bo->offset = (bo->mem.start << PAGE_SHIFT) +
                    bdev->man[bo->mem.mem_type].gpu_offset;
                bo->cur_placement = bo->mem.placement;
        } else
                bo->offset = 0;

        return 0;

out_err:
        new_man = &bdev->man[bo->mem.mem_type];
        if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
                ttm_tt_unbind(bo->ttm);
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
        }

        return ret;
}

/**
 * Call with bo::reserved held.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver-specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
        if (bo->bdev->driver->move_notify)
                bo->bdev->driver->move_notify(bo, NULL);

        if (bo->ttm) {
                ttm_tt_unbind(bo->ttm);
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
        }
        ttm_bo_mem_put(bo, &bo->mem);

        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);

        /*
         * Since the final reference to this bo may not be dropped by
         * the current task we have to put a memory barrier here to make
         * sure the changes done in this function are always visible.
         *
         * This function only needs protection against the final kref_put.
         */
        smp_mb__before_atomic_dec();
}

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_bo_driver *driver = bdev->driver;
        void *sync_obj = NULL;
        int put_count;
        int ret;

        spin_lock(&glob->lru_lock);
        ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);

        spin_lock(&bdev->fence_lock);
        (void) ttm_bo_wait(bo, false, false, true);
        if (!ret && !bo->sync_obj) {
                spin_unlock(&bdev->fence_lock);
                put_count = ttm_bo_del_from_lru(bo);

                spin_unlock(&glob->lru_lock);
                ttm_bo_cleanup_memtype_use(bo);

                ttm_bo_list_ref_sub(bo, put_count, true);

                return;
        }
        if (bo->sync_obj)
                sync_obj = driver->sync_obj_ref(bo->sync_obj);
        spin_unlock(&bdev->fence_lock);

        if (!ret) {
                atomic_set(&bo->reserved, 0);
                wake_up_all(&bo->event_queue);
        }

        kref_get(&bo->list_kref);
        list_add_tail(&bo->ddestroy, &bdev->ddestroy);
        spin_unlock(&glob->lru_lock);

        if (sync_obj) {
                driver->sync_obj_flush(sync_obj);
                driver->sync_obj_unref(&sync_obj);
        }
        schedule_delayed_work(&bdev->wq,
                              ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * ttm_bo_cleanup_refs_and_unlock
 * If the bo is idle, remove it from the delayed-destroy and LRU lists
 * and unreference it. If it is not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held; this function
 * will drop both before returning.
 *
 * @interruptible: Any sleeps should occur interruptibly.
 * @no_wait_gpu: Never wait for the GPU. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
                                          bool interruptible,
                                          bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct ttm_bo_global *glob = bo->glob;
        int put_count;
        int ret;

        spin_lock(&bdev->fence_lock);
        ret = ttm_bo_wait(bo, false, false, true);

        if (ret && !no_wait_gpu) {
                void *sync_obj;

                /*
                 * Take a reference to the fence and unreserve,
                 * at this point the buffer should be dead, so
                 * no new sync objects can be attached.
                 */
                sync_obj = driver->sync_obj_ref(bo->sync_obj);
                spin_unlock(&bdev->fence_lock);

                atomic_set(&bo->reserved, 0);
                wake_up_all(&bo->event_queue);
                spin_unlock(&glob->lru_lock);

                ret = driver->sync_obj_wait(sync_obj, false, interruptible);
                driver->sync_obj_unref(&sync_obj);
                if (ret)
                        return ret;

                /*
                 * remove sync_obj with ttm_bo_wait, the wait should be
                 * finished, and no new wait object should have been added.
                 */
                spin_lock(&bdev->fence_lock);
                ret = ttm_bo_wait(bo, false, false, true);
                WARN_ON(ret);
                spin_unlock(&bdev->fence_lock);
                if (ret)
                        return ret;

                spin_lock(&glob->lru_lock);
                ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);

                /*
                 * We raced, and lost, someone else holds the reservation now,
                 * and is probably busy in ttm_bo_cleanup_memtype_use.
                 *
                 * Even if it's not the case, because we finished waiting any
                 * delayed destruction would succeed, so just return success
                 * here.
                 */
                if (ret) {
                        spin_unlock(&glob->lru_lock);
                        return 0;
                }
        } else
                spin_unlock(&bdev->fence_lock);

        if (ret || unlikely(list_empty(&bo->ddestroy))) {
                atomic_set(&bo->reserved, 0);
                wake_up_all(&bo->event_queue);
                spin_unlock(&glob->lru_lock);
                return ret;
        }

        put_count = ttm_bo_del_from_lru(bo);
        list_del_init(&bo->ddestroy);
        ++put_count;

        spin_unlock(&glob->lru_lock);
        ttm_bo_cleanup_memtype_use(bo);

        ttm_bo_list_ref_sub(bo, put_count, true);

        return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs_and_unlock on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_buffer_object *entry = NULL;
        int ret = 0;

        spin_lock(&glob->lru_lock);
        if (list_empty(&bdev->ddestroy))
                goto out_unlock;

        entry = list_first_entry(&bdev->ddestroy,
                struct ttm_buffer_object, ddestroy);
        kref_get(&entry->list_kref);

        for (;;) {
                struct ttm_buffer_object *nentry = NULL;

                if (entry->ddestroy.next != &bdev->ddestroy) {
                        nentry = list_first_entry(&entry->ddestroy,
                                struct ttm_buffer_object, ddestroy);
                        kref_get(&nentry->list_kref);
                }

                ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
                if (remove_all && ret) {
                        spin_unlock(&glob->lru_lock);
                        ret = ttm_bo_reserve_nolru(entry, false, false,
                                                   false, 0);
                        spin_lock(&glob->lru_lock);
                }

                if (!ret)
                        ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
                                                             !remove_all);
                else
                        spin_unlock(&glob->lru_lock);

                kref_put(&entry->list_kref, ttm_bo_release_list);
                entry = nentry;

                if (ret || !entry)
                        goto out;

                spin_lock(&glob->lru_lock);
                if (list_empty(&entry->ddestroy))
                        break;
        }

out_unlock:
        spin_unlock(&glob->lru_lock);
out:
        if (entry)
                kref_put(&entry->list_kref, ttm_bo_release_list);
        return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
        struct ttm_bo_device *bdev =
            container_of(work, struct ttm_bo_device, wq.work);

        if (ttm_bo_delayed_delete(bdev, false)) {
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
        }
}

static void ttm_bo_release(struct kref *kref)
{
        struct ttm_buffer_object *bo =
            container_of(kref, struct ttm_buffer_object, kref);
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

        write_lock(&bdev->vm_lock);
        if (likely(bo->vm_node != NULL)) {
                rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
                drm_mm_put_block(bo->vm_node);
                bo->vm_node = NULL;
        }
        write_unlock(&bdev->vm_lock);
        ttm_mem_io_lock(man, false);
        ttm_mem_io_free_vm(bo);
        ttm_mem_io_unlock(man);
        ttm_bo_cleanup_refs_or_queue(bo);
        kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo = *p_bo;

        *p_bo = NULL;
        kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
        return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
        if (resched)
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
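
/*
 * Example (editorial sketch, hypothetical driver code): drivers typically
 * bracket operations that must not race with delayed destruction, such as
 * a GPU reset, with this pair:
 *
 *	int resched = ttm_bo_lock_delayed_workqueue(bdev);
 *	... perform the reset; the delayed-destroy work is parked ...
 *	ttm_bo_unlock_delayed_workqueue(bdev, resched);
 *
 * The return value of the lock call records whether the work was pending,
 * so the unlock call knows whether to reschedule it.
 */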

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
                        bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_reg evict_mem;
        struct ttm_placement placement;
        int ret = 0;

        spin_lock(&bdev->fence_lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
        spin_unlock(&bdev->fence_lock);

        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS) {
                        pr_err("Failed to expire sync object before buffer eviction\n");
                }
                goto out;
        }

        BUG_ON(!ttm_bo_is_reserved(bo));

        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;
        evict_mem.bus.io_reserved_vm = false;
        evict_mem.bus.io_reserved_count = 0;

        placement.fpfn = 0;
        placement.lpfn = 0;
        placement.num_placement = 0;
        placement.num_busy_placement = 0;
        bdev->driver->evict_flags(bo, &placement);
        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
                                no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS) {
                        pr_err("Failed to find memory space for buffer 0x%p eviction\n",
                               bo);
                        ttm_bo_mem_space_debug(bo, &placement);
                }
                goto out;
        }

        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
                                     no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        pr_err("Buffer eviction failed\n");
                ttm_bo_mem_put(bo, &evict_mem);
                goto out;
        }
        bo->evicted = true;
out:
        return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                                uint32_t mem_type,
                                bool interruptible,
                                bool no_wait_gpu)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_buffer_object *bo;
        int ret = -EBUSY, put_count;

        spin_lock(&glob->lru_lock);
        list_for_each_entry(bo, &man->lru, lru) {
                ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
                if (!ret)
                        break;
        }

        if (ret) {
                spin_unlock(&glob->lru_lock);
                return ret;
        }

        kref_get(&bo->list_kref);

        if (!list_empty(&bo->ddestroy)) {
                ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
                                                     no_wait_gpu);
                kref_put(&bo->list_kref, ttm_bo_release_list);
                return ret;
        }

        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        BUG_ON(ret != 0);

        ttm_bo_list_ref_sub(bo, put_count, true);

        ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
        ttm_bo_unreserve(bo);

        kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

        if (mem->mm_node)
                (*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                        uint32_t mem_type,
                                        struct ttm_placement *placement,
                                        struct ttm_mem_reg *mem,
                                        bool interruptible,
                                        bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        int ret;

        do {
                ret = (*man->func->get_node)(man, bo, placement, mem);
                if (unlikely(ret != 0))
                        return ret;
                if (mem->mm_node)
                        break;
                ret = ttm_mem_evict_first(bdev, mem_type,
                                          interruptible, no_wait_gpu);
                if (unlikely(ret != 0))
                        return ret;
        } while (1);
        if (mem->mm_node == NULL)
                return -ENOMEM;
        mem->mem_type = mem_type;
        return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
                                      uint32_t cur_placement,
                                      uint32_t proposed_placement)
{
        uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
        uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

        /*
         * Keep current caching if possible.
         */

        if ((cur_placement & caching) != 0)
                result |= (cur_placement & caching);
        else if ((man->default_caching & caching) != 0)
                result |= man->default_caching;
        else if ((TTM_PL_FLAG_CACHED & caching) != 0)
                result |= TTM_PL_FLAG_CACHED;
        else if ((TTM_PL_FLAG_WC & caching) != 0)
                result |= TTM_PL_FLAG_WC;
        else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
                result |= TTM_PL_FLAG_UNCACHED;

        return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
                                 uint32_t mem_type,
                                 uint32_t proposed_placement,
                                 uint32_t *masked_placement)
{
        uint32_t cur_flags = ttm_bo_type_flags(mem_type);

        if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
                return false;

        if ((proposed_placement & man->available_caching) == 0)
                return false;

        cur_flags |= (proposed_placement & man->available_caching);

        *masked_placement = cur_flags;
        return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        struct ttm_mem_reg *mem,
                        bool interruptible,
                        bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;
        uint32_t mem_type = TTM_PL_SYSTEM;
        uint32_t cur_flags = 0;
        bool type_found = false;
        bool type_ok = false;
        bool has_erestartsys = false;
        int i, ret;

        mem->mm_node = NULL;
        for (i = 0; i < placement->num_placement; ++i) {
                ret = ttm_mem_type_from_flags(placement->placement[i],
                                                &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];

                type_ok = ttm_bo_mt_compatible(man,
                                                mem_type,
                                                placement->placement[i],
                                                &cur_flags);

                if (!type_ok)
                        continue;

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);
                /*
                 * Copy the access and other non-mapping-related flag bits
                 * from the memory placement flags into the current flags.
                 */
                ttm_flag_masked(&cur_flags, placement->placement[i],
                                ~TTM_PL_MASK_MEMTYPE);

                if (mem_type == TTM_PL_SYSTEM)
                        break;

                if (man->has_type && man->use_type) {
                        type_found = true;
                        ret = (*man->func->get_node)(man, bo, placement, mem);
                        if (unlikely(ret))
                                return ret;
                }
                if (mem->mm_node)
                        break;
        }

        if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
                mem->mem_type = mem_type;
                mem->placement = cur_flags;
                return 0;
        }

        if (!type_found)
                return -EINVAL;

        for (i = 0; i < placement->num_busy_placement; ++i) {
                ret = ttm_mem_type_from_flags(placement->busy_placement[i],
                                                &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];
                if (!man->has_type)
                        continue;
                if (!ttm_bo_mt_compatible(man,
                                                mem_type,
                                                placement->busy_placement[i],
                                                &cur_flags))
                        continue;

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);
                /*
                 * Copy the access and other non-mapping-related flag bits
                 * from the memory placement flags into the current flags.
                 */
                ttm_flag_masked(&cur_flags, placement->busy_placement[i],
                                ~TTM_PL_MASK_MEMTYPE);

                if (mem_type == TTM_PL_SYSTEM) {
                        mem->mem_type = mem_type;
                        mem->placement = cur_flags;
                        mem->mm_node = NULL;
                        return 0;
                }

                ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
                                                interruptible, no_wait_gpu);
                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
                        return 0;
                }
                if (ret == -ERESTARTSYS)
                        has_erestartsys = true;
        }
        ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
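
/*
 * Example (editorial sketch, hypothetical driver code): a driver asking for
 * VRAM but willing to fall back to GTT under memory pressure might build a
 * placement like the following before calling ttm_bo_mem_space() or
 * ttm_bo_validate():
 *
 *	static const uint32_t placements[] = {
 *		TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
 *	};
 *	static const uint32_t busy_placements[] = {
 *		TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED,
 *	};
 *	struct ttm_placement placement = {
 *		.fpfn = 0,
 *		.lpfn = 0,		(0 means no page-range restriction)
 *		.num_placement = ARRAY_SIZE(placements),
 *		.placement = placements,
 *		.num_busy_placement = ARRAY_SIZE(busy_placements),
 *		.busy_placement = busy_placements,
 *	};
 */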

int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        bool interruptible,
                        bool no_wait_gpu)
{
        int ret = 0;
        struct ttm_mem_reg mem;
        struct ttm_bo_device *bdev = bo->bdev;

        BUG_ON(!ttm_bo_is_reserved(bo));

        /*
         * FIXME: It's possible to pipeline buffer moves.
         * Have the driver move function wait for idle when necessary,
         * instead of doing it here.
         */
        spin_lock(&bdev->fence_lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
        spin_unlock(&bdev->fence_lock);
        if (ret)
                return ret;
        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;
        mem.bus.io_reserved_vm = false;
        mem.bus.io_reserved_count = 0;
        /*
         * Determine where to move the buffer.
         */
        ret = ttm_bo_mem_space(bo, placement, &mem,
                               interruptible, no_wait_gpu);
        if (ret)
                goto out_unlock;
        ret = ttm_bo_handle_move_mem(bo, &mem, false,
                                     interruptible, no_wait_gpu);
out_unlock:
        if (ret && mem.mm_node)
                ttm_bo_mem_put(bo, &mem);
        return ret;
}

static int ttm_bo_mem_compat(struct ttm_placement *placement,
                             struct ttm_mem_reg *mem)
{
        int i;

        if (mem->mm_node && placement->lpfn != 0 &&
            (mem->start < placement->fpfn ||
             mem->start + mem->num_pages > placement->lpfn))
                return -1;

        for (i = 0; i < placement->num_placement; i++) {
                if ((placement->placement[i] & mem->placement &
                        TTM_PL_MASK_CACHING) &&
                        (placement->placement[i] & mem->placement &
                        TTM_PL_MASK_MEM))
                        return i;
        }
        return -1;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        bool interruptible,
                        bool no_wait_gpu)
{
        int ret;

        BUG_ON(!ttm_bo_is_reserved(bo));
        /* Check that the range is valid */
        if (placement->lpfn || placement->fpfn)
                if (placement->fpfn > placement->lpfn ||
                        (placement->lpfn - placement->fpfn) < bo->num_pages)
                        return -EINVAL;
        /*
         * Check whether we need to move the buffer.
         */
        ret = ttm_bo_mem_compat(placement, &bo->mem);
        if (ret < 0) {
                ret = ttm_bo_move_buffer(bo, placement, interruptible,
                                         no_wait_gpu);
                if (ret)
                        return ret;
        } else {
                /*
                 * Copy the access and other non-mapping-related flag bits
                 * from the compatible memory placement flags into the
                 * active flags.
                 */
                ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
                                ~TTM_PL_MASK_MEMTYPE);
        }
        /*
         * We might need to add a TTM.
         */
        if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                ret = ttm_bo_add_ttm(bo, true);
                if (ret)
                        return ret;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
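
/*
 * Example (editorial sketch, hypothetical driver code): pinning a buffer in
 * VRAM for scanout is just a validate with a NO_EVICT placement. The buffer
 * must already be reserved:
 *
 *	uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC |
 *			 TTM_PL_FLAG_NO_EVICT;
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &flags,
 *		.num_busy_placement = 1,
 *		.busy_placement = &flags,
 *	};
 *	int ret = ttm_bo_validate(bo, &placement, false, false);
 *
 * On success bo->mem describes the new VRAM location, and the NO_EVICT flag
 * keeps ttm_bo_add_to_lru() from putting the buffer back on the LRU.
 */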

int ttm_bo_check_placement(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement)
{
        BUG_ON((placement->fpfn || placement->lpfn) &&
               (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));

        return 0;
}

int ttm_bo_init(struct ttm_bo_device *bdev,
                struct ttm_buffer_object *bo,
                unsigned long size,
                enum ttm_bo_type type,
                struct ttm_placement *placement,
                uint32_t page_alignment,
                bool interruptible,
                struct file *persistent_swap_storage,
                size_t acc_size,
                struct sg_table *sg,
                void (*destroy) (struct ttm_buffer_object *))
{
        int ret = 0;
        unsigned long num_pages;
        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;

        ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
        if (ret) {
                pr_err("Out of kernel memory\n");
                if (destroy)
                        (*destroy)(bo);
                else
                        kfree(bo);
                return -ENOMEM;
        }

        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (num_pages == 0) {
                pr_err("Illegal buffer object size\n");
                if (destroy)
                        (*destroy)(bo);
                else
                        kfree(bo);
                ttm_mem_global_free(mem_glob, acc_size);
                return -EINVAL;
        }
        bo->destroy = destroy;

        kref_init(&bo->kref);
        kref_init(&bo->list_kref);
        atomic_set(&bo->cpu_writers, 0);
        atomic_set(&bo->reserved, 1);
        init_waitqueue_head(&bo->event_queue);
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->ddestroy);
        INIT_LIST_HEAD(&bo->swap);
        INIT_LIST_HEAD(&bo->io_reserve_lru);
        bo->bdev = bdev;
        bo->glob = bdev->glob;
        bo->type = type;
        bo->num_pages = num_pages;
        bo->mem.size = num_pages << PAGE_SHIFT;
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->mem.num_pages = bo->num_pages;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
        bo->mem.bus.io_reserved_vm = false;
        bo->mem.bus.io_reserved_count = 0;
        bo->priv_flags = 0;
        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
        bo->seq_valid = false;
        bo->persistent_swap_storage = persistent_swap_storage;
        bo->acc_size = acc_size;
        bo->sg = sg;
        atomic_inc(&bo->glob->bo_count);

        ret = ttm_bo_check_placement(bo, placement);
        if (unlikely(ret != 0))
                goto out_err;

        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
         */
        if (bo->type == ttm_bo_type_device ||
            bo->type == ttm_bo_type_sg) {
                ret = ttm_bo_setup_vm(bo);
                if (ret)
                        goto out_err;
        }

        ret = ttm_bo_validate(bo, placement, interruptible, false);
        if (ret)
                goto out_err;

        ttm_bo_unreserve(bo);
        return 0;

out_err:
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_init);
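
/*
 * Example (editorial sketch, hypothetical driver code): embedding a
 * ttm_buffer_object in a driver-private structure and initializing it. The
 * "my_bo" type, its destroy callback and "sys_placement" are illustrative
 * assumptions, not part of this file:
 *
 *	struct my_bo {
 *		struct ttm_buffer_object base;
 *	};
 *
 *	static void my_bo_destroy(struct ttm_buffer_object *tbo)
 *	{
 *		kfree(container_of(tbo, struct my_bo, base));
 *	}
 *
 *	size_t acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct my_bo));
 *	struct my_bo *mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
 *
 *	ret = ttm_bo_init(bdev, &mbo->base, size, ttm_bo_type_device,
 *			  &sys_placement, 0, true, NULL, acc_size,
 *			  NULL, my_bo_destroy);
 *
 * Note that ttm_bo_init() consumes @bo on failure (it calls @destroy or
 * kfree), so the caller must not free it again.
 */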

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
                       unsigned long bo_size,
                       unsigned struct_size)
{
        unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
        size_t size = 0;

        size += ttm_round_pot(struct_size);
        size += PAGE_ALIGN(npages * sizeof(void *));
        size += ttm_round_pot(sizeof(struct ttm_tt));
        return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);
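
/*
 * Worked example (editorial note, assuming 4 KiB pages and 8-byte pointers):
 * for a 1 MiB buffer, npages = 256, so the page-pointer array accounts for
 * PAGE_ALIGN(256 * 8) = 4096 bytes, plus the power-of-two-rounded sizes of
 * the driver's bo structure and of struct ttm_tt. This is a bookkeeping
 * estimate for the memory-accounting layer, not an allocation size.
 */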

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
                           unsigned long bo_size,
                           unsigned struct_size)
{
        unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
        size_t size = 0;

        size += ttm_round_pot(struct_size);
        size += PAGE_ALIGN(npages * sizeof(void *));
        size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
        size += ttm_round_pot(sizeof(struct ttm_dma_tt));
        return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
                        unsigned long size,
                        enum ttm_bo_type type,
                        struct ttm_placement *placement,
                        uint32_t page_alignment,
                        bool interruptible,
                        struct file *persistent_swap_storage,
                        struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo;
        size_t acc_size;
        int ret;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (unlikely(bo == NULL))
                return -ENOMEM;

        acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
        ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
                          interruptible, persistent_swap_storage, acc_size,
                          NULL, NULL);
        if (likely(ret == 0))
                *p_bo = bo;

        return ret;
}
EXPORT_SYMBOL(ttm_bo_create);
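
/*
 * Example (editorial sketch): ttm_bo_create() is the convenience path when
 * no driver-private wrapper struct is needed; compare the ttm_bo_init()
 * sketch above. "sys_placement" is again an assumed placement:
 *
 *	struct ttm_buffer_object *bo;
 *	int ret = ttm_bo_create(bdev, 4096, ttm_bo_type_kernel,
 *				&sys_placement, 0, false, NULL, &bo);
 *	if (ret == 0)
 *		ttm_bo_unref(&bo);	(drop the reference when done)
 */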

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
                                        unsigned mem_type, bool allow_errors)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_bo_global *glob = bdev->glob;
        int ret;

        /*
         * Can't use standard list traversal since we're unlocking.
         */

        spin_lock(&glob->lru_lock);
        while (!list_empty(&man->lru)) {
                spin_unlock(&glob->lru_lock);
                ret = ttm_mem_evict_first(bdev, mem_type, false, false);
                if (ret) {
                        if (allow_errors) {
                                return ret;
                        } else {
                                pr_err("Cleanup eviction failed\n");
                        }
                }
                spin_lock(&glob->lru_lock);
        }
        spin_unlock(&glob->lru_lock);
        return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
        struct ttm_mem_type_manager *man;
        int ret = -EINVAL;

        if (mem_type >= TTM_NUM_MEM_TYPES) {
                pr_err("Illegal memory type %u\n", mem_type);
                return ret;
        }
        man = &bdev->man[mem_type];

        if (!man->has_type) {
                pr_err("Trying to take down uninitialized memory manager type %u\n",
                       mem_type);
                return ret;
        }

        man->use_type = false;
        man->has_type = false;

        ret = 0;
        if (mem_type > 0) {
                ttm_bo_force_list_clean(bdev, mem_type, false);

                ret = (*man->func->takedown)(man);
        }

        return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];

        if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
                pr_err("Illegal memory manager memory type %u\n", mem_type);
                return -EINVAL;
        }

        if (!man->has_type) {
                pr_err("Memory type %u has not been initialized\n", mem_type);
                return 0;
        }

        return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);
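
/*
 * Example (editorial note, hypothetical driver code): a typical caller is a
 * driver suspend path that wants all buffers out of VRAM before powering
 * the device down:
 *
 *	ret = ttm_bo_evict_mm(bdev, TTM_PL_VRAM);
 *	if (ret)
 *		pr_err("Failed to evict VRAM before suspend\n");
 */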

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
                        unsigned long p_size)
{
        int ret = -EINVAL;
        struct ttm_mem_type_manager *man;

        BUG_ON(type >= TTM_NUM_MEM_TYPES);
        man = &bdev->man[type];
        BUG_ON(man->has_type);
        man->io_reserve_fastpath = true;
        man->use_io_reserve_lru = false;
        mutex_init(&man->io_reserve_mutex);
        INIT_LIST_HEAD(&man->io_reserve_lru);

        ret = bdev->driver->init_mem_type(bdev, type, man);
        if (ret)
                return ret;
        man->bdev = bdev;

        ret = 0;
        if (type != TTM_PL_SYSTEM) {
                ret = (*man->func->init)(man, p_size);
                if (ret)
                        return ret;
        }
        man->has_type = true;
        man->use_type = true;
        man->size = p_size;

        INIT_LIST_HEAD(&man->lru);

        return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);
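
/*
 * Example (editorial sketch, hypothetical driver code): after
 * ttm_bo_device_init(), a driver registers each GPU memory domain it
 * manages, sized in pages:
 *
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
 *	if (ret == 0)
 *		ret = ttm_bo_init_mm(bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
 *
 * The driver's init_mem_type() hook fills in man->func (typically
 * &ttm_bo_manager_func for range-managed types), flags and caching for
 * each type.
 */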

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		pr_err("Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);
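
/*
 * Illustrative sketch, not part of this file: drivers do not call
 * ttm_bo_global_init() directly; they register it through the DRM
 * global item mechanism so a single object is shared across devices
 * ("driver_priv" and "mem_glob" are hypothetical driver-side names):
 *
 *	struct ttm_bo_global_ref *bo_ref = &driver_priv->bo_global_ref;
 *
 *	bo_ref->mem_glob = mem_glob;
 *	bo_ref->ref.global_type = DRM_GLOBAL_TTM_BO;
 *	bo_ref->ref.size = sizeof(struct ttm_bo_global);
 *	bo_ref->ref.init = &ttm_bo_global_init;
 *	bo_ref->ref.release = &ttm_bo_global_release;
 *	ret = drm_global_item_ref(&bo_ref->ref);
 */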
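
/**
 * ttm_bo_device_release - take down a ttm buffer object device.
 *
 * @bdev: the device to take down.
 *
 * Cleans all remaining memory managers, removes the device from the
 * global device list, drains the delayed-destroy queue and releases
 * the device address space. Returns -EBUSY if one of the non-system
 * memory managers could not be cleaned.
 */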
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %u is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&glob->lru_lock);

	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	write_lock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	write_unlock(&bdev->vm_lock);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	rwlock_init(&bdev->vm_lock);
	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	bdev->addr_space_rb = RB_ROOT;
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_no_addr_mm;

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	bdev->val_seq = 0;
	spin_lock_init(&bdev->fence_lock);
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_addr_mm:
	ttm_bo_clean_mm(bdev, 0);
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
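
/*
 * Illustrative sketch, not part of this file: a driver wiring up its
 * bo device at load time. "mman", "driver_bo_driver" and the per-driver
 * DRM_FILE_PAGE_OFFSET macro are hypothetical driver-side names:
 *
 *	ret = ttm_bo_device_init(&mman->bdev,
 *				 mman->bo_global_ref.ref.object,
 *				 &driver_bo_driver,
 *				 DRM_FILE_PAGE_OFFSET,
 *				 need_dma32);
 *	if (ret)
 *		DRM_ERROR("failed initializing buffer object driver (%d)\n",
 *			  ret);
 */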

/*
 * buffer object vm functions.
 */

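/*
 * ttm_mem_reg_is_pci - true if CPU access to @mem goes through an I/O
 * aperture (and thus needs ioremapping) rather than through ordinary
 * kernel pages. Fixed memory types always do; otherwise system memory,
 * CMA regions and cached placements do not.
 */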
bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	loff_t offset = (loff_t) bo->addr_space_offset;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

	if (!bdev->dev_mapping)
		return;
	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

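/*
 * Insert @bo into the device's address-space red-black tree, keyed by
 * the start of its vm_node. Offsets are unique, so hitting an equal key
 * is a bug. Caller must hold bdev->vm_lock for writing.
 */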
static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct ttm_buffer_object *cur_bo;
	unsigned long offset = bo->vm_node->start;
	unsigned long cur_offset;

	while (*cur) {
		parent = *cur;
		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
		cur_offset = cur_bo->vm_node->start;
		if (offset < cur_offset)
			cur = &parent->rb_left;
		else if (offset > cur_offset)
			cur = &parent->rb_right;
		else
			BUG();
	}

	rb_link_node(&bo->vm_rb, parent, cur);
	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects, as other buffer objects
 * are not placed in the drm device address space.
 */
static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

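	/*
	 * drm_mm_pre_get() preallocates mm nodes outside the lock;
	 * drm_mm_get_block_atomic() can still fail if another thread
	 * consumed the preallocation first, in which case we retry.
	 */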
retry_pre_get:
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	write_lock(&bdev->vm_lock);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	if (unlikely(bo->vm_node == NULL)) {
		write_unlock(&bdev->vm_lock);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	write_unlock(&bdev->vm_lock);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	write_unlock(&bdev->vm_lock);
	return ret;
}

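/*
 * ttm_bo_wait - wait for the buffer's sync object (fence) to signal.
 * Must be called with bdev->fence_lock held; the lock is dropped and
 * reacquired around the actual wait and around sync object unrefs, so
 * callers must not rely on it staying held throughout.
 */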
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	struct ttm_bo_device *bdev = bo->bdev;
	void *sync_obj;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {
		if (driver->sync_obj_signaled(bo->sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bdev->fence_lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		spin_unlock(&bdev->fence_lock);
		ret = driver->sync_obj_wait(sync_obj,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bdev->fence_lock);
			return ret;
		}
		spin_lock(&bdev->fence_lock);
		if (likely(bo->sync_obj == sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bdev->fence_lock);
		} else {
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bdev->fence_lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */
	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bdev->fence_lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
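
/*
 * Illustrative sketch, not part of this file: bracketing CPU writes so
 * the buffer is GPU-idle and the elevated cpu_writers count keeps it
 * from being moved while the CPU is touching it:
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (ret)
 *		return ret;
 *	... CPU writes through a kernel mapping of the buffer ...
 *	ttm_bo_synccpu_write_release(bo);
 */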

/*
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

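	/*
	 * Pick the first buffer on the swap LRU that can be reserved
	 * without blocking; a contended buffer is in active use and is
	 * a poor swap candidate anyway.
	 */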
	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &glob->swap_lru, swap) {
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/*
	 * Wait for GPU, then move to system cached.
	 */
	spin_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->bdev->fence_lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/*
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */
	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:
	/*
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
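
/*
 * Illustrative note, not from this file: since ttm_bo_swapout_all()
 * loops the shrink callback until it fails, a driver can use it to push
 * every swappable buffer out to shmem, for instance from a PM freeze
 * callback before hibernation (an assumption about driver usage, not
 * something this file mandates):
 *
 *	ttm_bo_swapout_all(bdev);
 */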