Merge branch 'for-3.5-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj...
[firefly-linux-kernel-4.4.55.git] / drivers / gpu / drm / i915 / i915_gem_debug.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include "drmP.h"
29 #include "drm.h"
30 #include "i915_drm.h"
31 #include "i915_drv.h"
32
#if WATCH_LISTS
/**
 * i915_verify_lists - debug-only consistency check of the GEM object lists
 * @dev: DRM device whose memory-management lists are validated
 *
 * Walks the render ring's active list, the flushing list, the global
 * gpu_write_list and the inactive list, reporting (via DRM_ERROR) every
 * object whose recorded state contradicts the list it sits on.
 *
 * Returns the number of inconsistencies found (0 when everything is
 * coherent). The result is latched in a static, so after the first
 * failing run all subsequent calls short-circuit to 0 instead of
 * re-spamming the log.
 */
int
i915_verify_lists(struct drm_device *dev)
{
	static int warned;	/* latches non-zero once an error was reported */
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int err = 0;

	/* Already complained once — don't re-check or re-log. */
	if (warned)
		return 0;

	/* Active objects must still be referenced, be flagged active and be
	 * readable by at least one GPU domain; a GPU-writable one must also
	 * sit on its ring's gpu_write_list. An apparently-freed entry aborts
	 * the walk, since the list links themselves can't be trusted. */
	list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed render active %p\n", obj);
			err++;
			break;
		} else if (!obj->active ||
			   (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
			DRM_ERROR("invalid render active %p (a %d r %x)\n",
				  obj,
				  obj->active,
				  obj->base.read_domains);
			err++;
		} else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
			DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
				  obj,
				  obj->base.write_domain,
				  !list_empty(&obj->gpu_write_list));
			err++;
		}
	}

	/* Flushing objects are active with a pending GPU write domain and
	 * must be queued on the gpu_write_list awaiting that flush. */
	list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed flushing %p\n", obj);
			err++;
			break;
		} else if (!obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
			   list_empty(&obj->gpu_write_list)) {
			DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
				  obj,
				  obj->active,
				  obj->base.write_domain,
				  !list_empty(&obj->gpu_write_list));
			err++;
		}
	}

	/* Everything on the gpu_write_list must be active with a pending
	 * GPU write domain (walked via the gpu_write_list link here). */
	list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed gpu write %p\n", obj);
			err++;
			break;
		} else if (!obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
			DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
				  obj,
				  obj->active,
				  obj->base.write_domain);
			err++;
		}
	}

	/* Inactive objects must be unpinned, not active, and hold no
	 * pending GPU write domain. */
	list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed inactive %p\n", obj);
			err++;
			break;
		} else if (obj->pin_count || obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
			DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
				  obj,
				  obj->pin_count, obj->active,
				  obj->base.write_domain);
			err++;
		}
	}

	/* Latch the result: a non-zero error count disables future runs. */
	return warned = err;
}
#endif /* WATCH_LISTS */
120
#if WATCH_COHERENCY
/**
 * i915_gem_object_check_coherency - compare CPU and GTT views of an object
 * @obj: GEM object whose backing pages are compared against its GTT mapping
 * @handle: userspace handle, used only to label the log output
 *
 * Maps the object's aperture range with ioremap and walks every 32-bit
 * word of every backing page, logging (DRM_INFO) each word where the CPU
 * view disagrees with the GTT view. After about 8 mismatches the scan is
 * abandoned. Finishes by clflushing the object, since the CPU reads above
 * pulled the backing pages into the cache and we don't want to perturb
 * the cache state this debug aid is trying to observe.
 *
 * NOTE(review): assumes dev->agp is non-NULL (AGP/Intel GART aperture
 * present) and that obj->pages[] is populated — confirm at call sites.
 */
void
i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
{
	struct drm_device *dev = obj->base.dev;
	int page;
	uint32_t *gtt_mapping;
	uint32_t *backing_map = NULL;
	int bad_count = 0;

	/* The size lives on the embedded GEM base object; use it
	 * consistently (the old mix of obj->size and obj->base.size had
	 * bit-rotted under the normally-disabled WATCH_COHERENCY guard).
	 * %zu, not %zd: base.size is an unsigned size_t. */
	DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zukb):\n",
		 __func__, obj, obj->gtt_offset, handle,
		 obj->base.size / 1024);

	gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
	if (gtt_mapping == NULL) {
		DRM_ERROR("failed to map GTT space\n");
		return;
	}

	for (page = 0; page < obj->base.size / PAGE_SIZE; page++) {
		int i;

		backing_map = kmap_atomic(obj->pages[page]);

		if (backing_map == NULL) {
			/* kmap_atomic shouldn't fail; purely defensive. */
			DRM_ERROR("failed to map backing page\n");
			goto out;
		}

		for (i = 0; i < PAGE_SIZE / 4; i++) {
			uint32_t cpuval = backing_map[i];
			/* Offset in 32-bit words: PAGE_SIZE / sizeof(u32)
			 * words per page (was hard-coded 1024, which only
			 * held for 4 KiB pages). */
			uint32_t gttval = readl(gtt_mapping +
						page * (PAGE_SIZE / sizeof(uint32_t)) + i);

			if (cpuval != gttval) {
				DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
					 "0x%08x vs 0x%08x\n",
					 (int)(obj->gtt_offset +
					       page * PAGE_SIZE + i * 4),
					 cpuval, gttval);
				/* Cap the noise: bail after ~8 mismatches. */
				if (bad_count++ >= 8) {
					DRM_INFO("...\n");
					goto out;
				}
			}
		}
		kunmap_atomic(backing_map);
		backing_map = NULL;
	}

 out:
	/* backing_map is non-NULL only when we bailed mid-page. */
	if (backing_map != NULL)
		kunmap_atomic(backing_map);
	iounmap(gtt_mapping);

	/* give syslog time to catch up */
	msleep(1);

	/* Directly flush the object, since we just loaded values with the CPU
	 * from the backing pages and we don't want to disturb the cache
	 * management that we're trying to observe.
	 */
	i915_gem_clflush_object(obj);
}
#endif /* WATCH_COHERENCY */