/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
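/*
 * Debugfs support for the i915 DRM driver: a set of read-only info nodes
 * plus a few writable control files. With CONFIG_DEBUG_FS enabled these
 * typically appear under <debugfs>/dri/<minor>/, e.g. /sys/kernel/debug/dri/0/.
 */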
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1

#if defined(CONFIG_DEBUG_FS)

enum {
        ACTIVE_LIST,
        FLUSHING_LIST,
        INACTIVE_LIST,
        PINNED_LIST,
        DEFERRED_FREE_LIST,
};
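/* Helper to pretty-print a boolean capability/flag value. */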
static const char *yesno(int v)
{
        return v ? "yes" : "no";
}
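/* Dump the static device capabilities: gen, PCH type, and feature flags. */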
static int i915_capabilities(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        const struct intel_device_info *info = INTEL_INFO(dev);

        seq_printf(m, "gen: %d\n", info->gen);
        seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
        B(cursor_needs_physical);
        B(overlay_needs_physical);
#undef B

        return 0;
}
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
        if (obj->user_pin_count > 0)
                return "P";
        else if (obj->pin_count > 0)
                return "p";
        else
                return " ";
}
static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
        switch (obj->tiling_mode) {
        default:
        case I915_TILING_NONE: return " ";
        case I915_TILING_X: return "X";
        case I915_TILING_Y: return "Y";
        }
}
static const char *cache_level_str(int type)
{
        switch (type) {
        case I915_CACHE_NONE: return " uncached";
        case I915_CACHE_LLC: return " snooped (LLC)";
        case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
        default: return "";
        }
}
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
        seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
                   &obj->base,
                   get_pin_flag(obj),
                   get_tiling_flag(obj),
                   obj->base.size / 1024,
                   obj->base.read_domains,
                   obj->base.write_domain,
                   obj->last_rendering_seqno,
                   obj->last_fenced_seqno,
                   cache_level_str(obj->cache_level),
                   obj->dirty ? " dirty" : "",
                   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
        if (obj->fence_reg != I915_FENCE_REG_NONE)
                seq_printf(m, " (fence: %d)", obj->fence_reg);
        if (obj->gtt_space != NULL)
                seq_printf(m, " (gtt offset: %08x, size: %08x)",
                           obj->gtt_offset, (unsigned int)obj->gtt_space->size);
        if (obj->pin_mappable || obj->fault_mappable) {
                char s[3], *t = s;
                if (obj->pin_mappable)
                        *t++ = 'p';
                if (obj->fault_mappable)
                        *t++ = 'f';
                *t = '\0';
                seq_printf(m, " (%s mappable)", s);
        }
        if (obj->ring != NULL)
                seq_printf(m, " (%s)", obj->ring->name);
}
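/*
 * Walk the GEM list selected via node->info_ent->data and describe every
 * object on it, followed by aggregate totals.
 */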
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        uintptr_t list = (uintptr_t) node->info_ent->data;
        struct list_head *head;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        size_t total_obj_size, total_gtt_size;
        int count, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        switch (list) {
        case ACTIVE_LIST:
                seq_printf(m, "Active:\n");
                head = &dev_priv->mm.active_list;
                break;
        case INACTIVE_LIST:
                seq_printf(m, "Inactive:\n");
                head = &dev_priv->mm.inactive_list;
                break;
        case PINNED_LIST:
                seq_printf(m, "Pinned:\n");
                head = &dev_priv->mm.pinned_list;
                break;
        case FLUSHING_LIST:
                seq_printf(m, "Flushing:\n");
                head = &dev_priv->mm.flushing_list;
                break;
        case DEFERRED_FREE_LIST:
                seq_printf(m, "Deferred free:\n");
                head = &dev_priv->mm.deferred_free_list;
                break;
        default:
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        total_obj_size = total_gtt_size = count = 0;
        list_for_each_entry(obj, head, mm_list) {
                seq_printf(m, "   ");
                describe_obj(m, obj);
                seq_printf(m, "\n");
                total_obj_size += obj->base.size;
                total_gtt_size += obj->gtt_space->size;
                count++;
        }
        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
                   count, total_obj_size, total_gtt_size);
        return 0;
}
#define count_objects(list, member) do { \
        list_for_each_entry(obj, list, member) { \
                size += obj->gtt_space->size; \
                ++count; \
                if (obj->map_and_fenceable) { \
                        mappable_size += obj->gtt_space->size; \
                        ++mappable_count; \
                } \
        } \
} while (0)
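/*
 * Note: count_objects() deliberately uses obj, size, count, mappable_size
 * and mappable_count from the caller's scope; i915_gem_object_info() below
 * relies on that to aggregate totals across the GEM lists.
 */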
static int i915_gem_object_info(struct seq_file *m, void* data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 count, mappable_count;
        size_t size, mappable_size;
        struct drm_i915_gem_object *obj;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "%u objects, %zu bytes\n",
                   dev_priv->mm.object_count,
                   dev_priv->mm.object_memory);

        size = count = mappable_size = mappable_count = 0;
        count_objects(&dev_priv->mm.gtt_list, gtt_list);
        seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
                   count, mappable_count, size, mappable_size);

        size = count = mappable_size = mappable_count = 0;
        count_objects(&dev_priv->mm.active_list, mm_list);
        count_objects(&dev_priv->mm.flushing_list, mm_list);
        seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
                   count, mappable_count, size, mappable_size);

        size = count = mappable_size = mappable_count = 0;
        count_objects(&dev_priv->mm.pinned_list, mm_list);
        seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
                   count, mappable_count, size, mappable_size);

        size = count = mappable_size = mappable_count = 0;
        count_objects(&dev_priv->mm.inactive_list, mm_list);
        seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
                   count, mappable_count, size, mappable_size);

        size = count = mappable_size = mappable_count = 0;
        count_objects(&dev_priv->mm.deferred_free_list, mm_list);
        seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
                   count, mappable_count, size, mappable_size);

        size = count = mappable_size = mappable_count = 0;
        list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
                if (obj->fault_mappable) {
                        size += obj->gtt_space->size;
                        ++count;
                }
                if (obj->pin_mappable) {
                        mappable_size += obj->gtt_space->size;
                        ++mappable_count;
                }
        }
        seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
                   mappable_count, mappable_size);
        seq_printf(m, "%u fault mappable objects, %zu bytes\n",
                   count, size);

        seq_printf(m, "%zu [%zu] gtt total\n",
                   dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static int i915_gem_gtt_info(struct seq_file *m, void* data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        size_t total_obj_size, total_gtt_size;
        int count, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        total_obj_size = total_gtt_size = count = 0;
        list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
                seq_printf(m, "   ");
                describe_obj(m, obj);
                seq_printf(m, "\n");
                total_obj_size += obj->base.size;
                total_gtt_size += obj->gtt_space->size;
                count++;
        }

        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
                   count, total_obj_size, total_gtt_size);

        return 0;
}
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        unsigned long flags;
        struct intel_crtc *crtc;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
                const char pipe = pipe_name(crtc->pipe);
                const char plane = plane_name(crtc->plane);
                struct intel_unpin_work *work;

                spin_lock_irqsave(&dev->event_lock, flags);
                work = crtc->unpin_work;
                if (work == NULL) {
                        seq_printf(m, "No flip due on pipe %c (plane %c)\n",
                                   pipe, plane);
                } else {
                        if (!work->pending) {
                                seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
                                           pipe, plane);
                        } else {
                                seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
                                           pipe, plane);
                        }
                        if (work->enable_stall_check)
                                seq_printf(m, "Stall check enabled, ");
                        else
                                seq_printf(m, "Stall check waiting for page flip ioctl, ");
                        seq_printf(m, "%d prepares\n", work->pending);

                        if (work->old_fb_obj) {
                                struct drm_i915_gem_object *obj = work->old_fb_obj;
                                if (obj)
                                        seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
                        }
                        if (work->pending_flip_obj) {
                                struct drm_i915_gem_object *obj = work->pending_flip_obj;
                                if (obj)
                                        seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
                        }
                }
                spin_unlock_irqrestore(&dev->event_lock, flags);
        }

        return 0;
}
static int i915_gem_request_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_request *gem_request;
        int ret, count;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        count = 0;
        if (!list_empty(&dev_priv->ring[RCS].request_list)) {
                seq_printf(m, "Render requests:\n");
                list_for_each_entry(gem_request,
                                    &dev_priv->ring[RCS].request_list,
                                    list) {
                        seq_printf(m, " %d @ %d\n",
                                   gem_request->seqno,
                                   (int) (jiffies - gem_request->emitted_jiffies));
                }
                count++;
        }
        if (!list_empty(&dev_priv->ring[VCS].request_list)) {
                seq_printf(m, "BSD requests:\n");
                list_for_each_entry(gem_request,
                                    &dev_priv->ring[VCS].request_list,
                                    list) {
                        seq_printf(m, " %d @ %d\n",
                                   gem_request->seqno,
                                   (int) (jiffies - gem_request->emitted_jiffies));
                }
                count++;
        }
        if (!list_empty(&dev_priv->ring[BCS].request_list)) {
                seq_printf(m, "BLT requests:\n");
                list_for_each_entry(gem_request,
                                    &dev_priv->ring[BCS].request_list,
                                    list) {
                        seq_printf(m, " %d @ %d\n",
                                   gem_request->seqno,
                                   (int) (jiffies - gem_request->emitted_jiffies));
                }
                count++;
        }
        mutex_unlock(&dev->struct_mutex);

        if (count == 0)
                seq_printf(m, "No requests\n");

        return 0;
}
static void i915_ring_seqno_info(struct seq_file *m,
                                 struct intel_ring_buffer *ring)
{
        if (ring->get_seqno) {
                seq_printf(m, "Current sequence (%s): %d\n",
                           ring->name, ring->get_seqno(ring));
                seq_printf(m, "Waiter sequence (%s): %d\n",
                           ring->name, ring->waiting_seqno);
                seq_printf(m, "IRQ sequence (%s): %d\n",
                           ring->name, ring->irq_seqno);
        }
}
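/* Print the seqno state of every ring, under struct_mutex. */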
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret, i;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        for (i = 0; i < I915_NUM_RINGS; i++)
                i915_ring_seqno_info(m, &dev_priv->ring[i]);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static int i915_interrupt_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret, i, pipe;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (!HAS_PCH_SPLIT(dev)) {
                seq_printf(m, "Interrupt enable: %08x\n",
                           I915_READ(IER));
                seq_printf(m, "Interrupt identity: %08x\n",
                           I915_READ(IIR));
                seq_printf(m, "Interrupt mask: %08x\n",
                           I915_READ(IMR));
                for_each_pipe(pipe)
                        seq_printf(m, "Pipe %c stat: %08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));
        } else {
                seq_printf(m, "North Display Interrupt enable: %08x\n",
                           I915_READ(DEIER));
                seq_printf(m, "North Display Interrupt identity: %08x\n",
                           I915_READ(DEIIR));
                seq_printf(m, "North Display Interrupt mask: %08x\n",
                           I915_READ(DEIMR));
                seq_printf(m, "South Display Interrupt enable: %08x\n",
                           I915_READ(SDEIER));
                seq_printf(m, "South Display Interrupt identity: %08x\n",
                           I915_READ(SDEIIR));
                seq_printf(m, "South Display Interrupt mask: %08x\n",
                           I915_READ(SDEIMR));
                seq_printf(m, "Graphics Interrupt enable: %08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Graphics Interrupt identity: %08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Graphics Interrupt mask: %08x\n",
                           I915_READ(GTIMR));
        }
        seq_printf(m, "Interrupts received: %d\n",
                   atomic_read(&dev_priv->irq_received));
        for (i = 0; i < I915_NUM_RINGS; i++) {
                if (IS_GEN6(dev) || IS_GEN7(dev)) {
                        seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
                                   dev_priv->ring[i].name,
                                   I915_READ_IMR(&dev_priv->ring[i]));
                }
                i915_ring_seqno_info(m, &dev_priv->ring[i]);
        }
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
        seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

                seq_printf(m, "Fenced object[%2d] = ", i);
                if (obj == NULL)
                        seq_printf(m, "unused");
                else
                        describe_obj(m, obj);
                seq_printf(m, "\n");
        }

        mutex_unlock(&dev->struct_mutex);
        return 0;
}
static int i915_hws_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        const volatile u32 __iomem *hws;
        int i;

        ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
        hws = (volatile u32 __iomem *)ring->status_page.page_addr;
        if (hws == NULL)
                return 0;

        for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
                seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
                           i * 4,
                           hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
        }
        return 0;
}
static void i915_dump_object(struct seq_file *m,
                             struct io_mapping *mapping,
                             struct drm_i915_gem_object *obj)
{
        int page, page_count, i;

        page_count = obj->base.size / PAGE_SIZE;
        for (page = 0; page < page_count; page++) {
                u32 *mem = io_mapping_map_wc(mapping,
                                             obj->gtt_offset + page * PAGE_SIZE);
                for (i = 0; i < PAGE_SIZE; i += 4)
                        seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
                io_mapping_unmap(mem);
        }
}
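/*
 * Hex-dump every active object still in the COMMAND read domain (i.e. the
 * likely batch buffers) through a WC mapping of the GTT aperture.
 */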
static int i915_batchbuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
                if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
                        seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
                        i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
                }
        }

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static int i915_ringbuffer_data(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
        if (!ring->obj) {
                seq_printf(m, "No ringbuffer setup\n");
        } else {
                const u8 __iomem *virt = ring->virtual_start;
                uint32_t off;

                for (off = 0; off < ring->size; off += 4) {
                        uint32_t *ptr = (uint32_t *)(virt + off);
                        seq_printf(m, "%08x : %08x\n", off, *ptr);
                }
        }
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static int i915_ringbuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        int ret;

        ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
        if (ring->size == 0)
                return 0;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "Ring %s:\n", ring->name);
        seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
        seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
        seq_printf(m, " Size : %08x\n", ring->size);
        seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
        seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
        if (IS_GEN6(dev) || IS_GEN7(dev)) {
                seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
                seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
        }
        seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
        seq_printf(m, " Start : %08x\n", I915_READ_START(ring));

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static const char *ring_str(int ring)
{
        switch (ring) {
        case RING_RENDER: return " render";
        case RING_BSD: return " bsd";
        case RING_BLT: return " blt";
        default: return "";
        }
}
static const char *pin_flag(int pinned)
{
        if (pinned > 0)
                return " P";
        else if (pinned < 0)
                return " p";
        else
                return "";
}
static const char *tiling_flag(int tiling)
{
        switch (tiling) {
        default:
        case I915_TILING_NONE: return "";
        case I915_TILING_X: return " X";
        case I915_TILING_Y: return " Y";
        }
}
static const char *dirty_flag(int dirty)
{
        return dirty ? " dirty" : "";
}
static const char *purgeable_flag(int purgeable)
{
        return purgeable ? " purgeable" : "";
}
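/* One line per buffer captured in the error state; flags become suffixes. */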
static void print_error_buffers(struct seq_file *m,
                                const char *name,
                                struct drm_i915_error_buffer *err,
                                int count)
{
        seq_printf(m, "%s [%d]:\n", name, count);

        while (count--) {
                seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s",
                           err->gtt_offset,
                           err->size,
                           err->read_domains,
                           err->write_domain,
                           err->seqno,
                           pin_flag(err->pinned),
                           tiling_flag(err->tiling),
                           dirty_flag(err->dirty),
                           purgeable_flag(err->purgeable),
                           ring_str(err->ring),
                           cache_level_str(err->cache_level));

                if (err->name)
                        seq_printf(m, " (name: %d)", err->name);
                if (err->fence_reg != I915_FENCE_REG_NONE)
                        seq_printf(m, " (fence: %d)", err->fence_reg);

                seq_printf(m, "\n");
                err++;
        }
}
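/*
 * Dump the state captured at the last GPU hang: a register snapshot, the
 * recorded buffer lists, and the contents of the batch and ring buffers.
 */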
static int i915_error_state(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
        unsigned long flags;
        int i, page, offset, elt;

        spin_lock_irqsave(&dev_priv->error_lock, flags);
        if (!dev_priv->first_error) {
                seq_printf(m, "no error state collected\n");
                goto out;
        }

        error = dev_priv->first_error;

        seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
                   error->time.tv_usec);
        seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
        seq_printf(m, "EIR: 0x%08x\n", error->eir);
        seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
        if (INTEL_INFO(dev)->gen >= 6) {
                seq_printf(m, "ERROR: 0x%08x\n", error->error);
                seq_printf(m, "Blitter command stream:\n");
                seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd);
                seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir);
                seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr);
                seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone);
                seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno);
                seq_printf(m, "Video (BSD) command stream:\n");
                seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd);
                seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir);
                seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr);
                seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone);
                seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno);
        }
        seq_printf(m, "Render command stream:\n");
        seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
        seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
        seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
        seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
        if (INTEL_INFO(dev)->gen >= 4) {
                seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
                seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
        }
        seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
        seq_printf(m, " seqno: 0x%08x\n", error->seqno);

        for (i = 0; i < dev_priv->num_fence_regs; i++)
                seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

        if (error->active_bo)
                print_error_buffers(m, "Active",
                                    error->active_bo,
                                    error->active_bo_count);

        if (error->pinned_bo)
                print_error_buffers(m, "Pinned",
                                    error->pinned_bo,
                                    error->pinned_bo_count);

        for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
                if (error->batchbuffer[i]) {
                        struct drm_i915_error_object *obj = error->batchbuffer[i];

                        seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
                                   dev_priv->ring[i].name,
                                   obj->gtt_offset);
                        offset = 0;
                        for (page = 0; page < obj->page_count; page++) {
                                for (elt = 0; elt < PAGE_SIZE/4; elt++) {
                                        seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
                                        offset += 4;
                                }
                        }
                }
        }

        for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
                if (error->ringbuffer[i]) {
                        struct drm_i915_error_object *obj = error->ringbuffer[i];
                        seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
                                   dev_priv->ring[i].name,
                                   obj->gtt_offset);
                        offset = 0;
                        for (page = 0; page < obj->page_count; page++) {
                                for (elt = 0; elt < PAGE_SIZE/4; elt++) {
                                        seq_printf(m, "%08x : %08x\n",
                                                   offset,
                                                   obj->pages[page][elt]);
                                        offset += 4;
                                }
                        }
                }
        }

        if (error->overlay)
                intel_overlay_print_error_state(m, error->overlay);

        if (error->display)
                intel_display_print_error_state(m, dev, error->display);

out:
        spin_unlock_irqrestore(&dev_priv->error_lock, flags);

        return 0;
}
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u16 crstanddelay;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        crstanddelay = I915_READ16(CRSTANDVID);

        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

        return 0;
}
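/* Report the current GPU frequency/P-state info: ILK MEMSTAT or GEN6+ RPS. */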
static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        if (IS_GEN5(dev)) {
                u16 rgvswctl = I915_READ16(MEMSWCTL);
                u16 rgvstat = I915_READ16(MEMSTAT_ILK);

                seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
                seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
                seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
                           MEMSTAT_VID_SHIFT);
                seq_printf(m, "Current P-state: %d\n",
                           (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
        } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
                u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
                u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
                u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
                u32 rpstat;
                u32 rpupei, rpcurup, rpprevup;
                u32 rpdownei, rpcurdown, rpprevdown;
                int max_freq;

                /* RPSTAT1 is in the GT power well */
                ret = mutex_lock_interruptible(&dev->struct_mutex);
                if (ret)
                        return ret;

                gen6_gt_force_wake_get(dev_priv);

                rpstat = I915_READ(GEN6_RPSTAT1);
                rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
                rpcurup = I915_READ(GEN6_RP_CUR_UP);
                rpprevup = I915_READ(GEN6_RP_PREV_UP);
                rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
                rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
                rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);

                gen6_gt_force_wake_put(dev_priv);
                mutex_unlock(&dev->struct_mutex);

                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
                seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
                seq_printf(m, "Render p-state ratio: %d\n",
                           (gt_perf_status & 0xff00) >> 8);
                seq_printf(m, "Render p-state VID: %d\n",
                           gt_perf_status & 0xff);
                seq_printf(m, "Render p-state limit: %d\n",
                           rp_state_limits & 0xff);
                seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
                                                GEN6_CAGF_SHIFT) * 50);
                seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
                           GEN6_CURICONT_MASK);
                seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
                           GEN6_CURBSYTAVG_MASK);
                seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
                           GEN6_CURBSYTAVG_MASK);
                seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
                           GEN6_CURIAVG_MASK);
                seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
                           GEN6_CURBSYTAVG_MASK);
                seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
                           GEN6_CURBSYTAVG_MASK);

                max_freq = (rp_state_cap & 0xff0000) >> 16;
                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
                           max_freq * 50);

                max_freq = (rp_state_cap & 0xff00) >> 8;
                seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
                           max_freq * 50);

                max_freq = rp_state_cap & 0xff;
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           max_freq * 50);
        } else {
                seq_printf(m, "no P-state info available\n");
        }

        return 0;
}
static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 delayfreq;
        int ret, i;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        for (i = 0; i < 16; i++) {
                delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
                seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
                           (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
        }

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static inline int MAP_TO_MV(int map)
{
        return 1250 - (map * 25);
}
static int i915_inttoext_table(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 inttoext;
        int ret, i;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        for (i = 1; i <= 32; i++) {
                inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
                seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
        }

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static int ironlake_drpc_info(struct seq_file *m)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 rgvmodectl, rstdbyctl;
        u16 crstandvid;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        rgvmodectl = I915_READ(MEMMODECTL);
        rstdbyctl = I915_READ(RSTDBYCTL);
        crstandvid = I915_READ16(CRSTANDVID);

        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
                   "yes" : "no");
        seq_printf(m, "Boost freq: %d\n",
                   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
                   MEMMODE_BOOST_FREQ_SHIFT);
        seq_printf(m, "HW control enabled: %s\n",
                   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
        seq_printf(m, "SW control enabled: %s\n",
                   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
        seq_printf(m, "Gated voltage change: %s\n",
                   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
        seq_printf(m, "Starting frequency: P%d\n",
                   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
        seq_printf(m, "Max P-state: P%d\n",
                   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
        seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
        seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
        seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
        seq_printf(m, "Render standby enabled: %s\n",
                   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
        seq_printf(m, "Current RS state: ");
        switch (rstdbyctl & RSX_STATUS_MASK) {
        case RSX_STATUS_ON:
                seq_printf(m, "on\n");
                break;
        case RSX_STATUS_RC1:
                seq_printf(m, "RC1\n");
                break;
        case RSX_STATUS_RC1E:
                seq_printf(m, "RC1E\n");
                break;
        case RSX_STATUS_RS1:
                seq_printf(m, "RS1\n");
                break;
        case RSX_STATUS_RS2:
                seq_printf(m, "RS2 (RC6)\n");
                break;
        case RSX_STATUS_RS3:
                seq_printf(m, "RC3 (RC6+)\n");
                break;
        default:
                seq_printf(m, "unknown\n");
                break;
        }

        return 0;
}
static int gen6_drpc_info(struct seq_file *m)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 rpmodectl1, gt_core_status, rcctl1;
        unsigned forcewake_count;
        int count = 0, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        spin_lock_irq(&dev_priv->gt_lock);
        forcewake_count = dev_priv->forcewake_count;
        spin_unlock_irq(&dev_priv->gt_lock);

        if (forcewake_count) {
                seq_printf(m, "RC information inaccurate because somebody "
                              "holds a forcewake reference\n");
        } else {
                /* NB: we cannot use forcewake, else we read the wrong values */
                while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
                        udelay(10);
                seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
        }

        gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
        trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);

        rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
        rcctl1 = I915_READ(GEN6_RC_CONTROL);
        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "Video Turbo Mode: %s\n",
                   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
        seq_printf(m, "HW control enabled: %s\n",
                   yesno(rpmodectl1 & GEN6_RP_ENABLE));
        seq_printf(m, "SW control enabled: %s\n",
                   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
                         GEN6_RP_MEDIA_SW_MODE));
        seq_printf(m, "RC1e Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
        seq_printf(m, "RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
        seq_printf(m, "Deep RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
        seq_printf(m, "Deepest RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
        seq_printf(m, "Current RC state: ");
        switch (gt_core_status & GEN6_RCn_MASK) {
        case GEN6_RC0:
                if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
                        seq_printf(m, "Core Power Down\n");
                else
                        seq_printf(m, "on\n");
                break;
        case GEN6_RC3:
                seq_printf(m, "RC3\n");
                break;
        case GEN6_RC6:
                seq_printf(m, "RC6\n");
                break;
        case GEN6_RC7:
                seq_printf(m, "RC7\n");
                break;
        default:
                seq_printf(m, "Unknown\n");
                break;
        }

        seq_printf(m, "Core Power Down: %s\n",
                   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

        return 0;
}
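/* Dispatch to the generation-specific RC/render-standby report. */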
static int i915_drpc_info(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;

        if (IS_GEN6(dev) || IS_GEN7(dev))
                return gen6_drpc_info(m);
        else
                return ironlake_drpc_info(m);
}
static int i915_fbc_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!I915_HAS_FBC(dev)) {
                seq_printf(m, "FBC unsupported on this chipset\n");
                return 0;
        }

        if (intel_fbc_enabled(dev)) {
                seq_printf(m, "FBC enabled\n");
        } else {
                seq_printf(m, "FBC disabled: ");
                switch (dev_priv->no_fbc_reason) {
                case FBC_NO_OUTPUT:
                        seq_printf(m, "no outputs");
                        break;
                case FBC_STOLEN_TOO_SMALL:
                        seq_printf(m, "not enough stolen memory");
                        break;
                case FBC_UNSUPPORTED_MODE:
                        seq_printf(m, "mode not supported");
                        break;
                case FBC_MODE_TOO_LARGE:
                        seq_printf(m, "mode too large");
                        break;
                case FBC_BAD_PLANE:
                        seq_printf(m, "FBC unsupported on plane");
                        break;
                case FBC_NOT_TILED:
                        seq_printf(m, "scanout buffer not tiled");
                        break;
                case FBC_MULTIPLE_PIPES:
                        seq_printf(m, "multiple pipes are enabled");
                        break;
                case FBC_MODULE_PARAM:
                        seq_printf(m, "disabled per module param (default off)");
                        break;
                default:
                        seq_printf(m, "unknown reason");
                }
                seq_printf(m, "\n");
        }
        return 0;
}
static int i915_sr_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        bool sr_enabled = false;

        if (HAS_PCH_SPLIT(dev))
                sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
        else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
                sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
        else if (IS_I915GM(dev))
                sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
        else if (IS_PINEVIEW(dev))
                sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

        seq_printf(m, "self-refresh: %s\n",
                   sr_enabled ? "enabled" : "disabled");

        return 0;
}
static int i915_emon_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long temp, chipset, gfx;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        temp = i915_mch_val(dev_priv);
        chipset = i915_chipset_val(dev_priv);
        gfx = i915_gfx_val(dev_priv);
        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "GMCH temp: %ld\n", temp);
        seq_printf(m, "Chipset power: %ld\n", chipset);
        seq_printf(m, "GFX power: %ld\n", gfx);
        seq_printf(m, "Total power: %ld\n", chipset + gfx);

        return 0;
}
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
        int gpu_freq, ia_freq;

        if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
                seq_printf(m, "unsupported on this chipset\n");
                return 0;
        }

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");

        for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
             gpu_freq++) {
                I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
                I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
                           GEN6_PCODE_READ_MIN_FREQ_TABLE);
                if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
                              GEN6_PCODE_READY) == 0, 10)) {
                        DRM_ERROR("pcode read of freq table timed out\n");
                        continue;
                }
                ia_freq = I915_READ(GEN6_PCODE_DATA);
                seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
        }

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static int i915_gfxec(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static int i915_opregion(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_opregion *opregion = &dev_priv->opregion;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (opregion->header)
                seq_write(m, opregion->header, OPREGION_SIZE);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_fbdev *ifbdev;
        struct intel_framebuffer *fb;
        int ret;

        ret = mutex_lock_interruptible(&dev->mode_config.mutex);
        if (ret)
                return ret;

        ifbdev = dev_priv->fbdev;
        fb = to_intel_framebuffer(ifbdev->helper.fb);

        seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
                   fb->base.width,
                   fb->base.height,
                   fb->base.depth,
                   fb->base.bits_per_pixel);
        describe_obj(m, fb->obj);
        seq_printf(m, "\n");

        list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
                if (&fb->base == ifbdev->helper.fb)
                        continue;

                seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
                           fb->base.width,
                           fb->base.height,
                           fb->base.depth,
                           fb->base.bits_per_pixel);
                describe_obj(m, fb->obj);
                seq_printf(m, "\n");
        }

        mutex_unlock(&dev->mode_config.mutex);

        return 0;
}
static int i915_context_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        ret = mutex_lock_interruptible(&dev->mode_config.mutex);
        if (ret)
                return ret;

        if (dev_priv->pwrctx) {
                seq_printf(m, "power context ");
                describe_obj(m, dev_priv->pwrctx);
                seq_printf(m, "\n");
        }

        if (dev_priv->renderctx) {
                seq_printf(m, "render context ");
                describe_obj(m, dev_priv->renderctx);
                seq_printf(m, "\n");
        }

        mutex_unlock(&dev->mode_config.mutex);

        return 0;
}
static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned forcewake_count;

        spin_lock_irq(&dev_priv->gt_lock);
        forcewake_count = dev_priv->forcewake_count;
        spin_unlock_irq(&dev_priv->gt_lock);

        seq_printf(m, "forcewake count = %u\n", forcewake_count);

        return 0;
}
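/*
 * The entries below are writable control files (wedged, max freq, cache
 * sharing); they use plain file_operations rather than drm_info_list nodes.
 */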
static int
i915_wedged_open(struct inode *inode,
                 struct file *filp)
{
        filp->private_data = inode->i_private;
        return 0;
}

static ssize_t
i915_wedged_read(struct file *filp,
                 char __user *ubuf,
                 size_t max,
                 loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        char buf[80];
        int len;

        len = snprintf(buf, sizeof(buf),
                       "wedged : %d\n",
                       atomic_read(&dev_priv->mm.wedged));

        if (len > sizeof(buf))
                len = sizeof(buf);

        return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_wedged_write(struct file *filp,
                  const char __user *ubuf,
                  size_t cnt,
                  loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        char buf[20];
        int val = 1;

        if (cnt > 0) {
                if (cnt > sizeof(buf) - 1)
                        return -EINVAL;

                if (copy_from_user(buf, ubuf, cnt))
                        return -EFAULT;
                buf[cnt] = 0;

                val = simple_strtoul(buf, NULL, 0);
        }

        DRM_INFO("Manually setting wedged to %d\n", val);
        i915_handle_error(dev, val);

        return cnt;
}

static const struct file_operations i915_wedged_fops = {
        .owner = THIS_MODULE,
        .open = i915_wedged_open,
        .read = i915_wedged_read,
        .write = i915_wedged_write,
        .llseek = default_llseek,
};
static int
i915_max_freq_open(struct inode *inode,
                   struct file *filp)
{
        filp->private_data = inode->i_private;
        return 0;
}

static ssize_t
i915_max_freq_read(struct file *filp,
                   char __user *ubuf,
                   size_t max,
                   loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        char buf[80];
        int len;

        len = snprintf(buf, sizeof(buf),
                       "max freq: %d\n", dev_priv->max_delay * 50);

        if (len > sizeof(buf))
                len = sizeof(buf);

        return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_max_freq_write(struct file *filp,
                    const char __user *ubuf,
                    size_t cnt,
                    loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        struct drm_i915_private *dev_priv = dev->dev_private;
        char buf[20];
        int val = 1;

        if (cnt > 0) {
                if (cnt > sizeof(buf) - 1)
                        return -EINVAL;

                if (copy_from_user(buf, ubuf, cnt))
                        return -EFAULT;
                buf[cnt] = 0;

                val = simple_strtoul(buf, NULL, 0);
        }

        DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);

        /*
         * Turbo will still be enabled, but won't go above the set value.
         */
        dev_priv->max_delay = val / 50;

        gen6_set_rps(dev, val / 50);

        return cnt;
}

static const struct file_operations i915_max_freq_fops = {
        .owner = THIS_MODULE,
        .open = i915_max_freq_open,
        .read = i915_max_freq_read,
        .write = i915_max_freq_write,
        .llseek = default_llseek,
};
static int
i915_cache_sharing_open(struct inode *inode,
                        struct file *filp)
{
        filp->private_data = inode->i_private;
        return 0;
}

static ssize_t
i915_cache_sharing_read(struct file *filp,
                        char __user *ubuf,
                        size_t max,
                        loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        char buf[80];
        u32 snpcr;
        int len;

        mutex_lock(&dev_priv->dev->struct_mutex);
        snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
        mutex_unlock(&dev_priv->dev->struct_mutex);

        len = snprintf(buf, sizeof(buf),
                       "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
                       GEN6_MBC_SNPCR_SHIFT);

        if (len > sizeof(buf))
                len = sizeof(buf);

        return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_cache_sharing_write(struct file *filp,
                         const char __user *ubuf,
                         size_t cnt,
                         loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        struct drm_i915_private *dev_priv = dev->dev_private;
        char buf[20];
        u32 snpcr;
        int val = 1;

        if (cnt > 0) {
                if (cnt > sizeof(buf) - 1)
                        return -EINVAL;

                if (copy_from_user(buf, ubuf, cnt))
                        return -EFAULT;
                buf[cnt] = 0;

                val = simple_strtoul(buf, NULL, 0);
        }

        if (val < 0 || val > 3)
                return -EINVAL;

        DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);

        /* Update the cache sharing policy here as well */
        snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
        snpcr &= ~GEN6_MBC_SNPCR_MASK;
        snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
        I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

        return cnt;
}

static const struct file_operations i915_cache_sharing_fops = {
        .owner = THIS_MODULE,
        .open = i915_cache_sharing_open,
        .read = i915_cache_sharing_read,
        .write = i915_cache_sharing_write,
        .llseek = default_llseek,
};
/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
                       struct dentry *ent,
                       const void *key)
{
        struct drm_info_node *node;

        node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
        if (node == NULL) {
                debugfs_remove(ent);
                return -ENOMEM;
        }

        node->minor = minor;
        node->dent = ent;
        node->info_ent = (void *) key;

        mutex_lock(&minor->debugfs_lock);
        list_add(&node->list, &minor->debugfs_list);
        mutex_unlock(&minor->debugfs_lock);

        return 0;
}
static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
{
        struct drm_device *dev = minor->dev;
        struct dentry *ent;

        ent = debugfs_create_file("i915_wedged",
                                  S_IRUGO | S_IWUSR,
                                  root, dev,
                                  &i915_wedged_fops);
        if (IS_ERR(ent))
                return PTR_ERR(ent);

        return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
}
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
        struct drm_device *dev = inode->i_private;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        if (INTEL_INFO(dev)->gen < 6)
                return 0;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
        gen6_gt_force_wake_get(dev_priv);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
int i915_forcewake_release(struct inode *inode, struct file *file)
{
        struct drm_device *dev = inode->i_private;
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (INTEL_INFO(dev)->gen < 6)
                return 0;

        /*
         * It's bad that we can potentially hang userspace if struct_mutex gets
         * forever stuck. However, if we cannot acquire this lock it means that
         * almost certainly the driver has hung and is not unloadable. Therefore
         * hanging here is probably a minor inconvenience not to be seen by
         * almost every user.
         */
        mutex_lock(&dev->struct_mutex);
        gen6_gt_force_wake_put(dev_priv);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static const struct file_operations i915_forcewake_fops = {
        .owner = THIS_MODULE,
        .open = i915_forcewake_open,
        .release = i915_forcewake_release,
};
static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
        struct drm_device *dev = minor->dev;
        struct dentry *ent;

        ent = debugfs_create_file("i915_forcewake_user",
                                  S_IRUSR,
                                  root, dev,
                                  &i915_forcewake_fops);
        if (IS_ERR(ent))
                return PTR_ERR(ent);

        return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor)
{
        struct drm_device *dev = minor->dev;
        struct dentry *ent;

        ent = debugfs_create_file("i915_max_freq",
                                  S_IRUGO | S_IWUSR,
                                  root, dev,
                                  &i915_max_freq_fops);
        if (IS_ERR(ent))
                return PTR_ERR(ent);

        return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops);
}
static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor)
{
        struct drm_device *dev = minor->dev;
        struct dentry *ent;

        ent = debugfs_create_file("i915_cache_sharing",
                                  S_IRUGO | S_IWUSR,
                                  root, dev,
                                  &i915_cache_sharing_fops);
        if (IS_ERR(ent))
                return PTR_ERR(ent);

        return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops);
}
static struct drm_info_list i915_debugfs_list[] = {
        {"i915_capabilities", i915_capabilities, 0},
        {"i915_gem_objects", i915_gem_object_info, 0},
        {"i915_gem_gtt", i915_gem_gtt_info, 0},
        {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
        {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
        {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
        {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
        {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
        {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
        {"i915_gem_request", i915_gem_request_info, 0},
        {"i915_gem_seqno", i915_gem_seqno_info, 0},
        {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
        {"i915_gem_interrupt", i915_interrupt_info, 0},
        {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
        {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
        {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
        {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
        {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
        {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
        {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
        {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
        {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
        {"i915_batchbuffers", i915_batchbuffer_info, 0},
        {"i915_error_state", i915_error_state, 0},
        {"i915_rstdby_delays", i915_rstdby_delays, 0},
        {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
        {"i915_delayfreq_table", i915_delayfreq_table, 0},
        {"i915_inttoext_table", i915_inttoext_table, 0},
        {"i915_drpc_info", i915_drpc_info, 0},
        {"i915_emon_status", i915_emon_status, 0},
        {"i915_ring_freq_table", i915_ring_freq_table, 0},
        {"i915_gfxec", i915_gfxec, 0},
        {"i915_fbc_status", i915_fbc_status, 0},
        {"i915_sr_status", i915_sr_status, 0},
        {"i915_opregion", i915_opregion, 0},
        {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
        {"i915_context_status", i915_context_status, 0},
        {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
};

#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
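/* Register every debugfs file for this DRM minor; the entry point used by
 * the DRM core's debugfs setup. */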
int i915_debugfs_init(struct drm_minor *minor)
{
        int ret;

        ret = i915_wedged_create(minor->debugfs_root, minor);
        if (ret)
                return ret;

        ret = i915_forcewake_create(minor->debugfs_root, minor);
        if (ret)
                return ret;
        ret = i915_max_freq_create(minor->debugfs_root, minor);
        if (ret)
                return ret;
        ret = i915_cache_sharing_create(minor->debugfs_root, minor);
        if (ret)
                return ret;

        return drm_debugfs_create_files(i915_debugfs_list,
                                        I915_DEBUGFS_ENTRIES,
                                        minor->debugfs_root, minor);
}
void i915_debugfs_cleanup(struct drm_minor *minor)
{
        drm_debugfs_remove_files(i915_debugfs_list,
                                 I915_DEBUGFS_ENTRIES, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
                                 1, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
                                 1, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
                                 1, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
                                 1, minor);
}

#endif /* CONFIG_DEBUG_FS */