/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/intel_ringbuffer.h>

#include <sys/sysctl.h>
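/*
 * Debugging and diagnostics for the i915 driver.  Where the Linux driver
 * publishes this information through debugfs, this FreeBSD port exposes it
 * as read-only sysctl nodes (see i915_sysctl_init() at the bottom of this
 * file); each handler below formats its report into an sbuf that the
 * generic sysctl handler copies out to userland.
 */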
	return (v ? "yes" : "no");

i915_capabilities(struct drm_device *dev, struct sbuf *m, void *data)

	const struct intel_device_info *info = INTEL_INFO(dev);

	sbuf_printf(m, "gen: %d\n", info->gen);
	if (HAS_PCH_SPLIT(dev))
		sbuf_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define B(x) sbuf_printf(m, #x ": %s\n", yesno(info->x))
	B(cursor_needs_physical);
	B(overlay_needs_physical);
get_pin_flag(struct drm_i915_gem_object *obj)

	if (obj->user_pin_count > 0)
	else if (obj->pin_count > 0)

get_tiling_flag(struct drm_i915_gem_object *obj)

	switch (obj->tiling_mode) {
	case I915_TILING_NONE: return (" ");
	case I915_TILING_X: return ("X");
	case I915_TILING_Y: return ("Y");

cache_level_str(int type)

	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return " snooped (LLC)";
	case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
	default: return ("");
describe_obj(struct sbuf *m, struct drm_i915_gem_object *obj)

	sbuf_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
	    get_tiling_flag(obj),
	    obj->base.size / 1024,
	    obj->base.read_domains,
	    obj->base.write_domain,
	    obj->last_rendering_seqno,
	    obj->last_fenced_seqno,
	    cache_level_str(obj->cache_level),
	    obj->dirty ? " dirty" : "",
	    obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	sbuf_printf(m, " (name: %d)", obj->base.name);
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		sbuf_printf(m, " (fence: %d)", obj->fence_reg);
	if (obj->gtt_space != NULL)
		sbuf_printf(m, " (gtt offset: %08x, size: %08x)",
		    obj->gtt_offset, (unsigned int)obj->gtt_space->size);
	if (obj->pin_mappable || obj->fault_mappable) {
		if (obj->pin_mappable)
		if (obj->fault_mappable)
		sbuf_printf(m, " (%s mappable)", s);
	if (obj->ring != NULL)
		sbuf_printf(m, " (%s)", obj->ring->name);
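/*
 * describe_obj() emits one line per GEM object: pointer, pin and tiling
 * flags, size in KiB, read/write domains, last rendering and last fenced
 * seqnos, cache level, plus optional dirty/purgeable, flink name, fence
 * register, GTT placement, mappability and owning-ring annotations.
 */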
i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, void *data)

	uintptr_t list = (uintptr_t)data;
	struct list_head *head;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;

	if (sx_xlock_sig(&dev->dev_struct_lock))

		sbuf_printf(m, "Active:\n");
		head = &dev_priv->mm.active_list;

		sbuf_printf(m, "Inactive:\n");
		head = &dev_priv->mm.inactive_list;

		sbuf_printf(m, "Pinned:\n");
		head = &dev_priv->mm.pinned_list;

		sbuf_printf(m, "Flushing:\n");
		head = &dev_priv->mm.flushing_list;

	case DEFERRED_FREE_LIST:
		sbuf_printf(m, "Deferred free:\n");
		head = &dev_priv->mm.deferred_free_list;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, head, mm_list) {
		describe_obj(m, obj);
		sbuf_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;

	sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
	    count, total_obj_size, total_gtt_size);
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += obj->gtt_space->size; \
		if (obj->map_and_fenceable) { \
			mappable_size += obj->gtt_space->size; \
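/*
 * count_objects() walks one of the mm object lists and accumulates the
 * caller's size/count and mappable_size/mappable_count tallies, which
 * i915_gem_object_info() resets before each list and then prints.
 */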
i915_gem_object_info(struct drm_device *dev, struct sbuf *m, void *data)

	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count;
	size_t size, mappable_size;
	struct drm_i915_gem_object *obj;

	if (sx_xlock_sig(&dev->dev_struct_lock))

	sbuf_printf(m, "%u objects, %zu bytes\n",
	    dev_priv->mm.object_count,
	    dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.gtt_list, gtt_list);
	sbuf_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
	    count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.active_list, mm_list);
	count_objects(&dev_priv->mm.flushing_list, mm_list);
	sbuf_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
	    count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.pinned_list, mm_list);
	sbuf_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
	    count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.inactive_list, mm_list);
	sbuf_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
	    count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.deferred_free_list, mm_list);
	sbuf_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
	    count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		if (obj->fault_mappable) {
			size += obj->gtt_space->size;
		if (obj->pin_mappable) {
			mappable_size += obj->gtt_space->size;

	sbuf_printf(m, "%u pinned mappable objects, %zu bytes\n",
	    mappable_count, mappable_size);
	sbuf_printf(m, "%u fault mappable objects, %zu bytes\n",
	sbuf_printf(m, "%zu [%zu] gtt total\n",
	    dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
i915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void *data)

	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;

	if (sx_xlock_sig(&dev->dev_struct_lock))

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		describe_obj(m, obj);
		sbuf_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;

	sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
	    count, total_obj_size, total_gtt_size);
i915_gem_pageflip_info(struct drm_device *dev, struct sbuf *m, void *data)

	struct intel_crtc *crtc;
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;

	if ((dev->driver->driver_features & DRIVER_MODESET) == 0)

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		pipe = pipe_name(crtc->pipe);
		plane = plane_name(crtc->plane);

		mtx_lock(&dev->event_lock);
		work = crtc->unpin_work;
			sbuf_printf(m, "No flip due on pipe %c (plane %c)\n",
			if (!work->pending) {
				sbuf_printf(m, "Flip queued on pipe %c (plane %c)\n",
				sbuf_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
			if (work->enable_stall_check)
				sbuf_printf(m, "Stall check enabled, ");
				sbuf_printf(m, "Stall check waiting for page flip ioctl, ");
			sbuf_printf(m, "%d prepares\n", work->pending);

			if (work->old_fb_obj) {
				obj = work->old_fb_obj;
					sbuf_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			if (work->pending_flip_obj) {
				obj = work->pending_flip_obj;
					sbuf_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);

		mtx_unlock(&dev->event_lock);
i915_gem_request_info(struct drm_device *dev, struct sbuf *m, void *data)

	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *gem_request;

	if (sx_xlock_sig(&dev->dev_struct_lock))

	if (!list_empty(&dev_priv->rings[RCS].request_list)) {
		sbuf_printf(m, "Render requests:\n");
		list_for_each_entry(gem_request,
		    &dev_priv->rings[RCS].request_list,
			sbuf_printf(m, " %d @ %d\n",
			    (int) (jiffies - gem_request->emitted_jiffies));

	if (!list_empty(&dev_priv->rings[VCS].request_list)) {
		sbuf_printf(m, "BSD requests:\n");
		list_for_each_entry(gem_request,
		    &dev_priv->rings[VCS].request_list,
			sbuf_printf(m, " %d @ %d\n",
			    (int) (jiffies - gem_request->emitted_jiffies));

	if (!list_empty(&dev_priv->rings[BCS].request_list)) {
		sbuf_printf(m, "BLT requests:\n");
		list_for_each_entry(gem_request,
		    &dev_priv->rings[BCS].request_list,
			sbuf_printf(m, " %d @ %d\n",
			    (int) (jiffies - gem_request->emitted_jiffies));

		sbuf_printf(m, "No requests\n");
i915_ring_seqno_info(struct sbuf *m, struct intel_ring_buffer *ring)

	if (ring->get_seqno) {
		sbuf_printf(m, "Current sequence (%s): %d\n",
		    ring->name, ring->get_seqno(ring));
		sbuf_printf(m, "Waiter sequence (%s): %d\n",
		    ring->name, ring->waiting_seqno);
		sbuf_printf(m, "IRQ sequence (%s): %d\n",
		    ring->name, ring->irq_seqno);
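/*
 * Per-ring sequence-number snapshot: the hardware's current seqno (via the
 * ring's get_seqno hook), the seqno a waiter is sleeping on, and the seqno
 * last observed by the interrupt handler.  i915_gem_seqno_info() below
 * prints this for every ring.
 */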
i915_gem_seqno_info(struct drm_device *dev, struct sbuf *m, void *data)

	drm_i915_private_t *dev_priv = dev->dev_private;

	if (sx_xlock_sig(&dev->dev_struct_lock))

	for (i = 0; i < I915_NUM_RINGS; i++)
		i915_ring_seqno_info(m, &dev_priv->rings[i]);
i915_interrupt_info(struct drm_device *dev, struct sbuf *m, void *data)

	drm_i915_private_t *dev_priv = dev->dev_private;

	if (sx_xlock_sig(&dev->dev_struct_lock))

	if (!HAS_PCH_SPLIT(dev)) {
		sbuf_printf(m, "Interrupt enable: %08x\n",
		sbuf_printf(m, "Interrupt identity: %08x\n",
		sbuf_printf(m, "Interrupt mask: %08x\n",
			sbuf_printf(m, "Pipe %c stat: %08x\n",
			    I915_READ(PIPESTAT(pipe)));
		sbuf_printf(m, "North Display Interrupt enable: %08x\n",
		sbuf_printf(m, "North Display Interrupt identity: %08x\n",
		sbuf_printf(m, "North Display Interrupt mask: %08x\n",
		sbuf_printf(m, "South Display Interrupt enable: %08x\n",
		sbuf_printf(m, "South Display Interrupt identity: %08x\n",
		sbuf_printf(m, "South Display Interrupt mask: %08x\n",
		sbuf_printf(m, "Graphics Interrupt enable: %08x\n",
		sbuf_printf(m, "Graphics Interrupt identity: %08x\n",
		sbuf_printf(m, "Graphics Interrupt mask: %08x\n",

	sbuf_printf(m, "Interrupts received: %d\n",
	    atomic_read(&dev_priv->irq_received));
	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			sbuf_printf(m, "Graphics Interrupt mask (%s): %08x\n",
			    dev_priv->rings[i].name,
			    I915_READ_IMR(&dev_priv->rings[i]));
		i915_ring_seqno_info(m, &dev_priv->rings[i]);
i915_gem_fence_regs_info(struct drm_device *dev, struct sbuf *m, void *data)

	drm_i915_private_t *dev_priv = dev->dev_private;

	if (sx_xlock_sig(&dev->dev_struct_lock))

	sbuf_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	sbuf_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		sbuf_printf(m, "Fenced object[%2d] = ", i);
			sbuf_printf(m, "unused");
			describe_obj(m, obj);
		sbuf_printf(m, "\n");
i915_hws_info(struct drm_device *dev, struct sbuf *m, void *data)

	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const volatile u32 *hws;

	ring = &dev_priv->rings[(uintptr_t)data];
	hws = (volatile u32 *)ring->status_page.page_addr;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		sbuf_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
		    hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
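/*
 * i915_hws_info() dumps the selected ring's 4 KB hardware status page as
 * rows of four 32-bit words; the sysctl table at the end of this file
 * instantiates it once per ring (render, BSD and blitter).
 */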
i915_ringbuffer_data(struct drm_device *dev, struct sbuf *m, void *data)

	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;

	if (sx_xlock_sig(&dev->dev_struct_lock))

	ring = &dev_priv->rings[(uintptr_t)data];
		sbuf_printf(m, "No ringbuffer setup\n");
		u8 *virt = ring->virtual_start;

		for (off = 0; off < ring->size; off += 4) {
			uint32_t *ptr = (uint32_t *)(virt + off);
			sbuf_printf(m, "%08x : %08x\n", off, *ptr);
i915_ringbuffer_info(struct drm_device *dev, struct sbuf *m, void *data)

	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;

	ring = &dev_priv->rings[(uintptr_t)data];

	if (sx_xlock_sig(&dev->dev_struct_lock))

	sbuf_printf(m, "Ring %s:\n", ring->name);
	sbuf_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
	sbuf_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
	sbuf_printf(m, " Size : %08x\n", ring->size);
	sbuf_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
	sbuf_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		sbuf_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
		sbuf_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
	sbuf_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
	sbuf_printf(m, " Start : %08x\n", I915_READ_START(ring));
	case RCS: return (" render");
	case VCS: return (" bsd");
	case BCS: return (" blt");
	default: return ("");

static const char *tiling_flag(int tiling)

	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";

static const char *dirty_flag(int dirty)

	return dirty ? " dirty" : "";

static const char *purgeable_flag(int purgeable)

	return purgeable ? " purgeable" : "";
static void print_error_buffers(struct sbuf *m, const char *name,
    struct drm_i915_error_buffer *err, int count)

	sbuf_printf(m, "%s [%d]:\n", name, count);

		sbuf_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
		    pin_flag(err->pinned),
		    tiling_flag(err->tiling),
		    dirty_flag(err->dirty),
		    purgeable_flag(err->purgeable),
		    err->ring != -1 ? " " : "",
		    cache_level_str(err->cache_level));

			sbuf_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			sbuf_printf(m, " (fence: %d)", err->fence_reg);

		sbuf_printf(m, "\n");
i915_ring_error_state(struct sbuf *m, struct drm_device *dev,
    struct drm_i915_error_state *error, unsigned ring)

	sbuf_printf(m, "%s command stream:\n", ring_str(ring));
	sbuf_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
	sbuf_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
	sbuf_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
	sbuf_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
	sbuf_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
	sbuf_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
	if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
		sbuf_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
		sbuf_printf(m, " BBADDR: 0x%08jx\n", (uintmax_t)error->bbaddr);
	if (INTEL_INFO(dev)->gen >= 4)
		sbuf_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
	sbuf_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
	if (INTEL_INFO(dev)->gen >= 6) {
		sbuf_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
		sbuf_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
		sbuf_printf(m, " SYNC_0: 0x%08x\n",
		    error->semaphore_mboxes[ring][0]);
		sbuf_printf(m, " SYNC_1: 0x%08x\n",
		    error->semaphore_mboxes[ring][1]);
	sbuf_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
	sbuf_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
	sbuf_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
static int i915_error_state(struct drm_device *dev, struct sbuf *m,

	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	int i, j, page, offset, elt;

	mtx_lock(&dev_priv->error_lock);
	if (!dev_priv->first_error) {
		sbuf_printf(m, "no error state collected\n");

	error = dev_priv->first_error;

	sbuf_printf(m, "Time: %jd s %jd us\n", (intmax_t)error->time.tv_sec,
	    (intmax_t)error->time.tv_usec);
	sbuf_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
	sbuf_printf(m, "EIR: 0x%08x\n", error->eir);
	sbuf_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		sbuf_printf(m, " fence[%d] = %08jx\n", i,
		    (uintmax_t)error->fence[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		sbuf_printf(m, "ERROR: 0x%08x\n", error->error);
		sbuf_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);

	i915_ring_error_state(m, dev, error, RCS);
		i915_ring_error_state(m, dev, error, BCS);
		i915_ring_error_state(m, dev, error, VCS);

	if (error->active_bo)
		print_error_buffers(m, "Active",
		    error->active_bo_count);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
		    error->pinned_bo_count);

	for (i = 0; i < DRM_ARRAY_SIZE(error->ring); i++) {
		struct drm_i915_error_object *obj;

		if ((obj = error->ring[i].batchbuffer)) {
			sbuf_printf(m, "%s --- gtt_offset = 0x%08x\n",
			    dev_priv->rings[i].name,
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					sbuf_printf(m, "%08x : %08x\n",
					    offset, obj->pages[page][elt]);

		if (error->ring[i].num_requests) {
			sbuf_printf(m, "%s --- %d requests\n",
			    dev_priv->rings[i].name,
			    error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				sbuf_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
				    error->ring[i].requests[j].seqno,
				    error->ring[i].requests[j].jiffies,
				    error->ring[i].requests[j].tail);

		if ((obj = error->ring[i].ringbuffer)) {
			sbuf_printf(m, "%s --- ringbuffer = 0x%08x\n",
			    dev_priv->rings[i].name,
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					sbuf_printf(m, "%08x : %08x\n",
					    obj->pages[page][elt]);

	intel_overlay_print_error_state(m, error->overlay);
	intel_display_print_error_state(m, dev, error->display);

	mtx_unlock(&dev_priv->error_lock);
i915_rstdby_delays(struct drm_device *dev, struct sbuf *m, void *unused)

	drm_i915_private_t *dev_priv = dev->dev_private;

	if (sx_xlock_sig(&dev->dev_struct_lock))

	crstanddelay = I915_READ16(CRSTANDVID);

	sbuf_printf(m, "w/ctx: %d, w/o ctx: %d\n",
	    (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
i915_cur_delayinfo(struct drm_device *dev, struct sbuf *m, void *unused)

	drm_i915_private_t *dev_priv = dev->dev_private;

		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		sbuf_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		sbuf_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		sbuf_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
		sbuf_printf(m, "Current P-state: %d\n",
		    (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_GEN6(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;

		/* RPSTAT1 is in the GT power well */
		if (sx_xlock_sig(&dev->dev_struct_lock))

		gen6_gt_force_wake_get(dev_priv);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);

		gen6_gt_force_wake_put(dev_priv);

		sbuf_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		sbuf_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		sbuf_printf(m, "Render p-state ratio: %d\n",
		    (gt_perf_status & 0xff00) >> 8);
		sbuf_printf(m, "Render p-state VID: %d\n",
		    gt_perf_status & 0xff);
		sbuf_printf(m, "Render p-state limit: %d\n",
		    rp_state_limits & 0xff);
		sbuf_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
		    GEN6_CAGF_SHIFT) * 50);
		sbuf_printf(m, "RP CUR UP EI: %dus\n", rpupei &
		sbuf_printf(m, "RP CUR UP: %dus\n", rpcurup &
		    GEN6_CURBSYTAVG_MASK);
		sbuf_printf(m, "RP PREV UP: %dus\n", rpprevup &
		    GEN6_CURBSYTAVG_MASK);
		sbuf_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
		sbuf_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
		    GEN6_CURBSYTAVG_MASK);
		sbuf_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
		    GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		sbuf_printf(m, "Lowest (RPN) frequency: %dMHz\n",

		max_freq = (rp_state_cap & 0xff00) >> 8;
		sbuf_printf(m, "Nominal (RP1) frequency: %dMHz\n",

		max_freq = rp_state_cap & 0xff;
		sbuf_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",

		sbuf_printf(m, "no P-state info available\n");
i915_delayfreq_table(struct drm_device *dev, struct sbuf *m, void *unused)

	drm_i915_private_t *dev_priv = dev->dev_private;

	if (sx_xlock_sig(&dev->dev_struct_lock))

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		sbuf_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
		    (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);

	return 1250 - (map * 25);
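/*
 * The helper above (its signature is elided here; it appears to correspond
 * to the Linux driver's MAP_TO_MV() helper) converts a VID code to
 * millivolts: mV = 1250 - 25 * code, so code 0 -> 1250 mV and
 * code 6 -> 1100 mV.
 */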
i915_inttoext_table(struct drm_device *dev, struct sbuf *m, void *unused)

	drm_i915_private_t *dev_priv = dev->dev_private;

	if (sx_xlock_sig(&dev->dev_struct_lock))

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		sbuf_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
ironlake_drpc_info(struct drm_device *dev, struct sbuf *m)

	drm_i915_private_t *dev_priv = dev->dev_private;

	if (sx_xlock_sig(&dev->dev_struct_lock))

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	sbuf_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
	sbuf_printf(m, "Boost freq: %d\n",
	    (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
	    MEMMODE_BOOST_FREQ_SHIFT);
	sbuf_printf(m, "HW control enabled: %s\n",
	    rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	sbuf_printf(m, "SW control enabled: %s\n",
	    rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	sbuf_printf(m, "Gated voltage change: %s\n",
	    rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	sbuf_printf(m, "Starting frequency: P%d\n",
	    (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	sbuf_printf(m, "Max P-state: P%d\n",
	    (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	sbuf_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	sbuf_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	sbuf_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	sbuf_printf(m, "Render standby enabled: %s\n",
	    (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	sbuf_printf(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
		sbuf_printf(m, "on\n");
		sbuf_printf(m, "RC1\n");
	case RSX_STATUS_RC1E:
		sbuf_printf(m, "RC1E\n");
		sbuf_printf(m, "RS1\n");
		sbuf_printf(m, "RS2 (RC6)\n");
	case RSX_STATUS_RS3:
		sbuf_printf(m, "RC3 (RC6+)\n");
		sbuf_printf(m, "unknown\n");
gen6_drpc_info(struct drm_device *dev, struct sbuf *m)

	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1;
	unsigned forcewake_count;

	if (sx_xlock_sig(&dev->dev_struct_lock))

	mtx_lock(&dev_priv->gt_lock);
	forcewake_count = dev_priv->forcewake_count;
	mtx_unlock(&dev_priv->gt_lock);

	if (forcewake_count) {
		sbuf_printf(m, "RC information inaccurate because userspace "
		    "holds a reference \n");
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
		sbuf_printf(m, "RC information accurate: %s\n", yesno(count < 51));

	gt_core_status = DRM_READ32(dev_priv->mmio_map, GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	sbuf_printf(m, "Video Turbo Mode: %s\n",
	    yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	sbuf_printf(m, "HW control enabled: %s\n",
	    yesno(rpmodectl1 & GEN6_RP_ENABLE));
	sbuf_printf(m, "SW control enabled: %s\n",
	    yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
	    GEN6_RP_MEDIA_SW_MODE));
	sbuf_printf(m, "RC1e Enabled: %s\n",
	    yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	sbuf_printf(m, "RC6 Enabled: %s\n",
	    yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	sbuf_printf(m, "Deep RC6 Enabled: %s\n",
	    yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	sbuf_printf(m, "Deepest RC6 Enabled: %s\n",
	    yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	sbuf_printf(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			sbuf_printf(m, "Core Power Down\n");
			sbuf_printf(m, "on\n");
		sbuf_printf(m, "RC3\n");
		sbuf_printf(m, "RC6\n");
		sbuf_printf(m, "RC7\n");
		sbuf_printf(m, "Unknown\n");

	sbuf_printf(m, "Core Power Down: %s\n",
	    yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
static int i915_drpc_info(struct drm_device *dev, struct sbuf *m, void *unused)

	if (IS_GEN6(dev) || IS_GEN7(dev))
		return (gen6_drpc_info(dev, m));

	return (ironlake_drpc_info(dev, m));
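/*
 * The drpc info node dispatches on GPU generation: Gen6/Gen7 parts report
 * RC6 and RP control state via gen6_drpc_info(), while older hardware
 * reports render-standby (RSX) state via ironlake_drpc_info().
 */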
i915_fbc_status(struct drm_device *dev, struct sbuf *m, void *unused)

	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev)) {
		sbuf_printf(m, "FBC unsupported on this chipset");

	if (intel_fbc_enabled(dev)) {
		sbuf_printf(m, "FBC enabled");
		sbuf_printf(m, "FBC disabled: ");
		switch (dev_priv->no_fbc_reason) {
			sbuf_printf(m, "no outputs");
		case FBC_STOLEN_TOO_SMALL:
			sbuf_printf(m, "not enough stolen memory");
		case FBC_UNSUPPORTED_MODE:
			sbuf_printf(m, "mode not supported");
		case FBC_MODE_TOO_LARGE:
			sbuf_printf(m, "mode too large");
			sbuf_printf(m, "FBC unsupported on plane");
			sbuf_printf(m, "scanout buffer not tiled");
		case FBC_MULTIPLE_PIPES:
			sbuf_printf(m, "multiple pipes are enabled");
			sbuf_printf(m, "unknown reason");
i915_sr_status(struct drm_device *dev, struct sbuf *m, void *unused)

	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	sbuf_printf(m, "self-refresh: %s",
	    sr_enabled ? "enabled" : "disabled");
static int i915_ring_freq_table(struct drm_device *dev, struct sbuf *m,

	drm_i915_private_t *dev_priv = dev->dev_private;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		sbuf_printf(m, "unsupported on this chipset");

	if (sx_xlock_sig(&dev->dev_struct_lock))

	sbuf_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");

	for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
		    GEN6_PCODE_READ_MIN_FREQ_TABLE);
		if (_intel_wait_for(dev,
		    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
			DRM_ERROR("pcode read of freq table timed out\n");
		ia_freq = I915_READ(GEN6_PCODE_DATA);
		sbuf_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
i915_emon_status(struct drm_device *dev, struct sbuf *m, void *unused)

	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;

	if (!IS_GEN5(dev)) {
		sbuf_printf(m, "Not supported\n");

	if (sx_xlock_sig(&dev->dev_struct_lock))

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);

	sbuf_printf(m, "GMCH temp: %ld\n", temp);
	sbuf_printf(m, "Chipset power: %ld\n", chipset);
	sbuf_printf(m, "GFX power: %ld\n", gfx);
	sbuf_printf(m, "Total power: %ld\n", chipset + gfx);
i915_gfxec(struct drm_device *dev, struct sbuf *m, void *unused)

	drm_i915_private_t *dev_priv = dev->dev_private;

	if (sx_xlock_sig(&dev->dev_struct_lock))

	sbuf_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
i915_opregion(struct drm_device *dev, struct sbuf *m, void *unused)

	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;

	if (sx_xlock_sig(&dev->dev_struct_lock))

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);
i915_gem_framebuffer_info(struct drm_device *dev, struct sbuf *m, void *data)

	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_fbdev *ifbdev;
	struct intel_framebuffer *fb;

	if (sx_xlock_sig(&dev->dev_struct_lock))

	ifbdev = dev_priv->fbdev;
	if (ifbdev == NULL) {

	fb = to_intel_framebuffer(ifbdev->helper.fb);

	sbuf_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
	    fb->base.bits_per_pixel);
	describe_obj(m, fb->obj);
	sbuf_printf(m, "\n");

	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (&fb->base == ifbdev->helper.fb)

		sbuf_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
		    fb->base.bits_per_pixel);
		describe_obj(m, fb->obj);
		sbuf_printf(m, "\n");
i915_context_status(struct drm_device *dev, struct sbuf *m, void *data)

	drm_i915_private_t *dev_priv;

	if ((dev->driver->driver_features & DRIVER_MODESET) == 0)

	dev_priv = dev->dev_private;
	ret = sx_xlock_sig(&dev->mode_config.mutex);

	if (dev_priv->pwrctx != NULL) {
		sbuf_printf(m, "power context ");
		describe_obj(m, dev_priv->pwrctx);
		sbuf_printf(m, "\n");

	if (dev_priv->renderctx != NULL) {
		sbuf_printf(m, "render context ");
		describe_obj(m, dev_priv->renderctx);
		sbuf_printf(m, "\n");

	sx_xunlock(&dev->mode_config.mutex);
i915_gen6_forcewake_count_info(struct drm_device *dev, struct sbuf *m,

	struct drm_i915_private *dev_priv;
	unsigned forcewake_count;

	dev_priv = dev->dev_private;
	mtx_lock(&dev_priv->gt_lock);
	forcewake_count = dev_priv->forcewake_count;
	mtx_unlock(&dev_priv->gt_lock);

	sbuf_printf(m, "forcewake count = %u\n", forcewake_count);
swizzle_string(unsigned swizzle)

	case I915_BIT_6_SWIZZLE_NONE:
	case I915_BIT_6_SWIZZLE_9:
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
i915_swizzle_info(struct drm_device *dev, struct sbuf *m, void *data)

	struct drm_i915_private *dev_priv;

	dev_priv = dev->dev_private;
	ret = sx_xlock_sig(&dev->dev_struct_lock);

	sbuf_printf(m, "bit6 swizzle for X-tiling = %s\n",
	    swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	sbuf_printf(m, "bit6 swizzle for Y-tiling = %s\n",
	    swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		sbuf_printf(m, "DDC = 0x%08x\n",
		sbuf_printf(m, "C0DRB3 = 0x%04x\n",
		    I915_READ16(C0DRB3));
		sbuf_printf(m, "C1DRB3 = 0x%04x\n",
		    I915_READ16(C1DRB3));
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		sbuf_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
		    I915_READ(MAD_DIMM_C0));
		sbuf_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
		    I915_READ(MAD_DIMM_C1));
		sbuf_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
		    I915_READ(MAD_DIMM_C2));
		sbuf_printf(m, "TILECTL = 0x%08x\n",
		    I915_READ(TILECTL));
		sbuf_printf(m, "ARB_MODE = 0x%08x\n",
		    I915_READ(ARB_MODE));
		sbuf_printf(m, "DISP_ARB_CTL = 0x%08x\n",
		    I915_READ(DISP_ARB_CTL));
i915_ppgtt_info(struct drm_device *dev, struct sbuf *m, void *data)

	struct drm_i915_private *dev_priv;
	struct intel_ring_buffer *ring;

	dev_priv = dev->dev_private;

	ret = sx_xlock_sig(&dev->dev_struct_lock);

	if (INTEL_INFO(dev)->gen == 6)
		sbuf_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for (i = 0; i < I915_NUM_RINGS; i++) {
		ring = &dev_priv->rings[i];

		sbuf_printf(m, "%s\n", ring->name);
		if (INTEL_INFO(dev)->gen == 7)
			sbuf_printf(m, "GFX_MODE: 0x%08x\n",
			    I915_READ(RING_MODE_GEN7(ring)));
		sbuf_printf(m, "PP_DIR_BASE: 0x%08x\n",
		    I915_READ(RING_PP_DIR_BASE(ring)));
		sbuf_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
		    I915_READ(RING_PP_DIR_BASE_READ(ring)));
		sbuf_printf(m, "PP_DIR_DCLV: 0x%08x\n",
		    I915_READ(RING_PP_DIR_DCLV(ring)));

	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		sbuf_printf(m, "aliasing PPGTT:\n");
		sbuf_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);

	sbuf_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
i915_debug_set_wedged(SYSCTL_HANDLER_ARGS)

	struct drm_device *dev;
	drm_i915_private_t *dev_priv;

	dev_priv = dev->dev_private;
	if (dev_priv == NULL)

	wedged = dev_priv->mm.wedged;
	error = sysctl_handle_int(oidp, &wedged, 0, req);
	if (error || !req->newptr)

	DRM_INFO("Manually setting wedged to %d\n", wedged);
	i915_handle_error(dev, wedged);
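/*
 * Writing to the wedged node forces the GPU-hang recovery path, which is
 * useful for testing error-state capture.  Assuming the driver's sysctl
 * tree sits at hw.dri.<unit> (the usual drm layout), something like
 *
 *	sysctl hw.dri.0.wedged=1
 *
 * would mark the GPU wedged and kick off i915_handle_error().
 */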
i915_max_freq(SYSCTL_HANDLER_ARGS)

	struct drm_device *dev;
	drm_i915_private_t *dev_priv;
	int error, max_freq;

	dev_priv = dev->dev_private;
	if (dev_priv == NULL)

	max_freq = dev_priv->max_delay * 50;
	error = sysctl_handle_int(oidp, &max_freq, 0, req);
	if (error || !req->newptr)

	DRM_DEBUG("Manually setting max freq to %d\n", max_freq);
	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	dev_priv->max_delay = max_freq / 50;
	gen6_set_rps(dev, max_freq / 50);
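/*
 * max_delay is kept in hardware units of 50 MHz, so this node reads and
 * writes whole megahertz and converts by that factor: for example, writing
 * 1100 sets max_delay to 22, and gen6_set_rps() keeps turbo at or below
 * that ratio.
 */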
i915_cache_sharing(SYSCTL_HANDLER_ARGS)

	struct drm_device *dev;
	drm_i915_private_t *dev_priv;
	int error, snpcr, cache_sharing;

	dev_priv = dev->dev_private;
	if (dev_priv == NULL)

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	cache_sharing = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
	error = sysctl_handle_int(oidp, &cache_sharing, 0, req);
	if (error || !req->newptr)

	if (cache_sharing < 0 || cache_sharing > 3)

	DRM_DEBUG("Manually setting uncore sharing to %d\n", cache_sharing);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (cache_sharing << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
static struct i915_info_sysctl_list {
	int (*ptr)(struct drm_device *dev, struct sbuf *m, void *data);
} i915_info_sysctl_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *)ACTIVE_LIST},
	{"i915_gem_flushing", i915_gem_object_list_info, 0,
	    (void *)FLUSHING_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0,
	    (void *)INACTIVE_LIST},
	{"i915_gem_pinned", i915_gem_object_list_info, 0,
	    (void *)PINNED_LIST},
	{"i915_gem_deferred_free", i915_gem_object_list_info, 0,
	    (void *)DEFERRED_FREE_LIST},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
	{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
	{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
	{"i915_error_state", i915_error_state, 0},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count_info", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
struct i915_info_sysctl_thunk {
	struct drm_device *dev;

i915_info_sysctl_handler(SYSCTL_HANDLER_ARGS)

	struct i915_info_sysctl_thunk *thunk;
	struct drm_device *dev;
	drm_i915_private_t *dev_priv;

	dev_priv = dev->dev_private;
	if (dev_priv == NULL)

	error = sysctl_wire_old_buffer(req, 0);

	sbuf_new_for_sysctl(&m, NULL, 128, req);
	error = i915_info_sysctl_list[thunk->idx].ptr(dev, &m,
	error = sbuf_finish(&m);

extern int i915_gem_sync_exec_requests;
extern int i915_fix_mi_batchbuffer_end;
extern int i915_intr_pf;
extern long i915_gem_wired_pages_cnt;
i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *top)

	struct sysctl_oid *oid, *info;
	struct i915_info_sysctl_thunk *thunks;

	thunks = malloc(sizeof(*thunks) * DRM_ARRAY_SIZE(i915_info_sysctl_list),
	    DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
	for (i = 0; i < DRM_ARRAY_SIZE(i915_info_sysctl_list); i++) {
		thunks[i].dev = dev;
		thunks[i].arg = i915_info_sysctl_list[i].data;

	dev->sysctl_private = thunks;
	info = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "info",
	    CTLFLAG_RW, NULL, NULL);

	for (i = 0; i < DRM_ARRAY_SIZE(i915_info_sysctl_list); i++) {
		oid = SYSCTL_ADD_OID(ctx, SYSCTL_CHILDREN(info), OID_AUTO,
		    i915_info_sysctl_list[i].name, CTLTYPE_STRING | CTLFLAG_RD,
		    &thunks[i], 0, i915_info_sysctl_handler, "A", NULL);

	oid = SYSCTL_ADD_LONG(ctx, SYSCTL_CHILDREN(info), OID_AUTO,
	    "i915_gem_wired_pages", CTLFLAG_RD, &i915_gem_wired_pages_cnt,

	oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "wedged",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0,
	    i915_debug_set_wedged, "I", NULL);

	oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "max_freq",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, i915_max_freq,

	oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
	    "cache_sharing", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev,
	    0, i915_cache_sharing, "I", NULL);

	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "sync_exec",
	    CTLFLAG_RW, &i915_gem_sync_exec_requests, 0, NULL);

	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "fix_mi",
	    CTLFLAG_RW, &i915_fix_mi_batchbuffer_end, 0, NULL);

	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "intr_pf",
	    CTLFLAG_RW, &i915_intr_pf, 0, NULL);

	error = drm_add_busid_modesetting(dev, ctx, top);
i915_sysctl_cleanup(struct drm_device *dev)

	free(dev->sysctl_private, DRM_MEM_DRIVER);