1 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
36 #include <dev/agp/agp_i810.h>
37 #include <dev/drm2/drm_mm.h>
38 #include <dev/drm2/i915/i915_reg.h>
39 #include <dev/drm2/i915/intel_ringbuffer.h>
40 #include <dev/drm2/i915/intel_bios.h>
42 /* General customization:
45 #define DRIVER_AUTHOR "Tungsten Graphics, Inc."
47 #define DRIVER_NAME "i915"
48 #define DRIVER_DESC "Intel Graphics"
49 #define DRIVER_DATE "20080730"
51 MALLOC_DECLARE(DRM_I915_GEM);
59 #define pipe_name(p) ((p) + 'A')
60 #define I915_NUM_PIPE 2
67 #define plane_name(p) ((p) + 'A')
77 #define port_name(p) ((p) + 'A')
79 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
82 #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
/*
 * Book-keeping for a PCH (south display) PLL that may be shared by
 * several CRTCs.  NOTE(review): the struct body is elided in this
 * listing; only the fields below are visible.
 */
84 struct intel_pch_pll {
85 int refcount; /* count of number of CRTCs sharing this PLL */
86 int active; /* count of number of active CRTCs (i.e. DPMS on) */
87 bool on; /* is the PLL actually active? Disabled during modeset */
92 #define I915_NUM_PLLS 2
97 * 1.2: Add Power Management
98 * 1.3: Add vblank support
99 * 1.4: Fix cmdbuffer path, add heap destroy
100 * 1.5: Add vblank pipe configuration
101 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
102 * - Support vertical blank on secondary display pipe
104 #define DRIVER_MAJOR 1
105 #define DRIVER_MINOR 6
106 #define DRIVER_PATCHLEVEL 0
108 #define WATCH_COHERENCY 0
112 #define WATCH_RELOC 0
113 #define WATCH_INACTIVE 0
114 #define WATCH_PWRITE 0
116 #define I915_GEM_PHYS_CURSOR_0 1
117 #define I915_GEM_PHYS_CURSOR_1 2
118 #define I915_GEM_PHYS_OVERLAY_REGS 3
119 #define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
/*
 * Backing store for a "phys" GEM object (cursor/overlay register pages,
 * see the I915_GEM_PHYS_* ids above).  NOTE(review): interior fields are
 * elided in this listing.
 */
121 struct drm_i915_gem_phys_object {
123 drm_dma_handle_t *handle; /* contiguous DMA memory backing the object */
124 struct drm_i915_gem_object *cur_obj; /* GEM object currently attached, if any */
127 struct drm_i915_private;
/*
 * Per-generation display hook table.  Each function pointer is filled in
 * at init time with the implementation matching the detected hardware
 * generation, so callers stay generation-agnostic.  NOTE(review): some
 * members are elided in this listing.
 */
129 struct drm_i915_display_funcs {
130 void (*dpms)(struct drm_crtc *crtc, int mode);
131 bool (*fbc_enabled)(struct drm_device *dev);
132 void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
133 void (*disable_fbc)(struct drm_device *dev);
134 int (*get_display_clock_speed)(struct drm_device *dev);
135 int (*get_fifo_size)(struct drm_device *dev, int plane);
136 void (*update_wm)(struct drm_device *dev);
137 void (*update_sprite_wm)(struct drm_device *dev, int pipe,
138 uint32_t sprite_width, int pixel_size);
139 void (*sanitize_pm)(struct drm_device *dev);
140 void (*update_linetime_wm)(struct drm_device *dev, int pipe,
141 struct drm_display_mode *mode);
142 int (*crtc_mode_set)(struct drm_crtc *crtc,
143 struct drm_display_mode *mode,
144 struct drm_display_mode *adjusted_mode,
146 struct drm_framebuffer *old_fb);
147 void (*off)(struct drm_crtc *crtc);
148 void (*write_eld)(struct drm_connector *connector,
149 struct drm_crtc *crtc);
150 void (*fdi_link_train)(struct drm_crtc *crtc);
151 void (*init_clock_gating)(struct drm_device *dev);
152 void (*init_pch_clock_gating)(struct drm_device *dev);
153 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
154 struct drm_framebuffer *fb,
155 struct drm_i915_gem_object *obj);
156 void (*force_wake_get)(struct drm_i915_private *dev_priv);
157 void (*force_wake_put)(struct drm_i915_private *dev_priv);
158 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
160 /* clock updates for mode set */
162 /* render clock increase/decrease */
163 /* display clock increase/decrease */
164 /* pll clock increase/decrease */
/*
 * Static per-chipset capability flags, selected by PCI id (see
 * i915_get_device_id()).  NOTE(review): most members are elided in this
 * listing; only two capability bits are visible.
 */
167 struct intel_device_info {
187 u8 cursor_needs_physical:1; /* cursor must live in physical (phys obj) memory */
189 u8 overlay_needs_physical:1; /* overlay regs must live in physical memory */
196 #define I915_PPGTT_PD_ENTRIES 512
197 #define I915_PPGTT_PT_ENTRIES 1024
/*
 * Per-process GTT (PPGTT) page-directory state; sized by the
 * I915_PPGTT_PD_ENTRIES / I915_PPGTT_PT_ENTRIES constants above.
 * NOTE(review): interior fields are elided in this listing.
 */
198 struct i915_hw_ppgtt {
199 unsigned num_pd_entries; /* number of page-directory entries in use */
202 vm_paddr_t *pt_dma_addr; /* bus addresses of the page tables */
203 vm_paddr_t scratch_page_dma_addr; /* bus address of the shared scratch page */
207 /* This must match up with the value previously used for execbuf2.rsvd1. */
208 #define DEFAULT_CONTEXT_ID 0
/*
 * A hardware context: per-file GPU state that can be switched via
 * i915_switch_context().  DEFAULT_CONTEXT_ID above names the implicit
 * default context.  NOTE(review): interior fields are elided.
 */
209 struct i915_hw_context {
212 struct drm_i915_file_private *file_priv; /* owning file, NULL for the default context? -- TODO confirm */
213 struct intel_ring_buffer *ring; /* ring this context runs on */
214 struct drm_i915_gem_object *obj; /* backing object holding the HW context image */
218 FBC_NO_OUTPUT, /* no outputs enabled to compress */
219 FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
220 FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
221 FBC_MODE_TOO_LARGE, /* mode too large for compression */
222 FBC_BAD_PLANE, /* fbc not supported on plane */
223 FBC_NOT_TILED, /* buffer not tiled */
224 FBC_MULTIPLE_PIPES, /* more than one pipe active */
229 struct mem_block *next;
230 struct mem_block *prev;
233 struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
236 struct opregion_header;
237 struct opregion_acpi;
238 struct opregion_swsci;
239 struct opregion_asle;
/*
 * Cached pointers into the ACPI OpRegion shared with system firmware
 * (header plus the ACPI/SWSCI/ASLE mailboxes); total size is
 * OPREGION_SIZE below.
 */
241 struct intel_opregion {
242 struct opregion_header *header;
243 struct opregion_acpi *acpi;
244 struct opregion_swsci *swsci;
245 struct opregion_asle *asle;
249 #define OPREGION_SIZE (8*1024)
/*
 * Per-DRM-master state: the legacy shared-memory area (SAREA) mapping
 * used by the DRI1/user-space interface.
 */
251 struct drm_i915_master_private {
252 drm_local_map_t *sarea; /* mapping of the SAREA */
253 struct _drm_i915_sarea *sarea_priv; /* i915-specific portion of the SAREA */
255 #define I915_FENCE_REG_NONE -1
256 #define I915_MAX_NUM_FENCES 16
257 /* 16 fences + sign bit for FENCE_REG_NONE */
258 #define I915_MAX_NUM_FENCE_BITS 5
/*
 * Software state for one hardware fence register (tiling detiler).
 * There are at most I915_MAX_NUM_FENCES of these; I915_FENCE_REG_NONE
 * marks "no fence assigned".  NOTE(review): interior fields (e.g. the
 * pin_count used by the inline pin/unpin helpers below) are elided.
 */
260 struct drm_i915_fence_reg {
261 struct list_head lru_list; /* position on the fence LRU, for stealing */
262 struct drm_i915_gem_object *obj; /* object currently holding this fence, if any */
266 struct sdvo_device_mapping {
276 PCH_IBX, /* Ibexpeak PCH */
277 PCH_CPT, /* Cougarpoint PCH */
278 PCH_LPT, /* Lynxpoint PCH */
281 #define QUIRK_PIPEA_FORCE (1<<0)
282 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
283 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
286 struct intel_fbc_work;
/*
 * drm_i915_private: the driver-wide private state hung off the
 * drm_device.  Groups (as visible in this elided listing): GMBUS/i2c
 * bridges, ring buffers and status page, interrupt/hangcheck state,
 * VBT/BIOS-derived feature bits, fence registers, suspend/resume
 * register save area, GTT/GEM memory management lists, KMS mappings,
 * power-management tasks and error-capture state.
 * NOTE(review): many members are elided in this listing.
 */
288 typedef struct drm_i915_private {
289 struct drm_device *dev;
291 device_t gmbus_bridge[GMBUS_NUM_PORTS + 1];
292 device_t bbbus_bridge[GMBUS_NUM_PORTS + 1];
293 device_t gmbus[GMBUS_NUM_PORTS + 1];
294 device_t bbbus[GMBUS_NUM_PORTS + 1];
295 /** gmbus_sx protects against concurrent usage of the single hw gmbus
296 * controller on different i2c buses. */
298 uint32_t gpio_mmio_base;
300 int relative_constants_mode;
302 drm_local_map_t *mmio_map;
304 /** gt_fifo_count and the subsequent register write are synchronized
305 * with dev->struct_mutex. */
306 unsigned gt_fifo_count;
307 /** forcewake_count is protected by gt_lock */
308 unsigned forcewake_count;
309 /** gt_lock is also taken in irq contexts. */
312 /* drm_i915_ring_buffer_t ring; */
313 struct intel_ring_buffer rings[I915_NUM_RINGS];
316 drm_dma_handle_t *status_page_dmah;
317 void *hw_status_page;
318 dma_addr_t dma_status_page;
320 unsigned int status_gfx_addr;
321 struct drm_gem_object *hws_obj;
323 struct drm_i915_gem_object *pwrctx;
324 struct drm_i915_gem_object *renderctx;
332 atomic_t irq_received;
335 /** Cached value of IER to avoid reads in updating the bitfield */
342 struct mtx dpio_lock;
344 u32 hotplug_supported_mask;
346 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
350 /* For hangcheck timer */
351 #define DRM_I915_HANGCHECK_PERIOD ((1500 /* in ms */ * hz) / 1000)
353 uint32_t last_acthd[I915_NUM_RINGS];
354 uint32_t last_instdone;
355 uint32_t last_instdone1;
357 unsigned int stop_rings;
359 struct intel_opregion opregion;
363 struct intel_overlay *overlay;
364 bool sprite_scaling_enabled;
367 int backlight_level; /* restore backlight to this value */
368 bool backlight_enabled;
369 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
370 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
372 /* Feature bits from the VBIOS */
373 unsigned int int_tv_support:1;
374 unsigned int lvds_dither:1;
375 unsigned int lvds_vbt:1;
376 unsigned int int_crt_support:1;
377 unsigned int lvds_use_ssc:1;
378 unsigned int display_clock_mode:1;
380 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
381 unsigned int lvds_val; /* used for checking LVDS channel mode */
391 struct edp_power_seq pps;
393 bool no_aux_handshake;
396 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
397 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
398 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
400 /* PCH chipset type */
401 enum intel_pch pch_type;
403 /* Display functions */
404 struct drm_i915_display_funcs display;
406 unsigned long quirks;
/* Register save area used across suspend/resume. */
431 u32 saveTRANS_HTOTAL_A;
432 u32 saveTRANS_HBLANK_A;
433 u32 saveTRANS_HSYNC_A;
434 u32 saveTRANS_VTOTAL_A;
435 u32 saveTRANS_VBLANK_A;
436 u32 saveTRANS_VSYNC_A;
444 u32 savePFIT_PGM_RATIOS;
445 u32 saveBLC_HIST_CTL;
447 u32 saveBLC_PWM_CTL2;
448 u32 saveBLC_CPU_PWM_CTL;
449 u32 saveBLC_CPU_PWM_CTL2;
462 u32 saveTRANS_HTOTAL_B;
463 u32 saveTRANS_HBLANK_B;
464 u32 saveTRANS_HSYNC_B;
465 u32 saveTRANS_VTOTAL_B;
466 u32 saveTRANS_VBLANK_B;
467 u32 saveTRANS_VSYNC_B;
481 u32 savePP_ON_DELAYS;
482 u32 savePP_OFF_DELAYS;
490 u32 savePFIT_CONTROL;
491 u32 save_palette_a[256];
492 u32 save_palette_b[256];
493 u32 saveDPFC_CB_BASE;
494 u32 saveFBC_CFB_BASE;
497 u32 saveFBC_CONTROL2;
507 u32 saveCACHE_MODE_0;
508 u32 saveMI_ARB_STATE;
519 uint64_t saveFENCE[I915_MAX_NUM_FENCES];
530 u32 savePIPEA_GMCH_DATA_M;
531 u32 savePIPEB_GMCH_DATA_M;
532 u32 savePIPEA_GMCH_DATA_N;
533 u32 savePIPEB_GMCH_DATA_N;
534 u32 savePIPEA_DP_LINK_M;
535 u32 savePIPEB_DP_LINK_M;
536 u32 savePIPEA_DP_LINK_N;
537 u32 savePIPEB_DP_LINK_N;
548 u32 savePCH_DREF_CONTROL;
549 u32 saveDISP_ARB_CTL;
550 u32 savePIPEA_DATA_M1;
551 u32 savePIPEA_DATA_N1;
552 u32 savePIPEA_LINK_M1;
553 u32 savePIPEA_LINK_N1;
554 u32 savePIPEB_DATA_M1;
555 u32 savePIPEB_DATA_N1;
556 u32 savePIPEB_LINK_M1;
557 u32 savePIPEB_LINK_N1;
558 u32 saveMCHBAR_RENDER_STANDBY;
559 u32 savePCH_PORT_HOTPLUG;
/* GEM memory manager state. */
562 /** Memory allocator for GTT stolen memory */
563 struct drm_mm stolen;
564 /** Memory allocator for GTT */
565 struct drm_mm gtt_space;
566 /** List of all objects in gtt_space. Used to restore gtt
567 * mappings on resume */
568 struct list_head gtt_list;
570 /** Usable portion of the GTT for GEM */
571 unsigned long gtt_start;
572 unsigned long gtt_mappable_end;
573 unsigned long gtt_end;
575 /** PPGTT used for aliasing the PPGTT with the GTT */
576 struct i915_hw_ppgtt *aliasing_ppgtt;
579 * List of objects currently involved in rendering from the
582 * Includes buffers having the contents of their GPU caches
583 * flushed, not necessarily primitives. last_rendering_seqno
584 * represents when the rendering involved will be completed.
586 * A reference is held on the buffer while on this list.
588 struct list_head active_list;
591 * List of objects which are not in the ringbuffer but which
592 * still have a write_domain which needs to be flushed before
595 * A reference is held on the buffer while on this list.
597 struct list_head flushing_list;
600 * LRU list of objects which are not in the ringbuffer and
601 * are ready to unbind, but are still in the GTT.
603 * last_rendering_seqno is 0 while an object is in this list.
605 * A reference is not held on the buffer while on this list,
606 * as merely being GTT-bound shouldn't prevent its being
607 * freed, and we'll pull it off the list in the free path.
609 struct list_head inactive_list;
611 /** LRU list of objects with fence regs on them. */
612 struct list_head fence_list;
615 * We leave the user IRQ off as much as possible,
616 * but this means that requests will finish and never
617 * be retired once the system goes idle. Set a timer to
618 * fire periodically while the ring is running. When it
619 * fires, go retire requests.
621 struct timeout_task retire_task;
624 * Are we in a non-interruptible section of code like
629 uint32_t next_gem_seqno;
632 * Waiting sequence number, if any
634 uint32_t waiting_gem_seqno;
637 * Last seq seen at irq time
639 uint32_t irq_gem_seqno;
642 * Flag if the X Server, and thus DRM, is not currently in
643 * control of the device.
645 * This is set between LeaveVT and EnterVT. It needs to be
646 * replaced with a semaphore. It also needs to be
647 * transitioned away from for kernel modesetting.
652 * Flag if the hardware appears to be wedged.
654 * This is set when attempts to idle the device timeout.
655 * It prevents command submission from occurring and makes
656 * every pending request fail
660 /** Bit 6 swizzling required for X tiling */
661 uint32_t bit_6_swizzle_x;
662 /** Bit 6 swizzling required for Y tiling */
663 uint32_t bit_6_swizzle_y;
665 /* storage for physical objects */
666 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
668 /* accounting, useful for userland debugging */
670 size_t mappable_gtt_total;
671 size_t object_memory;
674 struct intel_gtt gtt;
675 eventhandler_tag i915_lowmem;
678 const struct intel_device_info *info;
680 /* Old dri1 support infrastructure, beware the dragons ya fools entering
683 unsigned allow_batchbuffer : 1;
684 u32 *gfx_hws_cpu_addr;
687 /* Kernel Modesetting */
689 struct sdvo_device_mapping sdvo_mappings[2];
690 /* indicate whether the LVDS_BORDER should be enabled or not */
691 unsigned int lvds_border_bits;
692 /* Panel fitter placement and size for Ironlake+ */
693 u32 pch_pf_pos, pch_pf_size;
695 struct drm_crtc *plane_to_crtc_mapping[3];
696 struct drm_crtc *pipe_to_crtc_mapping[3];
697 /* wait_queue_head_t pending_flip_queue; XXXKIB */
699 struct intel_pch_pll pch_plls[I915_NUM_PLLS];
701 /* Reclocking support */
702 bool render_reclock_avail;
703 bool lvds_downclock_avail;
704 /* indicates the reduced downclock for LVDS*/
706 struct task idle_task;
707 struct callout idle_callout;
711 struct child_device_config *child_dev;
712 struct drm_connector *int_lvds_connector;
713 struct drm_connector *int_edp_connector;
716 bool mchbar_need_disable;
718 struct resource *mch_res;
722 struct task rps_task;
731 unsigned long last_time1;
732 unsigned long chipset_power;
734 struct timespec last_time2;
735 unsigned long gfx_power;
739 struct mtx *mchdev_lock;
/* Frame-buffer compression (FBC) state. */
741 enum no_fbc_reason no_fbc_reason;
743 struct drm_mm_node *compressed_fb;
744 struct drm_mm_node *compressed_llb;
746 unsigned long cfb_size;
750 struct intel_fbc_work *fbc_work;
752 unsigned int fsb_freq, mem_freq, is_ddr3;
/* Error handling / GPU hang detection. */
754 struct taskqueue *tq;
755 struct task error_task;
756 struct task hotplug_task;
757 int error_completion;
758 struct mtx error_completion_lock;
759 /* Protected by dev->error_lock. */
760 struct drm_i915_error_state *first_error;
761 struct mtx error_lock;
762 struct callout hangcheck_timer;
764 unsigned long last_gpu_reset;
766 struct intel_fbdev *fbdev;
768 struct drm_property *broadcast_rgb_property;
769 struct drm_property *force_audio_property;
771 bool hw_contexts_disabled;
772 uint32_t hw_context_size;
773 } drm_i915_private_t;
775 /* Iterate over initialised rings */
/* The trailing if both binds ring__ to the i__'th ring and skips rings
 * that were never initialised on this hardware generation. */
776 #define for_each_ring(ring__, dev_priv__, i__) \
777 for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
778 if (((ring__) = &(dev_priv__)->rings[(i__)]), intel_ring_initialized((ring__)))
/*
 * Values for the force_audio connector property (see
 * force_audio_property in drm_i915_private): whether HDMI audio is
 * forced off, left to EDID auto-detection, or forced on.
 */
780 enum hdmi_force_audio {
781 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
782 HDMI_AUDIO_OFF, /* force turn off HDMI audio */
783 HDMI_AUDIO_AUTO, /* trust EDID */
784 HDMI_AUDIO_ON, /* force turn on HDMI audio */
787 enum i915_cache_level {
790 I915_CACHE_LLC_MLC, /* gen6+ */
793 enum intel_chip_family {
800 /** driver private structure attached to each drm_gem_object */
/*
 * Driver-private state for every GEM buffer object: its GTT binding,
 * its place on the GEM bookkeeping lists, tiling/fencing state, pin
 * accounting and execbuffer relocation scratch fields.  Embeds the
 * generic drm_gem_object as its first member so to_intel_bo() below can
 * convert back.  NOTE(review): several members are elided in this listing.
 */
801 struct drm_i915_gem_object {
802 struct drm_gem_object base;
804 /** Current space allocated to this object in the GTT, if any. */
805 struct drm_mm_node *gtt_space;
806 struct list_head gtt_list;
807 /** This object's place on the active/flushing/inactive lists */
808 struct list_head ring_list;
809 struct list_head mm_list;
810 /** This object's place on GPU write list */
811 struct list_head gpu_write_list;
812 /** This object's place in the batchbuffer or on the eviction list */
813 struct list_head exec_list;
816 * This is set if the object is on the active or flushing lists
817 * (has pending rendering), and is not set if it's on inactive (ready
820 unsigned int active:1;
823 * This is set if the object has been written to since last bound
826 unsigned int dirty:1;
829 * This is set if the object has been written to since the last
832 unsigned int pending_gpu_write:1;
835 * Fence register bits (if any) for this object. Will be set
836 * as needed when mapped into the GTT.
837 * Protected by dev->struct_mutex.
839 signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
842 * Advice: are the backing pages purgeable?
847 * Current tiling mode for the object.
849 unsigned int tiling_mode:2;
851 * Whether the tiling parameters for the currently associated fence
852 * register have changed. Note that for the purposes of tracking
853 * tiling changes we also treat the unfenced register, the register
854 * slot that the object occupies whilst it executes a fenced
855 * command (such as BLT on gen2/3), as a "fence".
857 unsigned int fence_dirty:1;
859 /** How many users have pinned this object in GTT space. The following
860 * users can each hold at most one reference: pwrite/pread, pin_ioctl
861 * (via user_pin_count), execbuffer (objects are not allowed multiple
862 * times for the same batchbuffer), and the framebuffer code. When
863 * switching/pageflipping, the framebuffer code has at most two buffers
866 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
867 * bits with absolutely no headroom. So use 4 bits. */
868 unsigned int pin_count:4;
869 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
872 * Is the object at the current location in the gtt mappable and
873 * fenceable? Used to avoid costly recalculations.
875 unsigned int map_and_fenceable:1;
878 * Whether the current gtt mapping needs to be mappable (and isn't just
879 * mappable by accident). Track pin and fault separately for a more
880 * accurate mappable working set.
882 unsigned int fault_mappable:1;
883 unsigned int pin_mappable:1;
884 unsigned int pin_display:1;
887 * Is the GPU currently using a fence to access this buffer,
889 unsigned int pending_fenced_gpu_access:1;
890 unsigned int fenced_gpu_access:1;
892 unsigned int cache_level:2;
894 unsigned int has_aliasing_ppgtt_mapping:1;
895 unsigned int has_global_gtt_mapping:1;
903 struct sglist *sg_list;
906 * Used for performing relocations during execbuffer insertion.
908 LIST_ENTRY(drm_i915_gem_object) exec_node;
909 unsigned long exec_handle;
910 struct drm_i915_gem_exec_object2 *exec_entry;
913 * Current offset of the object in GTT space.
915 * This is the same as gtt_space->start
919 struct intel_ring_buffer *ring;
921 /** Breadcrumb of last rendering to the buffer. */
922 uint32_t last_rendering_seqno;
923 /** Breadcrumb of last fenced GPU access to the buffer. */
924 uint32_t last_fenced_seqno;
926 /** Current tiling stride for the object, if it's tiled. */
929 /** Record of address bit 17 of each page at last unbind. */
930 unsigned long *bit_17;
932 /** User space pin count and filp owning the pin */
933 uint32_t user_pin_count;
934 struct drm_file *pin_filp;
936 /** for phy allocated objects */
937 struct drm_i915_gem_phys_object *phys_obj;
940 * Number of crtcs where this object is currently the fb, but
941 * will be page flipped away on the next vblank. When it
942 * reaches 0, dev_priv->pending_flip_queue will be woken up.
947 #define to_intel_bo(x) __containerof(x, struct drm_i915_gem_object, base)
950 * Request queue structure.
952 * The request queue allows us to note sequence numbers that have been emitted
953 * and may be associated with active buffers to be retired.
955 * By keeping this list, we can avoid having to do questionable
956 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
957 * an emission time with seqnos for tracking how far ahead of the GPU we are.
959 struct drm_i915_gem_request {
960 /** On which ring this request was generated */
961 struct intel_ring_buffer *ring;
963 /** GEM sequence number associated with this request. */
966 /** Position in the ringbuffer of the end of the request */
969 /** Time at which this request was emitted, in jiffies. */
970 unsigned long emitted_jiffies;
972 /** global list entry for this request */
973 struct list_head list;
975 struct drm_i915_file_private *file_priv;
976 /** file_priv list entry for this request */
977 struct list_head client_list;
/*
 * Per-open-file driver state: the file's outstanding GEM requests and
 * its hardware-context name space.  NOTE(review): interior fields
 * (e.g. the lock guarding request_list) are elided in this listing.
 */
980 struct drm_i915_file_private {
982 struct list_head request_list; /* requests emitted by this file, in order */
985 struct drm_gem_names context_idr; /* id -> i915_hw_context lookup for this file */
/*
 * Snapshot of GPU state captured when a hang/error is detected, kept at
 * dev_priv->first_error (protected by dev->error_lock) for later
 * inspection; freed with i915_error_state_free().  Most register arrays
 * are indexed by ring.  NOTE(review): the nested struct bodies are
 * partially elided in this listing.
 */
988 struct drm_i915_error_state {
993 bool waiting[I915_NUM_RINGS];
994 u32 pipestat[I915_MAX_PIPES];
995 u32 tail[I915_NUM_RINGS];
996 u32 head[I915_NUM_RINGS];
997 u32 ipeir[I915_NUM_RINGS];
998 u32 ipehr[I915_NUM_RINGS];
999 u32 instdone[I915_NUM_RINGS];
1000 u32 acthd[I915_NUM_RINGS];
1001 u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
1002 /* our own tracking of ring head and tail */
1003 u32 cpu_ring_head[I915_NUM_RINGS];
1004 u32 cpu_ring_tail[I915_NUM_RINGS];
1005 u32 error; /* gen6+ */
1006 u32 instpm[I915_NUM_RINGS];
1007 u32 instps[I915_NUM_RINGS];
1009 u32 seqno[I915_NUM_RINGS];
1011 u32 fault_reg[I915_NUM_RINGS];
1013 u32 faddr[I915_NUM_RINGS];
1014 u64 fence[I915_MAX_NUM_FENCES];
1015 struct timeval time;
1016 struct drm_i915_error_ring {
1017 struct drm_i915_error_object {
1021 } *ringbuffer, *batchbuffer;
1022 struct drm_i915_error_request {
1028 } ring[I915_NUM_RINGS];
1029 struct drm_i915_error_buffer {
1036 s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
1043 } *active_bo, *pinned_bo;
1044 u32 active_bo_count, pinned_bo_count;
1045 struct intel_overlay_error_state *overlay;
1046 struct intel_display_error_state *display;
1050 * RC6 is a special power stage which allows the GPU to enter a very
1051 * low-voltage mode when idle, using down to 0V while at this stage. This
1052 * stage is entered automatically when the GPU is idle when RC6 support is
1053 * enabled, and as soon as new workload arises GPU wakes up automatically as well.
1055 * There are different RC6 modes available in Intel GPU, which differentiate
1056 * among each other with the latency required to enter and leave RC6 and
1057 * voltage consumed by the GPU in different states.
1059 * The combination of the following flags define which states GPU is allowed
1060 * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
1061 * RC6pp is deepest RC6. Their support by hardware varies according to the
1062 * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
1063 * which brings the most power savings; deeper states save more power, but
1064 * require higher latency to switch to and wake up.
1066 #define INTEL_RC6_ENABLE (1<<0)
1067 #define INTEL_RC6p_ENABLE (1<<1)
1068 #define INTEL_RC6pp_ENABLE (1<<2)
1070 extern int intel_iommu_enabled;
1071 extern struct drm_ioctl_desc i915_ioctls[];
1072 extern struct drm_driver i915_driver_info;
1073 extern struct cdev_pager_ops i915_gem_pager_ops;
1074 extern unsigned int i915_fbpercrtc;
1075 extern int i915_panel_ignore_lid;
1076 extern int i915_panel_invert_brightness;
1077 extern unsigned int i915_powersave;
1078 extern int i915_prefault_disable;
1079 extern int i915_semaphores;
1080 extern unsigned int i915_lvds_downclock;
1081 extern int i915_lvds_channel_mode;
1082 extern int i915_panel_use_ssc;
1083 extern int i915_vbt_sdvo_panel_type;
1084 extern int i915_enable_rc6;
1085 extern int i915_enable_fbc;
1086 extern int i915_enable_ppgtt;
1087 extern int i915_enable_hangcheck;
1089 const struct intel_device_info *i915_get_device_id(int device);
1091 int i915_reset(struct drm_device *dev);
1092 extern int intel_gpu_reset(struct drm_device *dev);
1095 int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
1096 struct sysctl_oid *top);
1097 void i915_sysctl_cleanup(struct drm_device *dev);
1099 extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
1100 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
1103 int i915_batchbuffer(struct drm_device *dev, void *data,
1104 struct drm_file *file_priv);
1105 int i915_cmdbuffer(struct drm_device *dev, void *data,
1106 struct drm_file *file_priv);
1107 int i915_getparam(struct drm_device *dev, void *data,
1108 struct drm_file *file_priv);
1109 void i915_update_dri1_breadcrumb(struct drm_device *dev);
1110 extern void i915_kernel_lost_context(struct drm_device * dev);
1111 extern int i915_driver_load(struct drm_device *, unsigned long flags);
1112 extern int i915_driver_unload(struct drm_device *);
1113 extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
1114 extern void i915_driver_lastclose(struct drm_device * dev);
1115 extern void i915_driver_preclose(struct drm_device *dev,
1116 struct drm_file *file_priv);
1117 extern void i915_driver_postclose(struct drm_device *dev,
1118 struct drm_file *file_priv);
1119 extern int i915_driver_device_is_agp(struct drm_device * dev);
1120 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
1122 extern int i915_emit_box(struct drm_device *dev,
1123 struct drm_clip_rect __user *boxes,
1124 int i, int DR1, int DR4);
1125 int i915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box,
1128 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
1129 unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
1130 void i915_update_gfx_val(struct drm_i915_private *dev_priv);
1131 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
1132 unsigned long i915_read_mch_val(void);
1133 bool i915_gpu_raise(void);
1134 bool i915_gpu_lower(void);
1135 bool i915_gpu_busy(void);
1136 bool i915_gpu_turbo_disable(void);
1139 extern int i915_irq_emit(struct drm_device *dev, void *data,
1140 struct drm_file *file_priv);
1141 extern void intel_irq_init(struct drm_device *dev);
1143 void intel_enable_asle(struct drm_device *dev);
1144 void i915_hangcheck_elapsed(void *context);
1145 void i915_handle_error(struct drm_device *dev, bool wedged);
1146 void i915_error_state_free(struct drm_i915_error_state *error);
1148 void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1149 void i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1151 void i915_destroy_error_state(struct drm_device *dev);
1154 int i915_gem_create(struct drm_file *file, struct drm_device *dev, uint64_t size,
1155 uint32_t *handle_p);
1156 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
1157 struct drm_file *file_priv);
1158 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
1159 struct drm_file *file_priv);
1160 int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1161 struct drm_file *file_priv);
1162 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1163 struct drm_file *file_priv);
1164 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1165 struct drm_file *file_priv);
1166 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1167 struct drm_file *file_priv);
1168 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1169 struct drm_file *file_priv);
1170 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1171 struct drm_file *file_priv);
1172 int i915_gem_execbuffer(struct drm_device *dev, void *data,
1173 struct drm_file *file_priv);
1174 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
1175 struct drm_file *file_priv);
1176 int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
1177 struct drm_file *file_priv);
1178 int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
1179 struct drm_file *file_priv);
1180 int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
1181 struct drm_file *file_priv);
1182 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
1183 struct drm_file *file_priv);
1184 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
1185 struct drm_file *file_priv);
1186 int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
1187 struct drm_file *file_priv);
1188 int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
1189 struct drm_file *file_priv);
1190 int i915_gem_set_tiling(struct drm_device *dev, void *data,
1191 struct drm_file *file_priv);
1192 int i915_gem_get_tiling(struct drm_device *dev, void *data,
1193 struct drm_file *file_priv);
1194 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
1195 struct drm_file *file_priv);
1196 void i915_gem_load(struct drm_device *dev);
1197 void i915_gem_unload(struct drm_device *dev);
1198 int i915_gem_init_object(struct drm_gem_object *obj);
1199 void i915_gem_free_object(struct drm_gem_object *obj);
1200 int i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment,
1201 bool map_and_fenceable);
1202 void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
1203 int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
1204 void i915_gem_lastclose(struct drm_device *dev);
1205 uint32_t i915_get_gem_seqno(struct drm_device *dev);
/*
 * Pin the fence register currently assigned to @obj (if any) by bumping
 * its pin_count, preventing it from being stolen/reassigned while in
 * use.  No-op when the object holds no fence (fence_reg ==
 * I915_FENCE_REG_NONE).  NOTE(review): caller locking requirements are
 * not visible in this listing -- presumably dev->struct_mutex; confirm.
 */
1208 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
1210 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1211 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1212 dev_priv->fence_regs[obj->fence_reg].pin_count++;
/*
 * Release a pin previously taken by i915_gem_object_pin_fence():
 * decrements the pin_count of @obj's fence register, if it has one.
 * NOTE(review): no visible underflow check -- callers must pair this
 * 1:1 with pin; locking requirements not visible in this listing.
 */
1219 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
1221 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1222 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1223 dev_priv->fence_regs[obj->fence_reg].pin_count--;
/* Request retirement and CPU-cache maintenance. */
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
/* Object allocation and GTT alignment queries. */
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
uint32_t i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
    uint32_t size, int tiling_mode);
int i915_mutex_lock_interruptible(struct drm_device *dev);
/* Domain management: move objects between GPU/CPU coherency domains. */
int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
int i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
    u32 alignment, struct intel_ring_buffer *pipelined);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
int i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int i915_gem_flush_ring(struct intel_ring_buffer *ring,
    uint32_t invalidate_domains, uint32_t flush_domains);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
/* Order rendering of @obj behind outstanding work on ring @to. */
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
    struct intel_ring_buffer *to);
int i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
/* Driver-wide GEM setup/teardown and idle paths. */
int i915_gem_idle(struct drm_device *dev);
int i915_gem_init(struct drm_device *dev);
int i915_gem_init_hw(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gpu_idle(struct drm_device *dev);
/* Active-list tracking, request submission and GPU reset handling. */
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
    struct intel_ring_buffer *ring, uint32_t seqno);
int i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file,
    struct drm_i915_gem_request *request);
int i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
void i915_gem_reset(struct drm_device *dev);
int i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno);
/* FreeBSD fault/mmap entry points for GEM objects. */
int i915_gem_mmap(struct drm_device *dev, uint64_t offset, int prot);
int i915_gem_fault(struct drm_device *dev, uint64_t offset, int prot,
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
    enum i915_cache_level cache_level);
/* i915_gem_context.c */
/* Per-file hardware context lifecycle and switching (Gen6+). */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
    struct drm_file *file, int to_id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file);
/* Physically contiguous ("phys") objects, used for e.g. legacy cursors. */
void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_detach_phys_object(struct drm_device *dev,
    struct drm_i915_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
    struct drm_i915_gem_object *obj, int id, int align);
/* Dumb-buffer support for unaccelerated KMS clients. */
int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
    struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
    uint32_t handle, uint64_t *offset);
int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
/* i915_gem_tiling.c */
/* Detect and apply the chipset's bit-6 swizzling for tiled surfaces. */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_do_bit_17_swizzle_page(struct drm_i915_gem_object *obj,
/* i915_gem_evict.c */
/* Evict objects from the GTT to make room for new bindings. */
int i915_gem_evict_something(struct drm_device *dev, int min_size,
    unsigned alignment, bool mappable);
int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
/* i915_suspend.c */
/* Save/restore display and chipset register state across suspend. */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
/* GMBUS (hardware i2c) controller setup and tuning. */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
extern void intel_gmbus_set_speed(device_t idev, int speed);
extern void intel_gmbus_force_bit(device_t idev, bool force_bit);
extern void intel_iic_reset(struct drm_device *dev);
/* A GMBUS port is valid iff it lies in [GMBUS_PORT_SSC, GMBUS_PORT_DPD]. */
static inline bool intel_gmbus_is_port_valid(unsigned port)
	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
extern device_t intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
/* intel_opregion.c */
/* ACPI OpRegion: firmware mailbox for backlight/ASLE/GSE events. */
int intel_opregion_setup(struct drm_device *dev);
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern void intel_opregion_gse_intr(struct drm_device *dev);
extern void intel_opregion_enable_asle(struct drm_device *dev);
/* i915_gem_gtt.c */
/* Aliasing per-process GTT (PPGTT) management. */
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
    struct drm_i915_gem_object *obj, enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
    struct drm_i915_gem_object *obj);
/* Global GTT binding: map/unmap objects into the aperture. */
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
    enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
int i915_gem_init_global_gtt(struct drm_device *dev, unsigned long start,
    unsigned long mappable_end, unsigned long end);
/* Modesetting entry points (intel_display.c and friends). */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_disable_fbc(struct drm_device *dev);
/* Power/clock management helpers (DRPS, RC6, RPS, PCH refclk). */
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void ironlake_init_pch_refclk(struct drm_device *dev);
extern void ironlake_enable_rc6(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
/* IPS (Intelligent Power Sharing) hooks shared with the GPU turbo code. */
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
extern void intel_gpu_ips_teardown(void);
/* Low-level forcewake implementations; callers use gen6_gt_force_wake_*. */
extern bool i915_semaphore_is_enabled(struct drm_device *dev);
extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
extern void vlv_force_wake_get(struct drm_i915_private *dev_priv);
extern void vlv_force_wake_put(struct drm_i915_private *dev_priv);
/* Error-state capture/dump for overlay and display, reported via sbuf. */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(
    struct drm_device *dev);
extern void intel_overlay_print_error_state(struct sbuf *m,
    struct intel_overlay_error_state *error);
extern struct intel_display_error_state *intel_display_capture_error_state(
    struct drm_device *dev);
extern void intel_display_print_error_state(struct sbuf *m,
    struct drm_device *dev, struct intel_display_error_state *error);
1389 trace_i915_reg_rw(boolean_t rw, int reg, uint64_t val, int sz)
1392 CTR4(KTR_DRM_REG, "[%x/%d] %c %x", reg, sz, rw ? "w" : "r", val);
/* On SNB platform, before reading ring registers forcewake bit
 * must be set to prevent GT core from power down and stale values being
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
/* Generate prototypes for the width-specific MMIO read accessors. */
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
/* Generate prototypes for the width-specific MMIO write accessors. */
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);
__i915_write(16, 16)
__i915_write(32, 32)
__i915_write(64, 64)
/*
 * Register access convenience macros.  The traced variants go through
 * i915_read*/i915_write* (which hook into KTR tracing); the _NOTRACE
 * variants hit the mmio map directly.  All expect a local `dev_priv`.
 */
#define I915_READ8(reg)		i915_read8(dev_priv, (reg))
#define I915_WRITE8(reg, val)	i915_write8(dev_priv, (reg), (val))
#define I915_READ16(reg)	i915_read16(dev_priv, (reg))
#define I915_WRITE16(reg, val)	i915_write16(dev_priv, (reg), (val))
#define I915_READ16_NOTRACE(reg)	DRM_READ16(dev_priv->mmio_map, (reg))
#define I915_WRITE16_NOTRACE(reg, val)	DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
#define I915_READ(reg)		i915_read32(dev_priv, (reg))
#define I915_WRITE(reg, val)	i915_write32(dev_priv, (reg), (val))
#define I915_READ_NOTRACE(reg)		DRM_READ32(dev_priv->mmio_map, (reg))
#define I915_WRITE_NOTRACE(reg, val)	DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
#define I915_WRITE64(reg, val)	i915_write64(dev_priv, (reg), (val))
#define I915_READ64(reg)	i915_read64(dev_priv, (reg))
/* Read back a register to flush posted writes; result discarded. */
#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
#define I915_VERBOSE 0
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
/* First status-page dword index available for driver-defined use. */
#define I915_GEM_HWS_INDEX		0x20
/* Fetch the per-chipset feature/ID table stashed in dev_private. */
#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
/* Platform identification: by PCI device ID or by the INTEL_INFO flags. */
#define IS_I830(dev)		((dev)->pci_device == 0x3577)
#define IS_845G(dev)		((dev)->pci_device == 0x2562)
#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev)		((dev)->pci_device == 0x2572)
#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
#define IS_I945G(dev)		((dev)->pci_device == 0x2772)
#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev)		(INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
/* Legacy ID-list checks kept for compatibility with older code paths. */
#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
    (dev)->pci_device == 0x2982 || \
    (dev)->pci_device == 0x2992 || \
    (dev)->pci_device == 0x29A2 || \
    (dev)->pci_device == 0x2A02 || \
    (dev)->pci_device == 0x2A12 || \
    (dev)->pci_device == 0x2A42 || \
    (dev)->pci_device == 0x2E02 || \
    (dev)->pci_device == 0x2E12 || \
    (dev)->pci_device == 0x2E22 || \
    (dev)->pci_device == 0x2E32)
#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
/* IGD == Pineview in this legacy naming. */
#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
    IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
/* XXXKIB LEGACY END */
/* Hardware generation checks, driven by the INTEL_INFO gen field. */
#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)
/* Ring/feature availability flags from the chipset info table. */
#define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
#define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
#define HAS_LLC(dev)            (INTEL_INFO(dev)->has_llc)
#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >=6)
#define HAS_OVERLAY(dev) 		(INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
/* Output/display capability checks. */
#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
#define HAS_PCH_SPLIT(dev) (INTEL_INFO(dev)->has_pch_split)
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
/* PCH (platform controller hub) identification for south-display code. */
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define PRIMARY_RINGBUFFER_SIZE         (128*1024)
/*
 * True if @seq1 is at or after @seq2, correct across 32-bit seqno
 * wraparound: the signed interpretation of the difference stays
 * non-negative for any "later or equal" seqno within half the range.
 */
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
	return ((int32_t)(seq1 - seq2) >= 0);
/* Flush the chipset write buffers; a no-op on Gen6+ (coherent LLC path). */
static inline void i915_gem_chipset_flush(struct drm_device *dev)
	if (INTEL_INFO(dev)->gen < 6)
		intel_gtt_chipset_flush();
/* Take a reference preventing the object's backing pages being released. */
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
	/* KASSERT(obj->pages != NULL, ("pin and NULL pages")); */
	obj->pages_pin_count++;
/* Drop a pages pin reference; underflow indicates an unbalanced caller. */
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
	KASSERT(obj->pages_pin_count != 0, ("zero pages_pin_count"));
	obj->pages_pin_count--;
/* Seqno that the next request emitted on @ring will use. */
u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);