2 * Copyright © 2010 Daniel Vetter
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 #include <sys/cdefs.h>
26 __FBSDID("$FreeBSD$");
28 #include <dev/drm2/drmP.h>
29 #include <dev/drm2/i915/i915_drm.h>
30 #include <dev/drm2/i915/i915_drv.h>
31 #include <dev/drm2/i915/intel_drv.h>
32 #include <sys/sched.h>
33 #include <sys/sf_buf.h>
34 #include <vm/vm_pageout.h>
/* A GTT/PPGTT page-table entry is a 32-bit word on these generations. */
36 typedef uint32_t gtt_pte_t;
/*
 * Gen6+ address encoding shared by PTEs and PDEs: physical address bits
 * 39:32 are folded into entry bits 11:4 ("(addr >> 28) & 0xff0"); the
 * low address bits pass through unchanged.
 */
39 #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
/* Page-directory entry: bit 0 marks the entry valid. */
41 #define GEN6_PDE_VALID (1 << 0)
42 /* gen6+ has bit 11-4 for physical addr bit 39-32 */
43 #define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
/* Page-table entry: bit 0 valid, bits 2:1 select cacheability. */
45 #define GEN6_PTE_VALID (1 << 0)
46 #define GEN6_PTE_UNCACHED (1 << 1)
/* Haswell encodes "uncached" as all-zero cache bits instead. */
47 #define HSW_PTE_UNCACHED (0)
48 #define GEN6_PTE_CACHE_LLC (2 << 1)
49 #define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
50 #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
/*
 * Build a gen6+ PTE for a physical address at the requested cache level.
 * NOTE(review): this extract is missing interior lines (the parameter
 * carrying the address, the switch skeleton, breaks, and the return);
 * comments below cover only the statements that are visible.
 */
52 static inline gtt_pte_t pte_encode(struct drm_device *dev,
54 enum i915_cache_level level)
/* Every live entry carries the valid bit plus the encoded address. */
56 gtt_pte_t pte = GEN6_PTE_VALID;
57 pte |= GEN6_PTE_ADDR_ENCODE(addr);
/* Cache bits are chosen per level (switch lines missing from extract). */
60 case I915_CACHE_LLC_MLC:
61 /* Haswell doesn't set L3 this way */
63 pte |= GEN6_PTE_CACHE_LLC;
65 pte |= GEN6_PTE_CACHE_LLC_MLC;
68 pte |= GEN6_PTE_CACHE_LLC;
/* Haswell's "uncached" encoding is all-zero cache bits. */
72 pte |= HSW_PTE_UNCACHED;
74 pte |= GEN6_PTE_UNCACHED;
84 /* PPGTT support for Sandybdrige/Gen6 and later */
/*
 * Point num_entries PPGTT PTEs starting at first_entry at the scratch
 * page, walking one page table (I915_PPGTT_PT_ENTRIES entries) at a time.
 * NOTE(review): the outer loop header, sf_buf_free, and act_pd advance
 * are missing from this extract.
 */
85 static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
90 gtt_pte_t scratch_pte;
/* Split the linear entry index into (page-directory slot, PTE offset). */
91 unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
92 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
/* Scratch PTE: every cleared slot maps the shared scratch page. */
96 scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
/* Clamp this pass to the end of the current page table. */
100 last_pte = first_pte + num_entries;
101 if (last_pte > I915_PPGTT_PT_ENTRIES)
102 last_pte = I915_PPGTT_PT_ENTRIES;
/* Temporarily map the page-table page into KVA via an sf_buf. */
105 sf = sf_buf_alloc(ppgtt->pt_pages[act_pd], SFB_CPUPRIVATE);
106 pt_vaddr = (uint32_t *)(uintptr_t)sf_buf_kva(sf);
108 for (i = first_pte; i < last_pte; i++)
109 pt_vaddr[i] = scratch_pte;
/* Account for the entries written this pass. */
114 num_entries -= last_pte - first_pte;
/*
 * Create the aliasing PPGTT: allocate the per-process page tables, map
 * them for DMA when an IOMMU is in use, and point every entry at the
 * scratch page.  Returns 0 on success (return statements and the error
 * labels are among the lines missing from this extract).
 */
120 int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
122 struct drm_i915_private *dev_priv = dev->dev_private;
123 struct i915_hw_ppgtt *ppgtt;
124 unsigned first_pd_entry_in_global_pt;
128 /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
129 * entries. For aliasing ppgtt support we just steal them at the end for
/* PDEs are stolen from the tail of the global GTT. */
131 first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
133 ppgtt = malloc(sizeof(*ppgtt), DRM_I915_GEM, M_WAITOK | M_ZERO);
138 ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
/* One wired VM page per page table. */
139 ppgtt->pt_pages = malloc(sizeof(struct page *)*ppgtt->num_pd_entries,
140 DRM_I915_GEM, M_WAITOK | M_ZERO);
141 if (!ppgtt->pt_pages)
144 for (i = 0; i < ppgtt->num_pd_entries; i++) {
145 ppgtt->pt_pages[i] = vm_page_alloc(NULL, 0,
146 VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
148 if (!ppgtt->pt_pages[i])
/* With an IOMMU (DMAR) the page tables need explicit DMA mappings. */
152 if (dev_priv->mm.gtt->needs_dmar) {
153 ppgtt->pt_dma_addr = malloc(sizeof(dma_addr_t)
154 *ppgtt->num_pd_entries,
155 DRM_I915_GEM, M_WAITOK | M_ZERO);
156 if (!ppgtt->pt_dma_addr)
159 #ifdef CONFIG_INTEL_IOMMU /* <- Added as a marker on FreeBSD. */
160 for (i = 0; i < ppgtt->num_pd_entries; i++) {
163 pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
165 PCI_DMA_BIDIRECTIONAL);
167 if (pci_dma_mapping_error(dev->pdev,
173 ppgtt->pt_dma_addr[i] = pt_addr;
178 ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
/* Start with every PTE pointing at the scratch page. */
180 i915_ppgtt_clear_range(ppgtt, 0,
181 ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
/* Byte offset of the stolen PDEs within the global GTT. */
183 ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
185 dev_priv->mm.aliasing_ppgtt = ppgtt;
/*
 * Error unwind (labels missing from extract): undo DMA mappings made so
 * far, then free the dma-address array, the wired pages, and the ppgtt.
 */
189 #ifdef CONFIG_INTEL_IOMMU /* <- Added as a marker on FreeBSD. */
191 if (ppgtt->pt_dma_addr) {
192 for (i--; i >= 0; i--)
193 pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
194 4096, PCI_DMA_BIDIRECTIONAL);
198 free(ppgtt->pt_dma_addr, DRM_I915_GEM);
199 for (i = 0; i < ppgtt->num_pd_entries; i++) {
200 if (ppgtt->pt_pages[i]) {
/* Pages were allocated wired; unwire before freeing. */
201 vm_page_unwire(ppgtt->pt_pages[i], PQ_INACTIVE);
202 vm_page_free(ppgtt->pt_pages[i]);
205 free(ppgtt->pt_pages, DRM_I915_GEM);
207 free(ppgtt, DRM_I915_GEM);
/*
 * Tear down the aliasing PPGTT created by i915_gem_init_aliasing_ppgtt:
 * unmap the DMA mappings (IOMMU case), free the dma-address array, then
 * unwire and free every page-table page and the ppgtt itself.
 * NOTE(review): the NULL-check/early-return for a missing ppgtt is
 * presumably among the lines this extract drops — confirm upstream.
 */
212 void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
214 struct drm_i915_private *dev_priv = dev->dev_private;
215 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
221 #ifdef CONFIG_INTEL_IOMMU /* <- Added as a marker on FreeBSD. */
222 if (ppgtt->pt_dma_addr) {
223 for (i = 0; i < ppgtt->num_pd_entries; i++)
224 pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
225 4096, PCI_DMA_BIDIRECTIONAL);
229 free(ppgtt->pt_dma_addr, DRM_I915_GEM);
230 for (i = 0; i < ppgtt->num_pd_entries; i++) {
231 vm_page_unwire(ppgtt->pt_pages[i], PQ_INACTIVE);
232 vm_page_free(ppgtt->pt_pages[i]);
234 free(ppgtt->pt_pages, DRM_I915_GEM);
235 free(ppgtt, DRM_I915_GEM);
/*
 * Write PTEs for num_entries pages into the PPGTT starting at
 * first_entry, using the same one-page-table-at-a-time walk as
 * i915_ppgtt_clear_range.  NOTE(review): the pages-argument line,
 * sf_buf_free, the advance of the pages cursor, and act_pd++ are
 * missing from this extract.
 */
238 static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
240 unsigned first_entry,
241 unsigned num_entries,
242 enum i915_cache_level cache_level)
/* Split the linear entry index into (page-directory slot, PTE offset). */
245 unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
246 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
247 unsigned j, last_pte;
248 vm_paddr_t page_addr;
251 while (num_entries) {
/* Clamp this pass to the end of the current page table. */
252 last_pte = first_pte + num_entries;
253 if (last_pte > I915_PPGTT_PT_ENTRIES)
254 last_pte = I915_PPGTT_PT_ENTRIES;
/* Temporarily map the page-table page into KVA via an sf_buf. */
257 sf = sf_buf_alloc(ppgtt->pt_pages[act_pd], SFB_CPUPRIVATE);
258 pt_vaddr = (uint32_t *)(uintptr_t)sf_buf_kva(sf);
260 for (j = first_pte; j < last_pte; j++) {
/* Encode each backing page's physical address at cache_level. */
261 page_addr = VM_PAGE_TO_PHYS(*pages);
262 pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
271 num_entries -= last_pte - first_pte;
/*
 * Bind a GEM object into the aliasing PPGTT at its already-assigned GTT
 * offset: converts the object's byte offset/size into page-table entry
 * indices and delegates to i915_ppgtt_insert_pages.
 */
277 void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
278 struct drm_i915_gem_object *obj,
279 enum i915_cache_level cache_level)
281 i915_ppgtt_insert_pages(ppgtt,
283 obj->gtt_space->start >> PAGE_SHIFT,
284 obj->base.size >> PAGE_SHIFT,
/*
 * Unbind a GEM object from the aliasing PPGTT: repoint its PTE range at
 * the scratch page via i915_ppgtt_clear_range.
 */
288 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
289 struct drm_i915_gem_object *obj)
291 i915_ppgtt_clear_range(ppgtt,
292 obj->gtt_space->start >> PAGE_SHIFT,
293 obj->base.size >> PAGE_SHIFT);
/*
 * Enable the aliasing PPGTT in hardware: publish its page-directory
 * entries into the global GTT, apply gen-specific workaround/enable
 * bits, and program every ring's PP_DIR registers.
 */
296 void i915_gem_init_ppgtt(struct drm_device *dev)
298 drm_i915_private_t *dev_priv = dev->dev_private;
300 struct intel_ring_buffer *ring;
301 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
302 uint32_t __iomem *pd_addr;
/* Nothing to do if the aliasing PPGTT was never created. */
306 if (!dev_priv->mm.aliasing_ppgtt)
/* PDEs live inside the (mapped) global GTT at pd_offset. */
310 pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
311 for (i = 0; i < ppgtt->num_pd_entries; i++) {
/* Use the IOMMU DMA address when present, else the raw physical. */
314 if (dev_priv->mm.gtt->needs_dmar)
315 pt_addr = ppgtt->pt_dma_addr[i];
317 pt_addr = VM_PAGE_TO_PHYS(ppgtt->pt_pages[i]);
319 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
320 pd_entry |= GEN6_PDE_VALID;
322 /* NOTE Linux<->FreeBSD: Arguments of writel() are reversed. */
323 writel(pd_addr + i, pd_entry);
/* PP_DIR_BASE takes the directory offset in cachelines. */
327 pd_offset = ppgtt->pd_offset;
328 pd_offset /= 64; /* in cachelines, */
/* Gen6 needs extra ECO/GAB chicken bits before enabling PPGTT. */
331 if (INTEL_INFO(dev)->gen == 6) {
332 uint32_t ecochk, gab_ctl, ecobits;
334 ecobits = I915_READ(GAC_ECO_BITS);
335 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
337 gab_ctl = I915_READ(GAB_CTL);
338 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
340 ecochk = I915_READ(GAM_ECOCHK);
341 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
342 ECOCHK_PPGTT_CACHE64B);
343 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
344 } else if (INTEL_INFO(dev)->gen >= 7) {
345 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
346 /* GFX_MODE is per-ring on gen7+ */
349 for_each_ring(ring, dev_priv, i) {
350 if (INTEL_INFO(dev)->gen >= 7)
351 I915_WRITE(RING_MODE_GEN7(ring),
352 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
354 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
355 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
/*
 * On chipsets whose GTT updates require an idle GPU (do_idle_maps),
 * force the GPU idle non-interruptibly.  Returns the previous
 * mm.interruptible value so undo_idling() can restore it.
 * NOTE(review): the return statement is among the lines missing here.
 */
359 static bool do_idling(struct drm_i915_private *dev_priv)
361 bool ret = dev_priv->mm.interruptible;
363 if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
364 dev_priv->mm.interruptible = false;
365 if (i915_gpu_idle(dev_priv->dev)) {
366 DRM_ERROR("Couldn't idle GPU\n");
367 /* Wait a bit, in hopes it avoids the hang */
/* Restore the mm.interruptible state saved by do_idling(). */
375 static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
377 if (unlikely(dev_priv->mm.gtt->do_idle_maps))
378 dev_priv->mm.interruptible = interruptible;
/*
 * Point num_entries global-GTT PTEs starting at first_entry at the
 * scratch page.  Pre-gen6 hardware delegates to intel_gtt_clear_range();
 * gen6+ writes the memory-mapped GTT directly.
 */
382 static void i915_ggtt_clear_range(struct drm_device *dev,
383 unsigned first_entry,
384 unsigned num_entries)
386 struct drm_i915_private *dev_priv = dev->dev_private;
387 gtt_pte_t scratch_pte;
/* The GTT is CPU-mapped; index straight into it. */
388 gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
389 const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
392 if (INTEL_INFO(dev)->gen < 6) {
393 intel_gtt_clear_range(first_entry, num_entries);
/* Clamp (with a warning) rather than write past the table. */
397 if (WARN(num_entries > max_entries,
398 "First entry = %d; Num entries = %d (max=%d)\n",
399 first_entry, num_entries, max_entries))
400 num_entries = max_entries;
402 scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
403 for (i = 0; i < num_entries; i++)
404 iowrite32(scratch_pte, &gtt_base[i]);
/*
 * Rebuild the global GTT after it has been lost (e.g. across
 * suspend/resume): scrub the whole managed range to scratch pages, then
 * re-bind every currently bound object and flush the chipset.
 */
408 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
410 struct drm_i915_private *dev_priv = dev->dev_private;
411 struct drm_i915_gem_object *obj;
413 /* First fill our portion of the GTT with scratch pages */
414 i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
415 (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
417 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
/* Flush CPU caches before the GPU re-reads the pages. */
418 i915_gem_clflush_object(obj);
419 i915_gem_gtt_bind_object(obj, obj->cache_level);
422 i915_gem_chipset_flush(dev);
/*
 * Prepare an object's backing pages for GTT binding.  The Linux
 * dma_map_sg path is compiled out on FreeBSD (FREEBSD_WIP), so this is
 * effectively a no-op unless the object already has a DMA mapping.
 */
425 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
427 if (obj->has_dma_mapping)
431 if (!dma_map_sg(&obj->base.dev->pdev->dev,
432 obj->pages->sgl, obj->pages->nents,
433 PCI_DMA_BIDIRECTIONAL))
435 #endif /* FREEBSD_WIP */
441 * Binds an object into the global gtt with the specified cache level. The object
442 * will be accessible to the GPU via commands whose operands reference offsets
443 * within the global GTT as well as accessible by the GPU through the GMADR
444 * mapped BAR (dev_priv->mm.gtt->gtt).
446 static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
447 enum i915_cache_level level)
449 struct drm_device *dev = obj->base.dev;
450 struct drm_i915_private *dev_priv = dev->dev_private;
/* The object's entry range starts at its GTT-space page offset. */
451 const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
/* max_entries is only needed for the BUG_ON below. */
452 #if defined(INVARIANTS)
453 const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
455 gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
/* Write one encoded PTE per backing page. */
459 for (i = 0; i < obj->base.size >> PAGE_SHIFT; ++i) {
460 addr = VM_PAGE_TO_PHYS(obj->pages[i]);
461 iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
464 BUG_ON(i > max_entries);
465 BUG_ON(i != obj->base.size / PAGE_SIZE);
467 /* XXX: This serves as a posting read to make sure that the PTE has
468 * actually been updated. There is some concern that even though
469 * registers and PTEs are within the same BAR that they are potentially
470 * of NUMA access patterns. Therefore, even with the way we assume
471 * hardware should work, we must keep this posting read for paranoia.
474 WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
476 /* This next bit makes the above posting read even more important. We
477 * want to flush the TLBs only after we're certain all the PTE updates
/* Flush the GFX TLB so the new PTEs take effect. */
480 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
481 POSTING_READ(GFX_FLSH_CNTL_GEN6);
/*
 * Bind an object into the global GTT, dispatching by generation:
 * pre-gen6 goes through the AGP-style intel_gtt_insert_pages() (with
 * cacheability translated to AGP flags); gen6+ writes PTEs directly via
 * gen6_ggtt_bind_object().
 */
484 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
485 enum i915_cache_level cache_level)
487 struct drm_device *dev = obj->base.dev;
488 if (INTEL_INFO(dev)->gen < 6) {
489 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
490 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
491 intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
492 obj->base.size >> PAGE_SHIFT,
496 gen6_ggtt_bind_object(obj, cache_level);
/* Record that the object now has a global-GTT mapping. */
499 obj->has_global_gtt_mapping = 1;
/*
 * Unbind an object from the global GTT: repoint its entry range at the
 * scratch page and clear the mapping flag.
 */
502 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
504 i915_ggtt_clear_range(obj->base.dev,
505 obj->gtt_space->start >> PAGE_SHIFT,
506 obj->base.size >> PAGE_SHIFT);
508 obj->has_global_gtt_mapping = 0;
/*
 * Release GTT-related DMA state for an object.  Some chipsets require an
 * idle GPU around unmapping (do_idling/undo_idling); the Linux
 * dma_unmap_sg path itself is compiled out on FreeBSD (FREEBSD_WIP).
 */
511 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
513 struct drm_device *dev = obj->base.dev;
514 struct drm_i915_private *dev_priv = dev->dev_private;
/* Save interruptible state; forces a GPU idle when do_idle_maps. */
517 interruptible = do_idling(dev_priv);
520 if (!obj->has_dma_mapping)
521 dma_unmap_sg(&dev->pdev->dev,
522 obj->pages->sgl, obj->pages->nents,
523 PCI_DMA_BIDIRECTIONAL);
524 #endif /* FREEBSD_WIP */
526 undo_idling(dev_priv, interruptible);
/*
 * drm_mm color_adjust callback: keeps a guard page between GTT nodes of
 * different cache "colors" by shrinking the allocatable range.
 * NOTE(review): the adjustments to *start/*end are among the lines
 * missing from this extract; only the neighbor checks are visible.
 */
529 static void i915_gtt_color_adjust(struct drm_mm_node *node,
531 unsigned long *start,
534 if (node->color != color)
/* Also inspect the following node in the drm_mm list, if any. */
537 if (!list_empty(&node->node_list)) {
538 node = list_entry(node->node_list.next,
541 if (node->allocated && node->color != color)
/*
 * Initialize the driver's view of the global GTT: set up the drm_mm
 * allocator over [start, end), record the range bookkeeping, scrub the
 * range to scratch pages, and register the GMADR aperture as fictitious
 * memory with the VM so it can be mapped write-combining.
 */
546 void i915_gem_init_global_gtt(struct drm_device *dev,
548 unsigned long mappable_end,
551 drm_i915_private_t *dev_priv = dev->dev_private;
553 /* Subtract the guard page ... */
554 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
/* Guard-page coloring callback (conditional setup lines missing). */
556 dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
558 dev_priv->mm.gtt_start = start;
559 dev_priv->mm.gtt_mappable_end = mappable_end;
560 dev_priv->mm.gtt_end = end;
561 dev_priv->mm.gtt_total = end - start;
562 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
564 /* ... but ensure that we clear the entire range. */
565 i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
567 device_printf(dev->dev,
568 "taking over the fictitious range 0x%jx-0x%jx\n",
569 (uintmax_t)(dev_priv->mm.gtt_base_addr + start),
570 (uintmax_t)(dev_priv->mm.gtt_base_addr + start +
571 dev_priv->mm.mappable_gtt_total));
/* Let the VM hand out pages for the GMADR aperture range. */
572 vm_phys_fictitious_reg_range(dev_priv->mm.gtt_base_addr + start,
573 dev_priv->mm.gtt_base_addr + start + dev_priv->mm.mappable_gtt_total,
574 VM_MEMATTR_WRITE_COMBINING);
/*
 * Allocate the single zeroed, uncacheable scratch page that unbound GTT
 * entries point at, and record its (DMA) address in dev_priv->mm.gtt.
 * NOTE(review): the retry loop around vm_page_reclaim_contig and the
 * return statements are among the lines missing from this extract.
 */
577 static int setup_scratch_page(struct drm_device *dev)
579 struct drm_i915_private *dev_priv = dev->dev_private;
583 int req = VM_ALLOC_ZERO | VM_ALLOC_NOOBJ;
/* Below-4GiB, page-aligned, uncacheable contiguous allocation. */
586 page = vm_page_alloc_contig(NULL, 0, req, 1, 0, 0xffffffff,
587 PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
/* On failure, try reclaiming contiguous memory (retry lines missing). */
590 if (!vm_page_reclaim_contig(req, 1, 0, 0xffffffff,
/* VM_ALLOC_ZERO is only a hint; zero explicitly if needed. */
598 if ((page->flags & PG_ZERO) == 0)
599 pmap_zero_page(page);
601 #ifdef CONFIG_INTEL_IOMMU
602 dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
603 PCI_DMA_BIDIRECTIONAL);
604 if (pci_dma_mapping_error(dev->pdev, dma_addr))
/* Without an IOMMU the DMA address is just the physical address. */
607 dma_addr = VM_PAGE_TO_PHYS(page);
609 dev_priv->mm.gtt->scratch_page = page;
610 dev_priv->mm.gtt->scratch_page_dma = dma_addr;
/*
 * Release the scratch page set up by setup_scratch_page().  Only the
 * IOMMU unmap is visible in this extract; the page unwire/free lines
 * are missing (NOTE(review): confirm against upstream).
 */
615 static void teardown_scratch_page(struct drm_device *dev)
617 #ifdef CONFIG_INTEL_IOMMU /* <- Added as a marker on FreeBSD. */
618 struct drm_i915_private *dev_priv = dev->dev_private;
619 pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
620 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
/*
 * Decode the GGMS field of the SNB_GMCH_CTRL config word into the total
 * GTT size in bytes (field value is in 1MiB units, hence << 20).
 */
624 static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
626 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
627 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
628 return snb_gmch_ctl << 20;
/*
 * Decode the GMS field of SNB_GMCH_CTRL into the gen6 stolen-memory
 * size in bytes (32MiB units per the shift).
 */
631 static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
633 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
634 snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
635 return snb_gmch_ctl << 25; /* 32 MB units */
/*
 * Decode the gen7 (IVB) GMS field into stolen-memory size in bytes via
 * a lookup table of MiB values (non-linear encoding on gen7).
 */
638 static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
640 static const int stolen_decoder[] = {
641 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
642 snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
643 snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
644 return stolen_decoder[snb_gmch_ctl] << 20;
/*
 * Probe and map the GTT.  Pre-gen6 uses the legacy GMCH path (largely
 * FREEBSD_WIP here); gen6+ reads sizes from the GMCH control word,
 * allocates the scratch page, and maps the PTE array write-combining.
 * NOTE(review): return statements and the intervening error labels are
 * among the lines missing from this extract.
 */
647 int i915_gem_gtt_init(struct drm_device *dev)
649 struct drm_i915_private *dev_priv = dev->dev_private;
650 vm_paddr_t gtt_bus_addr;
654 /* On modern platforms we need not worry ourself with the legacy
655 * hostbridge query stuff. Skip it entirely
/* Legacy (pre-gen6) GMCH probe — mostly compiled out on FreeBSD. */
657 if (INTEL_INFO(dev)->gen < 6) {
659 ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
661 DRM_ERROR("failed to set up gmch\n");
664 #endif /* FREEBSD_WIP */
666 dev_priv->mm.gtt = intel_gtt_get();
667 if (!dev_priv->mm.gtt) {
668 DRM_ERROR("Failed to initialize GTT\n");
671 #endif /* FREEBSD_WIP */
/* Gen6+: allocate our own gtt descriptor. */
677 dev_priv->mm.gtt = malloc(sizeof(*dev_priv->mm.gtt), DRM_I915_GEM, M_WAITOK | M_ZERO);
678 if (!dev_priv->mm.gtt)
682 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
683 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
684 #endif /* FREEBSD_WIP */
686 #ifdef CONFIG_INTEL_IOMMU
687 dev_priv->mm.gtt->needs_dmar = 1;
690 /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
691 gtt_bus_addr = drm_get_resource_start(dev, 0) + (2<<20);
/* BAR2 is the GMADR aperture. */
692 dev_priv->mm.gtt->gma_bus_addr = drm_get_resource_start(dev, 2);
/* Decode GTT/stolen sizes from the GMCH control config word. */
695 pci_read_config_word(dev->dev, SNB_GMCH_CTRL, &snb_gmch_ctl);
696 dev_priv->mm.gtt->gtt_total_entries =
697 gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
698 if (INTEL_INFO(dev)->gen < 7)
699 dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
701 dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
703 dev_priv->mm.gtt->gtt_mappable_entries = drm_get_resource_len(dev, 2) >> PAGE_SHIFT;
704 /* 64/512MB is the current min/max we actually know of, but this is just a
705 * coarse sanity check.
707 if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
708 dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
709 DRM_ERROR("Unknown GMADR entries (%d)\n",
710 dev_priv->mm.gtt->gtt_mappable_entries);
715 ret = setup_scratch_page(dev);
717 DRM_ERROR("Scratch setup failed\n");
/* Map the PTE array write-combining so CPU PTE writes are fast. */
721 dev_priv->mm.gtt->gtt = pmap_mapdev_attr(gtt_bus_addr,
722 /* The size is used later by pmap_unmapdev. */
723 dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t),
724 VM_MEMATTR_WRITE_COMBINING);
725 if (!dev_priv->mm.gtt->gtt) {
726 DRM_ERROR("Failed to map the gtt page table\n");
727 teardown_scratch_page(dev);
732 /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
733 DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
734 DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
735 DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
/* Error path: free the descriptor allocated above. */
740 free(dev_priv->mm.gtt, DRM_I915_GEM);
742 if (INTEL_INFO(dev)->gen < 6)
744 #endif /* FREEBSD_WIP */
/*
 * Undo i915_gem_gtt_init(): unmap the PTE array, tear down the scratch
 * page, and (gen6+) free the gtt descriptor we allocated.  The pre-gen6
 * teardown line is compiled out (FREEBSD_WIP).
 */
748 void i915_gem_gtt_fini(struct drm_device *dev)
750 struct drm_i915_private *dev_priv = dev->dev_private;
/* Size must match the pmap_mapdev_attr() call in i915_gem_gtt_init(). */
751 pmap_unmapdev((vm_offset_t)dev_priv->mm.gtt->gtt,
752 dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
753 teardown_scratch_page(dev);
755 if (INTEL_INFO(dev)->gen < 6)
757 #endif /* FREEBSD_WIP */
/* Only gen6+ malloc'ed mm.gtt itself (see i915_gem_gtt_init). */
758 if (INTEL_INFO(dev)->gen >= 6)
759 free(dev_priv->mm.gtt, DRM_I915_GEM);