Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_gtt.c	400
1 file changed, 247 insertions(+), 153 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index dccdc8aad2e2..74df3d1581dd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -27,6 +27,7 @@
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "i915_vgpu.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
 
@@ -103,6 +104,9 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 	has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
 	has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
 
+	if (intel_vgpu_active(dev))
+		has_full_ppgtt = false; /* emulation is too hard */
+
 	/*
 	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
 	 * execlists, the sole mechanism available to submit work.
@@ -138,7 +142,6 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 	return has_aliasing_ppgtt ? 1 : 0;
 }
 
-
 static void ppgtt_bind_vma(struct i915_vma *vma,
 			   enum i915_cache_level cache_level,
 			   u32 flags);
@@ -275,6 +278,100 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
+static void unmap_and_free_pt(struct i915_page_table_entry *pt, struct drm_device *dev)
+{
+	if (WARN_ON(!pt->page))
+		return;
+	__free_page(pt->page);
+	kfree(pt);
+}
+
+static struct i915_page_table_entry *alloc_pt_single(struct drm_device *dev)
+{
+	struct i915_page_table_entry *pt;
+
+	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+	if (!pt)
+		return ERR_PTR(-ENOMEM);
+
+	pt->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!pt->page) {
+		kfree(pt);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return pt;
+}
+
+/**
+ * alloc_pt_range() - Allocate multiple page tables
+ * @pd:		The page directory which will have at least @count entries
+ *		available to point to the allocated page tables.
+ * @pde:	First page directory entry for which we are allocating.
+ * @count:	Number of pages to allocate.
+ * @dev:	DRM device.
+ *
+ * Allocates multiple page table pages and sets the appropriate entries in the
+ * page table structure within the page directory. Function cleans up after
+ * itself on any failures.
+ *
+ * Return: 0 if allocation succeeded.
+ */
+static int alloc_pt_range(struct i915_page_directory_entry *pd, uint16_t pde, size_t count,
+			  struct drm_device *dev)
+{
+	int i, ret;
+
+	/* 512 is the max page tables per page_directory on any platform. */
+	if (WARN_ON(pde + count > GEN6_PPGTT_PD_ENTRIES))
+		return -EINVAL;
+
+	for (i = pde; i < pde + count; i++) {
+		struct i915_page_table_entry *pt = alloc_pt_single(dev);
+
+		if (IS_ERR(pt)) {
+			ret = PTR_ERR(pt);
+			goto err_out;
+		}
+		WARN(pd->page_table[i],
+		     "Leaking page directory entry %d (%p)\n",
+		     i, pd->page_table[i]);
+		pd->page_table[i] = pt;
+	}
+
+	return 0;
+
+err_out:
+	while (i-- > pde)
+		unmap_and_free_pt(pd->page_table[i], dev);
+	return ret;
+}
+
+static void unmap_and_free_pd(struct i915_page_directory_entry *pd)
+{
+	if (pd->page) {
+		__free_page(pd->page);
+		kfree(pd);
+	}
+}
+
+static struct i915_page_directory_entry *alloc_pd_single(void)
+{
+	struct i915_page_directory_entry *pd;
+
+	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return ERR_PTR(-ENOMEM);
+
+	pd->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!pd->page) {
+		kfree(pd);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return pd;
+}
+
 /* Broadwell Page Directory Pointer Descriptors */
 static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
 			  uint64_t val)
@@ -307,7 +404,7 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
 
 	for (i = used_pd - 1; i >= 0; i--) {
-		dma_addr_t addr = ppgtt->pd_dma_addr[i];
+		dma_addr_t addr = ppgtt->pdp.page_directory[i]->daddr;
 		ret = gen8_write_pdp(ring, i, addr);
 		if (ret)
 			return ret;
@@ -334,7 +431,24 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 				      I915_CACHE_LLC, use_scratch);
 
 	while (num_entries) {
-		struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];
+		struct i915_page_directory_entry *pd;
+		struct i915_page_table_entry *pt;
+		struct page *page_table;
+
+		if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
+			continue;
+
+		pd = ppgtt->pdp.page_directory[pdpe];
+
+		if (WARN_ON(!pd->page_table[pde]))
+			continue;
+
+		pt = pd->page_table[pde];
+
+		if (WARN_ON(!pt->page))
+			continue;
+
+		page_table = pt->page;
 
 		last_pte = pte + num_entries;
 		if (last_pte > GEN8_PTES_PER_PAGE)
@@ -375,11 +489,16 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 	pt_vaddr = NULL;
 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
-		if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
+		if (WARN_ON(pdpe >= GEN8_LEGACY_PDPES))
 			break;
 
-		if (pt_vaddr == NULL)
-			pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
+		if (pt_vaddr == NULL) {
+			struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[pdpe];
+			struct i915_page_table_entry *pt = pd->page_table[pde];
+			struct page *page_table = pt->page;
+
+			pt_vaddr = kmap_atomic(page_table);
+		}
 
 		pt_vaddr[pte] =
 			gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
@@ -403,29 +522,33 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 	}
 }
 
-static void gen8_free_page_tables(struct page **pt_pages)
+static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct drm_device *dev)
 {
 	int i;
 
-	if (pt_pages == NULL)
+	if (!pd->page)
 		return;
 
-	for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
-		if (pt_pages[i])
-			__free_pages(pt_pages[i], 0);
+	for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
+		if (WARN_ON(!pd->page_table[i]))
+			continue;
+
+		unmap_and_free_pt(pd->page_table[i], dev);
+		pd->page_table[i] = NULL;
+	}
 }
 
-static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
+static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 {
 	int i;
 
 	for (i = 0; i < ppgtt->num_pd_pages; i++) {
-		gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
-		kfree(ppgtt->gen8_pt_pages[i]);
-		kfree(ppgtt->gen8_pt_dma_addr[i]);
-	}
+		if (WARN_ON(!ppgtt->pdp.page_directory[i]))
+			continue;
 
-	__free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
+		gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
+		unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
+	}
 }
 
 static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
@@ -436,14 +559,23 @@ static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
 	for (i = 0; i < ppgtt->num_pd_pages; i++) {
 		/* TODO: In the future we'll support sparse mappings, so this
 		 * will have to change. */
-		if (!ppgtt->pd_dma_addr[i])
+		if (!ppgtt->pdp.page_directory[i]->daddr)
 			continue;
 
-		pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
+		pci_unmap_page(hwdev, ppgtt->pdp.page_directory[i]->daddr, PAGE_SIZE,
 			       PCI_DMA_BIDIRECTIONAL);
 
 		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
-			dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+			struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
+			struct i915_page_table_entry *pt;
+			dma_addr_t addr;
+
+			if (WARN_ON(!pd->page_table[j]))
+				continue;
+
+			pt = pd->page_table[j];
+			addr = pt->daddr;
+
 			if (addr)
 				pci_unmap_page(hwdev, addr, PAGE_SIZE,
 					       PCI_DMA_BIDIRECTIONAL);
@@ -460,86 +592,47 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 	gen8_ppgtt_free(ppgtt);
 }
 
-static struct page **__gen8_alloc_page_tables(void)
+static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
 {
-	struct page **pt_pages;
-	int i;
-
-	pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
-	if (!pt_pages)
-		return ERR_PTR(-ENOMEM);
-
-	for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
-		pt_pages[i] = alloc_page(GFP_KERNEL);
-		if (!pt_pages[i])
-			goto bail;
-	}
-
-	return pt_pages;
-
-bail:
-	gen8_free_page_tables(pt_pages);
-	kfree(pt_pages);
-	return ERR_PTR(-ENOMEM);
-}
-
-static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
-					   const int max_pdp)
-{
-	struct page **pt_pages[GEN8_LEGACY_PDPS];
 	int i, ret;
 
-	for (i = 0; i < max_pdp; i++) {
-		pt_pages[i] = __gen8_alloc_page_tables();
-		if (IS_ERR(pt_pages[i])) {
-			ret = PTR_ERR(pt_pages[i]);
+	for (i = 0; i < ppgtt->num_pd_pages; i++) {
+		ret = alloc_pt_range(ppgtt->pdp.page_directory[i],
+				     0, GEN8_PDES_PER_PAGE, ppgtt->base.dev);
+		if (ret)
 			goto unwind_out;
-		}
 	}
 
-	/* NB: Avoid touching gen8_pt_pages until last to keep the allocation,
-	 * "atomic" - for cleanup purposes.
-	 */
-	for (i = 0; i < max_pdp; i++)
-		ppgtt->gen8_pt_pages[i] = pt_pages[i];
-
 	return 0;
 
 unwind_out:
-	while (i--) {
-		gen8_free_page_tables(pt_pages[i]);
-		kfree(pt_pages[i]);
-	}
+	while (i--)
+		gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
 
-	return ret;
+	return -ENOMEM;
 }
 
-static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
+static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
+						const int max_pdp)
 {
 	int i;
 
-	for (i = 0; i < ppgtt->num_pd_pages; i++) {
-		ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
-						     sizeof(dma_addr_t),
-						     GFP_KERNEL);
-		if (!ppgtt->gen8_pt_dma_addr[i])
-			return -ENOMEM;
+	for (i = 0; i < max_pdp; i++) {
+		ppgtt->pdp.page_directory[i] = alloc_pd_single();
+		if (IS_ERR(ppgtt->pdp.page_directory[i]))
+			goto unwind_out;
 	}
 
-	return 0;
-}
+	ppgtt->num_pd_pages = max_pdp;
+	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPES);
 
-static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
-						const int max_pdp)
-{
-	ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
-	if (!ppgtt->pd_pages)
-		return -ENOMEM;
+	return 0;
 
-	ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
-	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
+unwind_out:
+	while (i--)
+		unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
 
-	return 0;
+	return -ENOMEM;
 }
 
 static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
@@ -551,18 +644,16 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
-	if (ret) {
-		__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
-		return ret;
-	}
+	ret = gen8_ppgtt_allocate_page_tables(ppgtt);
+	if (ret)
+		goto err_out;
 
 	ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
 
-	ret = gen8_ppgtt_allocate_dma(ppgtt);
-	if (ret)
-		gen8_ppgtt_free(ppgtt);
+	return 0;
 
+err_out:
+	gen8_ppgtt_free(ppgtt);
 	return ret;
 }
 
@@ -573,14 +664,14 @@ static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
 	int ret;
 
 	pd_addr = pci_map_page(ppgtt->base.dev->pdev,
-			       &ppgtt->pd_pages[pd], 0,
+			       ppgtt->pdp.page_directory[pd]->page, 0,
 			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 
 	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
 	if (ret)
 		return ret;
 
-	ppgtt->pd_dma_addr[pd] = pd_addr;
+	ppgtt->pdp.page_directory[pd]->daddr = pd_addr;
 
 	return 0;
 }
@@ -590,17 +681,18 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
 					const int pt)
 {
 	dma_addr_t pt_addr;
-	struct page *p;
+	struct i915_page_directory_entry *pdir = ppgtt->pdp.page_directory[pd];
+	struct i915_page_table_entry *ptab = pdir->page_table[pt];
+	struct page *p = ptab->page;
 	int ret;
 
-	p = ppgtt->gen8_pt_pages[pd][pt];
 	pt_addr = pci_map_page(ppgtt->base.dev->pdev,
 			       p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
 	if (ret)
 		return ret;
 
-	ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
+	ptab->daddr = pt_addr;
 
 	return 0;
 }
@@ -653,10 +745,12 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 	 * will never need to touch the PDEs again.
 	 */
 	for (i = 0; i < max_pdp; i++) {
+		struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
 		gen8_ppgtt_pde_t *pd_vaddr;
-		pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
+		pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page);
 		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
-			dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+			struct i915_page_table_entry *pt = pd->page_table[j];
+			dma_addr_t addr = pt->daddr;
 			pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
 						      I915_CACHE_LLC);
 		}
@@ -699,14 +793,15 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
 
 	pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
-		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
+		ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t);
 
 	seq_printf(m, "  VM %p (pd_offset %x-%x):\n", vm,
-		   ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
+		   ppgtt->pd.pd_offset,
+		   ppgtt->pd.pd_offset + ppgtt->num_pd_entries);
 	for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
 		u32 expected;
 		gen6_gtt_pte_t *pt_vaddr;
-		dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
+		dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr;
 		pd_entry = readl(pd_addr + pde);
 		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
 
@@ -717,7 +812,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 				   expected);
 		seq_printf(m, "\tPDE: %x\n", pd_entry);
 
-		pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
+		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page);
 		for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte += 4) {
 			unsigned long va =
 				(pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
@@ -750,13 +845,13 @@ static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
 	uint32_t pd_entry;
 	int i;
 
-	WARN_ON(ppgtt->pd_offset & 0x3f);
+	WARN_ON(ppgtt->pd.pd_offset & 0x3f);
 	pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
-		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
+		ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t);
 	for (i = 0; i < ppgtt->num_pd_entries; i++) {
 		dma_addr_t pt_addr;
 
-		pt_addr = ppgtt->pt_dma_addr[i];
+		pt_addr = ppgtt->pd.page_table[i]->daddr;
 		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
 		pd_entry |= GEN6_PDE_VALID;
 
@@ -767,9 +862,9 @@ static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
 
 static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 {
-	BUG_ON(ppgtt->pd_offset & 0x3f);
+	BUG_ON(ppgtt->pd.pd_offset & 0x3f);
 
-	return (ppgtt->pd_offset / 64) << 16;
+	return (ppgtt->pd.pd_offset / 64) << 16;
 }
 
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
@@ -797,6 +892,16 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	return 0;
 }
 
+static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
+			  struct intel_engine_cs *ring)
+{
+	struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+
+	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+	return 0;
+}
+
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct intel_engine_cs *ring)
 {
@@ -922,7 +1027,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 		if (last_pte > I915_PPGTT_PT_ENTRIES)
 			last_pte = I915_PPGTT_PT_ENTRIES;
 
-		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
 
 		for (i = first_pte; i < last_pte; i++)
 			pt_vaddr[i] = scratch_pte;
@@ -951,7 +1056,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 	pt_vaddr = NULL;
 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
 		if (pt_vaddr == NULL)
-			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+			pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
 
 		pt_vaddr[act_pte] =
 			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
@@ -972,22 +1077,20 @@ static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
 {
 	int i;
 
-	if (ppgtt->pt_dma_addr) {
-		for (i = 0; i < ppgtt->num_pd_entries; i++)
-			pci_unmap_page(ppgtt->base.dev->pdev,
-				       ppgtt->pt_dma_addr[i],
-				       4096, PCI_DMA_BIDIRECTIONAL);
-	}
+	for (i = 0; i < ppgtt->num_pd_entries; i++)
+		pci_unmap_page(ppgtt->base.dev->pdev,
+			       ppgtt->pd.page_table[i]->daddr,
+			       4096, PCI_DMA_BIDIRECTIONAL);
 }
 
 static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 {
 	int i;
 
-	kfree(ppgtt->pt_dma_addr);
 	for (i = 0; i < ppgtt->num_pd_entries; i++)
-		__free_page(ppgtt->pt_pages[i]);
-	kfree(ppgtt->pt_pages);
+		unmap_and_free_pt(ppgtt->pd.page_table[i], ppgtt->base.dev);
+
+	unmap_and_free_pd(&ppgtt->pd);
 }
 
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
@@ -1032,31 +1135,13 @@ alloc:
 		goto alloc;
 	}
 
+	if (ret)
+		return ret;
+
 	if (ppgtt->node.start < dev_priv->gtt.mappable_end)
 		DRM_DEBUG("Forced to use aperture for PDEs\n");
 
 	ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
-	return ret;
-}
-
-static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
-{
-	int i;
-
-	ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
-				  GFP_KERNEL);
-
-	if (!ppgtt->pt_pages)
-		return -ENOMEM;
-
-	for (i = 0; i < ppgtt->num_pd_entries; i++) {
-		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
-		if (!ppgtt->pt_pages[i]) {
-			gen6_ppgtt_free(ppgtt);
-			return -ENOMEM;
-		}
-	}
-
 	return 0;
 }
 
@@ -1068,20 +1153,14 @@ static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
 	if (ret)
 		return ret;
 
-	ret = gen6_ppgtt_allocate_page_tables(ppgtt);
+	ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries,
+			     ppgtt->base.dev);
+
 	if (ret) {
 		drm_mm_remove_node(&ppgtt->node);
 		return ret;
 	}
 
-	ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
-				     GFP_KERNEL);
-	if (!ppgtt->pt_dma_addr) {
-		drm_mm_remove_node(&ppgtt->node);
-		gen6_ppgtt_free(ppgtt);
-		return -ENOMEM;
-	}
-
 	return 0;
 }
 
@@ -1091,9 +1170,11 @@ static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
 	int i;
 
 	for (i = 0; i < ppgtt->num_pd_entries; i++) {
+		struct page *page;
 		dma_addr_t pt_addr;
 
-		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
+		page = ppgtt->pd.page_table[i]->page;
+		pt_addr = pci_map_page(dev->pdev, page, 0, 4096,
 				       PCI_DMA_BIDIRECTIONAL);
 
 		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
@@ -1101,7 +1182,7 @@ static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
 			return -EIO;
 		}
 
-		ppgtt->pt_dma_addr[i] = pt_addr;
+		ppgtt->pd.page_table[i]->daddr = pt_addr;
 	}
 
 	return 0;
@@ -1123,6 +1204,9 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	} else
 		BUG();
 
+	if (intel_vgpu_active(dev))
+		ppgtt->switch_mm = vgpu_mm_switch;
+
 	ret = gen6_ppgtt_alloc(ppgtt);
 	if (ret)
 		return ret;
@@ -1137,10 +1221,10 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
 	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
 	ppgtt->base.start = 0;
-	ppgtt->base.total =  ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
+	ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
 	ppgtt->debug_dump = gen6_dump_ppgtt;
 
-	ppgtt->pd_offset =
+	ppgtt->pd.pd_offset =
 		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
 
 	ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
@@ -1151,7 +1235,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 
 	gen6_write_pdes(ppgtt);
 	DRM_DEBUG("Adding PPGTT at offset %x\n",
-		  ppgtt->pd_offset << 10);
+		  ppgtt->pd.pd_offset << 10);
 
 	return 0;
 }
@@ -1753,6 +1837,16 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 
 	/* Subtract the guard page ... */
 	drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
+
+	dev_priv->gtt.base.start = start;
+	dev_priv->gtt.base.total = end - start;
+
+	if (intel_vgpu_active(dev)) {
+		ret = intel_vgt_balloon(dev);
+		if (ret)
+			return ret;
+	}
+
 	if (!HAS_LLC(dev))
 		dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
 
@@ -1772,9 +1866,6 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 		vma->bound |= GLOBAL_BIND;
 	}
 
-	dev_priv->gtt.base.start = start;
-	dev_priv->gtt.base.total = end - start;
-
 	/* Clear any non-preallocated blocks */
 	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
 		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
@@ -1826,6 +1917,9 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
 	}
 
 	if (drm_mm_initialized(&vm->mm)) {
+		if (intel_vgpu_active(dev))
+			intel_vgt_deballoon();
+
 		drm_mm_takedown(&vm->mm);
 		list_del(&vm->global_link);
 	}
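
The new alloc_pt_single()/unmap_and_free_pt() pair and the unwind loop in alloc_pt_range() follow a common kernel idiom: on a mid-loop allocation failure, free exactly the entries this call created and leave the rest of the directory untouched. Below is a minimal userspace sketch of that idiom; struct page_table, PD_ENTRIES and the malloc-based backing are illustrative stand-ins for the driver's i915_page_table_entry and page allocator, not code from this patch.

#include <stdio.h>
#include <stdlib.h>

#define PD_ENTRIES 512		/* mirrors GEN6_PPGTT_PD_ENTRIES */

struct page_table {
	void *page;		/* stand-in for the backing struct page */
};

static void free_pt(struct page_table *pt)
{
	free(pt->page);
	free(pt);
}

static struct page_table *alloc_pt_single(void)
{
	struct page_table *pt = calloc(1, sizeof(*pt));

	if (!pt)
		return NULL;
	pt->page = calloc(1, 4096);	/* one zeroed "page" */
	if (!pt->page) {
		free(pt);
		return NULL;
	}
	return pt;
}

/* Allocate @count tables starting at @pde; clean up after ourselves on error. */
static int alloc_pt_range(struct page_table *pd[PD_ENTRIES],
			  unsigned int pde, unsigned int count)
{
	unsigned int i;

	if (pde + count > PD_ENTRIES)
		return -1;

	for (i = pde; i < pde + count; i++) {
		pd[i] = alloc_pt_single();
		if (!pd[i])
			goto err_out;
	}
	return 0;

err_out:
	/* i points at the failed slot; free only what this call allocated. */
	while (i-- > pde) {
		free_pt(pd[i]);
		pd[i] = NULL;
	}
	return -1;
}

int main(void)
{
	static struct page_table *pd[PD_ENTRIES];

	if (alloc_pt_range(pd, 0, 16) == 0)
		printf("allocated 16 page tables\n");
	return 0;
}

The `while (i-- > pde)` test runs before the decrement, so the failed slot itself is skipped and entries that existed before @pde are never touched, which is why the kernel version can warn about (rather than free) pre-populated slots.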
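The gen8 hunks replace the flat gen8_pt_pages[pdpe][pde] array with an explicit pdp -> page_directory -> page_table walk, each level indexed by a slice of the GPU virtual address. A sketch of the index arithmetic that walk implies, assuming the conventional gen8 geometry of 4 KiB pages and 512-entry tables (GEN8_LEGACY_PDPES == 4); the shift constants are assumptions for illustration, not values quoted from this file.

#include <stdint.h>
#include <stdio.h>

/* Assumed gen8 layout: 9 bits of PTE index, 9 of PDE, 2 of PDPE. */
#define PTE_SHIFT	12	/* 4 KiB pages */
#define PDE_SHIFT	21	/* 512 PTEs per table */
#define PDPE_SHIFT	30	/* 512 PDEs per directory */

int main(void)
{
	uint64_t addr = 0x12345678;	/* arbitrary GPU virtual address */

	unsigned int pte  = (addr >> PTE_SHIFT) & 0x1ff;
	unsigned int pde  = (addr >> PDE_SHIFT) & 0x1ff;
	unsigned int pdpe = (addr >> PDPE_SHIFT) & 0x3;	/* 4 legacy PDPEs */

	/* The clear/insert loops above dereference
	 * pdp.page_directory[pdpe]->page_table[pde]->page, then index
	 * the kmap'ed page with pte. */
	printf("pdpe=%u pde=%u pte=%u\n", pdpe, pde, pte);
	return 0;
}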
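On gen6 the page directory lives inside the GGTT, so gen6_ppgtt_init() derives pd.pd_offset from the drm_mm node's start, and get_pd_offset() repacks it for the PP_DIR_BASE write; the `& 0x3f` asserts enforce the 64-byte alignment that packing requires. A worked example of that arithmetic, using a hypothetical node start value:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;		/* PAGE_SIZE */
	unsigned long pte_size = 4;		/* sizeof(gen6_gtt_pte_t) */
	unsigned long node_start = 0x100000;	/* hypothetical GGTT offset */

	/* gen6_ppgtt_init(): offset of the PD expressed in PTE bytes */
	unsigned long pd_offset = node_start / page_size * pte_size;

	/* get_pd_offset(): 64-byte units, shifted into the upper half */
	unsigned long reg = (pd_offset / 64) << 16;

	/* prints pd_offset=0x400 reg=0x100000 */
	printf("pd_offset=%#lx reg=%#lx\n", pd_offset, reg);
	return 0;
}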