From 0e028d72a38863cc0b17ee20452420316436ab26 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri, 23 Aug 2019 15:14:21 +0100
Subject: [PATCH] drm/i915/gtt: Preallocate Braswell top-level page directory

In order for the Braswell top-level PD to remain the same from the time
of request construction to its submission onto HW, as we may be
asynchronously rewriting the page tables (thus changing the expected
register state after having already stored the old addresses in the
request), the top level PD must be preallocated.

So wave goodbye to our lazy allocation of those 4x2 pages.

v2: A little bit of write-flushing required (presumably it always has
been required, but now we are more susceptible and it is showing up!)

v3: Put back the forced-PD-reload on every batch, we can't survive
without it and explicitly marking the context for PD reload makes
Braswell turn nasty.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala
Reviewed-by: Mika Kuoppala
Link: https://patchwork.freedesktop.org/patch/msgid/20190823141421.2398-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c |  8 +++++++-
 drivers/gpu/drm/i915/i915_gem_gtt.c         | 10 +++++-----
 2 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 1cdfe05514c3f..1f735ca9b1736 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1003,12 +1003,18 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
 		intel_ring_advance(rq, cs);
 	} else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
 		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+		int err;
+
+		/* Magic required to prevent forcewake errors! */
+		err = engine->emit_flush(rq, EMIT_INVALIDATE);
+		if (err)
+			return err;
 
 		cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
 		if (IS_ERR(cs))
 			return PTR_ERR(cs);
 
-		*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
+		*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
 		for (i = GEN8_3LVL_PDPES; i--; ) {
 			const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 135f5494463a4..0db82921fb38b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -168,6 +168,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
 		pte_flags |= PTE_READ_ONLY;
 
 	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+	wmb();
 
 	return 0;
 }
@@ -1428,6 +1429,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
 		set_pd_entry(pd, idx, pde);
 		atomic_inc(px_used(pde)); /* keep pinned */
 	}
+	wmb();
 
 	return 0;
 }
@@ -1515,11 +1517,9 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 	}
 
 	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
-		if (intel_vgpu_active(i915)) {
-			err = gen8_preallocate_top_level_pdp(ppgtt);
-			if (err)
-				goto err_free_pd;
-		}
+		err = gen8_preallocate_top_level_pdp(ppgtt);
+		if (err)
+			goto err_free_pd;
 	}
 
 	ppgtt->vm.insert_entries = gen8_ppgtt_insert;