From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
To: intel-xe@lists.freedesktop.org
Cc: intel-gfx@lists.freedesktop.org,
Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Subject: [PATCH 2/5] drm/xe: Use simple xchg to cache DPT
Date: Thu, 18 Apr 2024 18:55:16 +0200 [thread overview]
Message-ID: <20240418165520.88961-3-maarten.lankhorst@linux.intel.com> (raw)
In-Reply-To: <20240418165520.88961-1-maarten.lankhorst@linux.intel.com>
Cache the DPT buffer object in fb->dpt_vm instead of freeing it on every
unpin. xe_fb_dpt_free() parks the bo in the framebuffer's dpt_vm slot with
cmpxchg() (dropping the reference only if the slot is already occupied or
no fb is given), and xe_fb_dpt_alloc_pinned() reclaims it with xchg()
before falling back to a fresh allocation. Also give intel_dpt_create()
and intel_dpt_destroy() real implementations so a DPT bo can be
preallocated and released explicitly.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
drivers/gpu/drm/xe/display/xe_fb_pin.c | 33 +++++++++++++++-----------
1 file changed, 19 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index d967d00bbf9d..16a287cbebc5 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -111,9 +111,11 @@ static struct xe_bo *xe_fb_dpt_alloc(struct intel_framebuffer *fb)
return dpt;
}
-static void xe_fb_dpt_free(struct i915_vma *vma)
+static void xe_fb_dpt_free(struct i915_vma *vma, struct intel_framebuffer *fb)
{
- xe_bo_put(vma->dpt);
+ if (!fb || cmpxchg((struct xe_bo **)&fb->dpt_vm, NULL, vma->dpt))
+ xe_bo_put(vma->dpt);
+
vma->dpt = NULL;
}
@@ -151,10 +153,11 @@ static int xe_fb_dpt_map_ggtt(struct xe_bo *dpt)
static int
xe_fb_dpt_alloc_pinned(struct i915_vma *vma, struct intel_framebuffer *fb)
{
- struct xe_bo *dpt;
+ struct xe_bo *dpt = (struct xe_bo *)xchg(&fb->dpt_vm, NULL);
int err;
- dpt = xe_fb_dpt_alloc(fb);
+ if (!dpt)
+ dpt = xe_fb_dpt_alloc(fb);
if (IS_ERR(dpt))
return PTR_ERR(dpt);
@@ -170,17 +173,17 @@ xe_fb_dpt_alloc_pinned(struct i915_vma *vma, struct intel_framebuffer *fb)
ttm_bo_unreserve(&dpt->ttm);
}
if (err)
- xe_fb_dpt_free(vma);
+ xe_fb_dpt_free(vma, fb);
return err;
}
-static void xe_fb_dpt_unpin_free(struct i915_vma *vma)
+static void xe_fb_dpt_unpin_free(struct i915_vma *vma, struct intel_framebuffer *fb)
{
ttm_bo_reserve(&vma->dpt->ttm, false, false, NULL);
ttm_bo_unpin(&vma->dpt->ttm);
ttm_bo_unreserve(&vma->dpt->ttm);
- xe_fb_dpt_free(vma);
+ xe_fb_dpt_free(vma, fb);
}
static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb,
@@ -236,7 +239,7 @@ static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb,
ret = xe_fb_dpt_map_ggtt(dpt);
if (ret)
- xe_fb_dpt_unpin_free(vma);
+ xe_fb_dpt_unpin_free(vma, fb);
return ret;
}
@@ -398,14 +401,14 @@ static struct i915_vma *__xe_pin_fb_vma(struct intel_framebuffer *fb,
return ERR_PTR(ret);
}
-static void __xe_unpin_fb_vma(struct i915_vma *vma)
+static void __xe_unpin_fb_vma(struct i915_vma *vma, struct intel_framebuffer *fb)
{
struct xe_device *xe = to_xe_device(vma->bo->ttm.base.dev);
struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
if (vma->dpt) {
xe_ggtt_remove_bo(ggtt, vma->dpt);
- xe_fb_dpt_unpin_free(vma);
+ xe_fb_dpt_unpin_free(vma, fb);
} else {
if (!drm_mm_node_allocated(&vma->bo->ggtt_node) ||
vma->bo->ggtt_node.start != vma->node.start)
@@ -432,7 +435,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
- __xe_unpin_fb_vma(vma);
+ __xe_unpin_fb_vma(vma, NULL);
}
int intel_plane_pin_fb(struct intel_plane_state *plane_state)
@@ -454,7 +457,7 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
{
- __xe_unpin_fb_vma(old_plane_state->ggtt_vma);
+ __xe_unpin_fb_vma(old_plane_state->ggtt_vma, to_intel_framebuffer(old_plane_state->hw.fb));
old_plane_state->ggtt_vma = NULL;
}
@@ -464,10 +467,12 @@ void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
*/
struct i915_address_space *intel_dpt_create(struct intel_framebuffer *fb)
{
- return NULL;
+ return (struct i915_address_space *)xe_fb_dpt_alloc(fb);
}
void intel_dpt_destroy(struct i915_address_space *vm)
{
- return;
+ struct xe_bo *bo = (struct xe_bo *)vm;
+
+ xe_bo_put(bo);
}
--
2.43.0
next prev parent reply other threads:[~2024-04-18 16:55 UTC|newest]
Thread overview: 8+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-04-18 16:55 [PATCH 0/5] drm/xe: More fb pinning optimizations Maarten Lankhorst
2024-04-18 16:55 ` [PATCH 1/5] drm/xe/display: Preparations for preallocating dpt bo Maarten Lankhorst
2024-04-18 16:55 ` Maarten Lankhorst [this message]
2024-04-18 16:55 ` [PATCH 3/5] drm/xe: Remove safety check from __xe_ttm_stolen_io_mem_reserve_stolen Maarten Lankhorst
2024-04-18 16:55 ` [PATCH 4/5] drm/xe/display: Prevent overwriting original GGTT when taking over initial FB Maarten Lankhorst
2024-04-18 16:55 ` [PATCH 5/5] drm/xe/display: Re-use display vmas when possible Maarten Lankhorst
2024-04-18 18:00 ` ✗ Fi.CI.CHECKPATCH: warning for drm/xe: More fb pinning optimizations Patchwork
2024-04-18 18:13 ` ✗ Fi.CI.BAT: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240418165520.88961-3-maarten.lankhorst@linux.intel.com \
--to=maarten.lankhorst@linux.intel.com \
--cc=intel-gfx@lists.freedesktop.org \
--cc=intel-xe@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).