From: Friedrich Vock <friedrich.vock@gmx.de>
To: dri-devel@lists.freedesktop.org, amd-gfx@lists.freedesktop.org
Cc: "Pierre-Loup Griffais" <pgriffais@valvesoftware.com>,
"Tvrtko Ursulin" <tvrtko.ursulin@igalia.com>,
"Bas Nieuwenhuizen" <bas@basnieuwenhuizen.nl>,
"Joshua Ashton" <joshua@froggi.es>,
"Christian König" <christian.koenig@amd.com>,
"Alex Deucher" <alexander.deucher@amd.com>
Subject: [RFC PATCH 06/18] drm/ttm: Add public buffer eviction/uneviction functions
Date: Wed, 24 Apr 2024 18:56:56 +0200 [thread overview]
Message-ID: <20240424165937.54759-7-friedrich.vock@gmx.de> (raw)
In-Reply-To: <20240424165937.54759-1-friedrich.vock@gmx.de>
For now, they are only used internally inside TTM, but this will change
with the introduction of dynamic buffer priorities.
Signed-off-by: Friedrich Vock <friedrich.vock@gmx.de>
---
drivers/gpu/drm/ttm/ttm_bo.c | 168 ++++++++++++++++++++++++++++++++++-
include/drm/ttm/ttm_bo.h | 6 ++
2 files changed, 172 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 3b89fabc2f00a..3047c763eb4eb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -166,6 +166,111 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
return ret;
}
+/*
+ * ttm_next_evicted_bo - advance a cursor through @man's evicted-BO list.
+ *
+ * Returns the entry following @cursor (or the first entry when @cursor is
+ * NULL) with a reference taken via ttm_bo_get_unless_zero(), or NULL when
+ * the end of the list is reached or the next BO is already on its way to
+ * destruction (refcount hit zero).  The caller owns the returned reference.
+ *
+ * bdev->unevict_lock must be held when calling this function, and @cursor
+ * must still be linked on the list (callers restart with a NULL cursor if
+ * their previous entry was removed by a racing unevict).
+ *
+ * NOTE(review): @bdev is unused here — consider dropping the parameter.
+ * NOTE(review): plain "/**" without kernel-doc body was misleading for a
+ * static helper; demoted to a regular comment.
+ */
+static struct ttm_buffer_object *ttm_next_evicted_bo(struct ttm_device *bdev,
+ struct ttm_resource_manager *man,
+ struct ttm_buffer_object *cursor)
+{
+ struct ttm_buffer_object *bo = NULL;
+
+ if (cursor)
+ cursor = list_next_entry(cursor, evicted);
+ else
+ cursor = list_first_entry(&man->evicted, struct ttm_buffer_object, evicted);
+
+ /* Head reached means the list is exhausted; otherwise try to grab a
+ * reference — a zero refcount entry is being destroyed and is skipped
+ * by returning NULL (the caller simply stops walking).
+ */
+ if (!list_entry_is_head(cursor, &man->evicted, evicted))
+ bo = ttm_bo_get_unless_zero(cursor);
+ return bo;
+}
+
+/**
+ * ttm_mem_unevict_evicted - try to move evicted BOs back into a manager
+ * @bdev: the TTM device.
+ * @man: the resource manager whose evicted list should be drained.
+ * @interruptible: whether taking a BO's reservation lock may be interrupted.
+ *
+ * Walks @man's list of evicted BOs and attempts to move each one back to a
+ * driver-preferred placement via ttm_bo_try_unevict().  The walk stops early
+ * if a reservation lock cannot be taken (e.g. interrupted).  Uneviction is
+ * best-effort: BOs that cannot be moved back stay on the evicted list.
+ */
+void ttm_mem_unevict_evicted(struct ttm_device *bdev,
+ struct ttm_resource_manager *man,
+ bool interruptible)
+{
+ struct ttm_buffer_object *evicted_bo = NULL, *next_evicted_bo = NULL;
+ struct ttm_operation_ctx ctx;
+ int ret;
+
+ /* no_evict: moving a BO back in must not evict other BOs, or two
+ * threads could ping-pong buffers in and out of the manager.
+ */
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.interruptible = interruptible;
+ ctx.no_evict = true;
+
+ /* The cursor BO is referenced, so it stays valid after the lock is
+ * dropped; only list linkage is protected by unevict_lock.
+ */
+ spin_lock(&bdev->unevict_lock);
+ evicted_bo = ttm_next_evicted_bo(bdev, man, NULL);
+ spin_unlock(&bdev->unevict_lock);
+
+ while (evicted_bo) {
+ if (interruptible)
+ ret = dma_resv_lock_interruptible(
+ evicted_bo->base.resv, NULL);
+ else
+ ret = dma_resv_lock(evicted_bo->base.resv,
+ NULL);
+ if (ret) {
+ /* Couldn't reserve (likely interrupted): drop our
+ * reference and abandon the walk.
+ */
+ ttm_bo_put(evicted_bo);
+ break;
+ }
+
+ /* If we raced with another thread (and lost), the
+ * other thread already removed the buffer from the
+ * list. In that case, we need to start over because
+ * our current cursor got removed.
+ */
+ if (evicted_bo->evicted_type == TTM_NUM_MEM_TYPES)
+ ret = 0;
+ else
+ ret = ttm_bo_try_unevict(evicted_bo, &ctx);
+
+ /* On success the BO was unlinked from the evicted list, so the
+ * cursor is gone — restart from the head (NULL cursor).  On
+ * failure the BO is still linked and we continue after it.
+ */
+ next_evicted_bo = ret ? evicted_bo : NULL;
+
+ spin_lock(&bdev->unevict_lock);
+ next_evicted_bo = ttm_next_evicted_bo(bdev, man,
+ next_evicted_bo);
+ spin_unlock(&bdev->unevict_lock);
+
+ dma_resv_unlock(evicted_bo->base.resv);
+ ttm_bo_put(evicted_bo);
+
+ evicted_bo = next_evicted_bo;
+ }
+}
+EXPORT_SYMBOL(ttm_mem_unevict_evicted);
+
+/* Context for a deferred "unevict evicted BOs" request, allocated in
+ * ttm_mem_queue_unevict() and handed to bdev->wq.
+ */
+struct ttm_mem_unevict_work {
+ struct work_struct work; /* embedded work item; container_of anchor */
+ struct ttm_device *bdev; /* device whose evicted BOs are reconsidered */
+ struct ttm_resource_manager *man; /* manager that just freed space */
+};
+
+/* Worker callback: drain the manager's evicted list outside the path that
+ * freed the resource.  Runs from bdev->wq, so it may sleep; reservation
+ * locking is therefore non-interruptible here.
+ */
+static void ttm_mem_unevict_work(struct work_struct *work)
+{
+ struct ttm_mem_unevict_work *unevict_work;
+
+ unevict_work = container_of(work, typeof(*unevict_work), work);
+
+ ttm_mem_unevict_evicted(unevict_work->bdev, unevict_work->man,
+ false);
+
+ /* The work item was kzalloc'd in ttm_mem_queue_unevict() and ownership
+ * passed to this worker when it was queued; without this kfree() every
+ * queued uneviction leaked one struct ttm_mem_unevict_work.
+ */
+ kfree(unevict_work);
+}
+
+/* Schedule a deferred attempt to move evicted BOs back into @man, e.g.
+ * after a BO release freed up space.  Best-effort: if the work item cannot
+ * be allocated, no uneviction happens and no error is reported.
+ *
+ * NOTE(review): GFP_KERNEL may sleep — confirm every caller (this is
+ * reached from ttm_bo_cleanup_memtype_use()) runs in sleepable context.
+ * NOTE(review): silent drop on allocation failure means evicted BOs may
+ * stay evicted until the next trigger; worth a comment or a fallback.
+ */
+static void ttm_mem_queue_unevict(struct ttm_device *bdev,
+ struct ttm_resource_manager *man)
+{
+ struct ttm_mem_unevict_work *work;
+
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+
+ if (!work)
+ return;
+
+ INIT_WORK(&work->work, ttm_mem_unevict_work);
+ work->bdev = bdev;
+ work->man = man;
+ /* Prefer a worker on the same NUMA node as the device's pool. */
+ queue_work_node(bdev->pool.nid, bdev->wq, &work->work);
+}
+
/*
* Call bo::reserved.
* Will release GPU memory type usage on destruction.
@@ -176,6 +281,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
+ struct ttm_resource_manager *man = NULL;
+ struct ttm_device *bdev = bo->bdev;
+
+ if (bo->resource)
+ man = ttm_manager_type(bo->bdev, bo->resource->mem_type);
+
if (bo->bdev->funcs->delete_mem_notify)
bo->bdev->funcs->delete_mem_notify(bo);
if (bo->evicted_type != TTM_NUM_MEM_TYPES) {
@@ -187,6 +298,9 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
ttm_bo_tt_destroy(bo);
ttm_resource_free(bo, &bo->resource);
+
+ if (man)
+ ttm_mem_queue_unevict(bdev, man);
}
static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
@@ -432,8 +546,7 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
return 0;
}
-static int ttm_bo_evict(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx)
+int ttm_bo_evict(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
{
int evicted_type = bo->resource->mem_type;
struct ttm_device *bdev = bo->bdev;
@@ -499,6 +612,57 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
return ret;
}
+/**
+ * ttm_bo_try_unevict - try to move an evicted BO back to a preferred place
+ * @bo: the buffer object; its reservation lock must be held.
+ * @ctx: operation context (callers use no_evict to avoid displacing others).
+ *
+ * Asks the driver for uneviction placements and attempts the move,
+ * following multihop indirections.  On success (or when @bo is already
+ * being deleted) the BO is removed from its manager's evicted list, the
+ * manager's evicted-bytes accounting is reduced and @bo is marked as no
+ * longer evicted (evicted_type == TTM_NUM_MEM_TYPES).
+ *
+ * Return: 0 on success, -ENOSPC if the driver offered no placements,
+ * or a negative errno from the move itself.
+ */
+int ttm_bo_try_unevict(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx)
+{
+ struct ttm_resource_manager *man;
+ struct ttm_device *bdev = bo->bdev;
+ struct ttm_resource *unevict_mem;
+ struct ttm_placement placement;
+ struct ttm_place hop;
+ int ret = 0;
+
+ dma_resv_assert_held(bo->base.resv);
+
+ /* Manager the BO was originally evicted from; its accounting is
+ * adjusted below on success.
+ */
+ man = ttm_manager_type(bdev, bo->evicted_type);
+
+ /* A deleted BO is not worth moving: skip straight to dropping it
+ * from the evicted-list bookkeeping (ret is still 0 here).
+ */
+ if (bo->deleted)
+ goto out;
+
+ /* NOTE(review): only the counts are zeroed; the driver's
+ * unevict_flags() callback is expected to fill in the placement
+ * arrays as well — confirm all implementations do.
+ */
+ placement.num_placement = 0;
+ placement.num_busy_placement = 0;
+ bdev->funcs->unevict_flags(bo, &placement);
+
+ /* Driver declined to suggest any placement: nothing to do. */
+ if (!placement.num_placement && !placement.num_busy_placement)
+ return -ENOSPC;
+
+ ret = ttm_bo_mem_space(bo, &placement, &unevict_mem, ctx);
+ if (ret)
+ return ret;
+
+ /* Perform the move, bouncing through temporary placements as long
+ * as the driver reports -EMULTIHOP.
+ */
+ do {
+ ret = ttm_bo_handle_move_mem(bo, unevict_mem, true, ctx, &hop);
+ if (ret != -EMULTIHOP)
+ break;
+
+ ret = ttm_bo_bounce_temp_buffer(bo, &unevict_mem, ctx, &hop);
+ } while (!ret);
+
+ if (ret)
+ ttm_resource_free(bo, &unevict_mem);
+
+out:
+ /* Success (or deleted BO): unlink from the evicted list under
+ * unevict_lock and mark the BO as resident again.
+ */
+ if (!ret) {
+ spin_lock(&bdev->unevict_lock);
+ list_del_init(&bo->evicted);
+ man->evicted_bytes -= bo->base.size;
+ spin_unlock(&bdev->unevict_lock);
+ bo->evicted_type = TTM_NUM_MEM_TYPES;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(ttm_bo_try_unevict);
+
/**
* ttm_bo_eviction_valuable
*
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index a8f21092403d6..8f4e6366c0417 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -370,6 +370,9 @@ void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
struct ttm_lru_bulk_move *bulk);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place);
+int ttm_bo_evict(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx);
+int ttm_bo_try_unevict(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx);
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
enum ttm_bo_type type, struct ttm_placement *placement,
uint32_t alignment, struct ttm_operation_ctx *ctx,
@@ -395,6 +398,9 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
const struct ttm_place *place,
struct ttm_operation_ctx *ctx,
struct ww_acquire_ctx *ticket);
+void ttm_mem_unevict_evicted(struct ttm_device *bdev,
+ struct ttm_resource_manager *man,
+ bool interruptible);
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
struct vm_fault *vmf);
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
--
2.44.0
next prev parent reply other threads:[~2024-04-24 17:01 UTC|newest]
Thread overview: 41+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-04-24 16:56 [RFC PATCH 00/18] TTM interface for managing VRAM oversubscription Friedrich Vock
2024-04-24 16:56 ` [RFC PATCH 01/18] drm/ttm: Add tracking for evicted memory Friedrich Vock
2024-04-24 16:56 ` [RFC PATCH 02/18] drm/ttm: Add per-BO eviction tracking Friedrich Vock
2024-04-25 6:18 ` Christian König
2024-04-25 19:02 ` Matthew Brost
2024-04-26 6:27 ` Christian König
2024-04-24 16:56 ` [RFC PATCH 03/18] drm/ttm: Implement BO " Friedrich Vock
2024-04-24 16:56 ` [RFC PATCH 04/18] drm/ttm: Add driver funcs for uneviction control Friedrich Vock
2024-04-24 16:56 ` [RFC PATCH 05/18] drm/ttm: Add option to evict no BOs in operation Friedrich Vock
2024-04-25 6:20 ` Christian König
2024-04-24 16:56 ` Friedrich Vock [this message]
2024-04-24 16:56 ` [RFC PATCH 07/18] drm/amdgpu: Add TTM uneviction control functions Friedrich Vock
2024-04-24 16:56 ` [RFC PATCH 08/18] drm/amdgpu: Don't try moving BOs to preferred domain before submit Friedrich Vock
2024-04-25 6:36 ` Christian König
2024-04-24 16:56 ` [RFC PATCH 09/18] drm/amdgpu: Don't mark VRAM as a busy placement for VRAM|GTT resources Friedrich Vock
2024-04-25 6:24 ` Christian König
2024-04-24 16:57 ` [RFC PATCH 10/18] drm/amdgpu: Don't add GTT to initial domains after failing to allocate VRAM Friedrich Vock
2024-04-25 6:25 ` Christian König
2024-04-25 7:39 ` Friedrich Vock
2024-04-25 7:54 ` Christian König
2024-04-24 16:57 ` [RFC PATCH 11/18] drm/ttm: Bump BO priority count Friedrich Vock
2024-04-24 16:57 ` [RFC PATCH 12/18] drm/ttm: Do not evict BOs with higher priority Friedrich Vock
2024-04-25 6:26 ` Christian König
2024-04-24 16:57 ` [RFC PATCH 13/18] drm/ttm: Implement ttm_bo_update_priority Friedrich Vock
2024-04-25 6:29 ` Christian König
2024-04-24 16:57 ` [RFC PATCH 14/18] drm/ttm: Consider BOs placed in non-favorite locations evicted Friedrich Vock
2024-04-24 16:57 ` [RFC PATCH 15/18] drm/amdgpu: Set a default priority for user/kernel BOs Friedrich Vock
2024-04-24 16:57 ` [RFC PATCH 16/18] drm/amdgpu: Implement SET_PRIORITY GEM op Friedrich Vock
2024-04-25 6:32 ` Christian König
2024-04-25 6:46 ` Friedrich Vock
2024-04-25 6:58 ` Christian König
2024-04-25 7:06 ` Friedrich Vock
2024-04-25 7:15 ` Christian König
2024-04-25 7:39 ` Friedrich Vock
2024-04-24 16:57 ` [RFC PATCH 17/18] drm/amdgpu: Implement EVICTED_VRAM query Friedrich Vock
2024-04-24 16:57 ` [RFC PATCH 18/18] drm/amdgpu: Bump minor version Friedrich Vock
2024-04-25 6:54 ` [RFC PATCH 00/18] TTM interface for managing VRAM oversubscription Christian König
2024-04-25 13:22 ` Marek Olšák
2024-04-25 13:33 ` Christian König
2024-05-02 14:23 ` Maarten Lankhorst
2024-05-13 13:44 ` Friedrich Vock
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240424165937.54759-7-friedrich.vock@gmx.de \
--to=friedrich.vock@gmx.de \
--cc=alexander.deucher@amd.com \
--cc=amd-gfx@lists.freedesktop.org \
--cc=bas@basnieuwenhuizen.nl \
--cc=christian.koenig@amd.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=joshua@froggi.es \
--cc=pgriffais@valvesoftware.com \
--cc=tvrtko.ursulin@igalia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).