* [PATCH 1/4] drm/ttm: add a pointer to the allocating BO into ttm_resource
@ 2021-06-10 11:05 Christian König
  2021-06-10 11:05 ` [PATCH 2/4] drm/ttm: add ttm_resource_fini Christian König
                   ` (3 more replies)
  0 siblings, 4 replies; 7+ messages in thread
From: Christian König @ 2021-06-10 11:05 UTC
  To: thomas_os, matthew.auld, dri-devel

We are going to need this for the next patch and it allows us to clean
up amdgpu as well.
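
To illustrate what the new back pointer buys us: any code that only
holds a struct ttm_resource can now get back to the owning BO without a
driver-private wrapper like amdgpu_gtt_node (sketch only, not part of
the diff below):

	/* res was initialized with ttm_resource_init(tbo, place, res) */
	struct ttm_buffer_object *bo = res->bo; /* back pointer, not refcounted */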

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 47 ++++++++-------------
 drivers/gpu/drm/ttm/ttm_resource.c          |  1 +
 include/drm/ttm/ttm_resource.h              |  1 +
 3 files changed, 19 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 194f9eecf89c..8e3f5da44e4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -26,23 +26,12 @@
 
 #include "amdgpu.h"
 
-struct amdgpu_gtt_node {
-	struct ttm_buffer_object *tbo;
-	struct ttm_range_mgr_node base;
-};
-
 static inline struct amdgpu_gtt_mgr *
 to_gtt_mgr(struct ttm_resource_manager *man)
 {
 	return container_of(man, struct amdgpu_gtt_mgr, manager);
 }
 
-static inline struct amdgpu_gtt_node *
-to_amdgpu_gtt_node(struct ttm_resource *res)
-{
-	return container_of(res, struct amdgpu_gtt_node, base.base);
-}
-
 /**
  * DOC: mem_info_gtt_total
  *
@@ -107,9 +96,9 @@ const struct attribute_group amdgpu_gtt_mgr_attr_group = {
  */
 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
 {
-	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 
-	return drm_mm_node_allocated(&node->base.mm_nodes[0]);
+	return drm_mm_node_allocated(&node->mm_nodes[0]);
 }
 
 /**
@@ -129,7 +118,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 {
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
 	uint32_t num_pages = PFN_UP(tbo->base.size);
-	struct amdgpu_gtt_node *node;
+	struct ttm_range_mgr_node *node;
 	int r;
 
 	spin_lock(&mgr->lock);
@@ -141,19 +130,17 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 	atomic64_sub(num_pages, &mgr->available);
 	spin_unlock(&mgr->lock);
 
-	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
+	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
 	if (!node) {
 		r = -ENOMEM;
 		goto err_out;
 	}
 
-	node->tbo = tbo;
-	ttm_resource_init(tbo, place, &node->base.base);
-
+	ttm_resource_init(tbo, place, &node->base);
 	if (place->lpfn) {
 		spin_lock(&mgr->lock);
 		r = drm_mm_insert_node_in_range(&mgr->mm,
-						&node->base.mm_nodes[0],
+						&node->mm_nodes[0],
 						num_pages, tbo->page_alignment,
 						0, place->fpfn, place->lpfn,
 						DRM_MM_INSERT_BEST);
@@ -161,14 +148,14 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 		if (unlikely(r))
 			goto err_free;
 
-		node->base.base.start = node->base.mm_nodes[0].start;
+		node->base.start = node->mm_nodes[0].start;
 	} else {
-		node->base.mm_nodes[0].start = 0;
-		node->base.mm_nodes[0].size = node->base.base.num_pages;
-		node->base.base.start = AMDGPU_BO_INVALID_OFFSET;
+		node->mm_nodes[0].start = 0;
+		node->mm_nodes[0].size = node->base.num_pages;
+		node->base.start = AMDGPU_BO_INVALID_OFFSET;
 	}
 
-	*res = &node->base.base;
+	*res = &node->base;
 	return 0;
 
 err_free:
@@ -191,12 +178,12 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
 			       struct ttm_resource *res)
 {
-	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
 
 	spin_lock(&mgr->lock);
-	if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
-		drm_mm_remove_node(&node->base.mm_nodes[0]);
+	if (drm_mm_node_allocated(&node->mm_nodes[0]))
+		drm_mm_remove_node(&node->mm_nodes[0]);
 	spin_unlock(&mgr->lock);
 	atomic64_add(res->num_pages, &mgr->available);
 
@@ -228,14 +215,14 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man)
 int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
 {
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
-	struct amdgpu_gtt_node *node;
+	struct ttm_range_mgr_node *node;
 	struct drm_mm_node *mm_node;
 	int r = 0;
 
 	spin_lock(&mgr->lock);
 	drm_mm_for_each_node(mm_node, &mgr->mm) {
-		node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
-		r = amdgpu_ttm_recover_gart(node->tbo);
+		node = container_of(mm_node, typeof(*node), mm_nodes[0]);
+		r = amdgpu_ttm_recover_gart(node->base.bo);
 		if (r)
 			break;
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 2431717376e7..7ff6194154fe 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -41,6 +41,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
 	res->bus.offset = 0;
 	res->bus.is_iomem = false;
 	res->bus.caching = ttm_cached;
+	res->bo = bo;
 }
 EXPORT_SYMBOL(ttm_resource_init);
 
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 140b6b9a8bbe..6d0b7a6d2169 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -171,6 +171,7 @@ struct ttm_resource {
 	uint32_t mem_type;
 	uint32_t placement;
 	struct ttm_bus_placement bus;
+	struct ttm_buffer_object *bo;
 };
 
 /**
-- 
2.25.1



* [PATCH 2/4] drm/ttm: add ttm_resource_fini
  2021-06-10 11:05 [PATCH 1/4] drm/ttm: add a pointer to the allocating BO into ttm_resource Christian König
@ 2021-06-10 11:05 ` Christian König
  2021-06-10 11:07   ` Christian König
  2021-06-10 11:05 ` [PATCH 3/4] drm/ttm: move the LRU into resource handling Christian König
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 7+ messages in thread
From: Christian König @ 2021-06-10 11:05 UTC
  To: thomas_os, matthew.auld, dri-devel

For now that function is just a stub.
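
Every resource manager free callback is converted to call it right
before the resource memory is freed, so that later patches can hook
common cleanup in here. The resulting pattern is roughly this
(illustrative sketch, the manager name is made up):

	static void my_man_free(struct ttm_resource_manager *man,
				struct ttm_resource *res)
	{
		/* driver-specific teardown of the resource first ... */
		ttm_resource_fini(res); /* common TTM cleanup hook */
		kfree(res);
	}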

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c  | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 1 +
 drivers/gpu/drm/nouveau/nouveau_ttm.c        | 1 +
 drivers/gpu/drm/ttm/ttm_range_manager.c      | 1 +
 drivers/gpu/drm/ttm/ttm_resource.c           | 5 +++++
 drivers/gpu/drm/ttm/ttm_sys_manager.c        | 1 +
 drivers/gpu/drm/vmwgfx/vmwgfx_thp.c          | 1 +
 include/drm/ttm/ttm_resource.h               | 2 ++
 8 files changed, 13 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 8e3f5da44e4f..95d1cd338cf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -187,6 +187,7 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
 	spin_unlock(&mgr->lock);
 	atomic64_add(res->num_pages, &mgr->available);
 
+	ttm_resource_fini(&node->base);
 	kfree(node);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 9a6df02477ce..9f0eb93123ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -510,6 +510,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
 	atomic64_sub(usage, &mgr->usage);
 	atomic64_sub(vis_usage, &mgr->vis_usage);
 
+	ttm_resource_fini(&node->base);
 	kvfree(node);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index f4c2e46b6fe1..1969759ee2ee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -38,6 +38,7 @@
 static void
 nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
 {
+	ttm_resource_fini(reg);
 	nouveau_mem_del(reg);
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index 03395386e8a7..3636601fd4b0 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -108,6 +108,7 @@ static void ttm_range_man_free(struct ttm_resource_manager *man,
 	drm_mm_remove_node(&node->mm_nodes[0]);
 	spin_unlock(&rman->lock);
 
+	ttm_resource_fini(&node->base);
 	kfree(node);
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 7ff6194154fe..5df1c63373cf 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -45,6 +45,11 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_resource_init);
 
+void ttm_resource_fini(struct ttm_resource *res)
+{
+}
+EXPORT_SYMBOL(ttm_resource_fini);
+
 int ttm_resource_alloc(struct ttm_buffer_object *bo,
 		       const struct ttm_place *place,
 		       struct ttm_resource **res_ptr)
diff --git a/drivers/gpu/drm/ttm/ttm_sys_manager.c b/drivers/gpu/drm/ttm/ttm_sys_manager.c
index 63aca52f75e1..4427bf6b076b 100644
--- a/drivers/gpu/drm/ttm/ttm_sys_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_sys_manager.c
@@ -23,6 +23,7 @@ static int ttm_sys_man_alloc(struct ttm_resource_manager *man,
 static void ttm_sys_man_free(struct ttm_resource_manager *man,
 			     struct ttm_resource *res)
 {
+	ttm_resource_fini(res);
 	kfree(res);
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
index 2a3d3468e4e0..414d3713f250 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -123,6 +123,7 @@ static void vmw_thp_put_node(struct ttm_resource_manager *man,
 	drm_mm_remove_node(&node->mm_nodes[0]);
 	spin_unlock(&rman->lock);
 
+	ttm_resource_fini(&node->base);
 	kfree(node);
 }
 
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 6d0b7a6d2169..7fc42db688b8 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -263,6 +263,8 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
 void ttm_resource_init(struct ttm_buffer_object *bo,
                        const struct ttm_place *place,
                        struct ttm_resource *res);
+void ttm_resource_fini(struct ttm_resource *res);
+
 int ttm_resource_alloc(struct ttm_buffer_object *bo,
 		       const struct ttm_place *place,
 		       struct ttm_resource **res);
-- 
2.25.1



* [PATCH 3/4] drm/ttm: move the LRU into resource handling
  2021-06-10 11:05 [PATCH 1/4] drm/ttm: add a pointer to the allocating BO into ttm_resource Christian König
  2021-06-10 11:05 ` [PATCH 2/4] drm/ttm: add ttm_resource_fini Christian König
@ 2021-06-10 11:05 ` Christian König
  2021-06-10 11:05 ` [PATCH 4/4] drm/ttm: add resource iterator Christian König
  2021-06-11  5:34 ` [PATCH 1/4] drm/ttm: add a pointer to the allocating BO into ttm_resource Thomas Hellström (Intel)
  3 siblings, 0 replies; 7+ messages in thread
From: Christian König @ 2021-06-10 11:05 UTC
  To: thomas_os, matthew.auld, dri-devel

This way we finally fix the problem that new resources are
not immediately evictable after allocation.

That has caused numerous problems, including OOM on GDS handling
and not being able to use TTM as a general resource manager.
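
For drivers using bulk moves this also switches the helpers from BO
based to resource based; following the amdgpu conversion below, usage
is roughly:

	ttm_bulk_move_init(&vm->lru_bulk_move);
	...
	spin_lock(&adev->mman.bdev.lru_lock);
	ttm_bulk_move_lru_tail(&vm->lru_bulk_move);
	spin_unlock(&adev->mman.bdev.lru_lock);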

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c |   8 +-
 drivers/gpu/drm/ttm/ttm_bo.c           | 101 ++-------------------
 drivers/gpu/drm/ttm/ttm_bo_util.c      |   1 -
 drivers/gpu/drm/ttm/ttm_device.c       |   4 +-
 drivers/gpu/drm/ttm/ttm_resource.c     | 119 +++++++++++++++++++++++++
 include/drm/ttm/ttm_bo_api.h           |  16 ----
 include/drm/ttm/ttm_bo_driver.h        |  29 +-----
 include/drm/ttm/ttm_resource.h         |  35 ++++++++
 8 files changed, 168 insertions(+), 145 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index d1a229212e7a..7475bb760ec7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -643,12 +643,12 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 
 	if (vm->bulk_moveable) {
 		spin_lock(&adev->mman.bdev.lru_lock);
-		ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
+		ttm_bulk_move_lru_tail(&vm->lru_bulk_move);
 		spin_unlock(&adev->mman.bdev.lru_lock);
 		return;
 	}
 
-	memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
+	ttm_bulk_move_init(&vm->lru_bulk_move);
 
 	spin_lock(&adev->mman.bdev.lru_lock);
 	list_for_each_entry(bo_base, &vm->idle, vm_status) {
@@ -657,11 +657,9 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 		if (!bo->parent)
 			continue;
 
-		ttm_bo_move_to_lru_tail(&bo->tbo, bo->tbo.resource,
-					&vm->lru_bulk_move);
+		ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
 		if (bo->shadow)
 			ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
-						bo->shadow->tbo.resource,
 						&vm->lru_bulk_move);
 	}
 	spin_unlock(&adev->mman.bdev.lru_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index db53fecca696..e081d48aad76 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -69,95 +69,15 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
 	}
 }
 
-static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
-{
-	struct ttm_device *bdev = bo->bdev;
-
-	list_del_init(&bo->lru);
-
-	if (bdev->funcs->del_from_lru_notify)
-		bdev->funcs->del_from_lru_notify(bo);
-}
-
-static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
-				     struct ttm_buffer_object *bo)
-{
-	if (!pos->first)
-		pos->first = bo;
-	pos->last = bo;
-}
-
 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
-			     struct ttm_resource *mem,
 			     struct ttm_lru_bulk_move *bulk)
 {
-	struct ttm_device *bdev = bo->bdev;
-	struct ttm_resource_manager *man;
-
-	if (!bo->deleted)
-		dma_resv_assert_held(bo->base.resv);
-
-	if (bo->pin_count) {
-		ttm_bo_del_from_lru(bo);
-		return;
-	}
-
-	man = ttm_manager_type(bdev, mem->mem_type);
-	list_move_tail(&bo->lru, &man->lru[bo->priority]);
-
-	if (bdev->funcs->del_from_lru_notify)
-		bdev->funcs->del_from_lru_notify(bo);
-
-	if (bulk && !bo->pin_count) {
-		switch (bo->resource->mem_type) {
-		case TTM_PL_TT:
-			ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
-			break;
+	dma_resv_assert_held(bo->base.resv);
 
-		case TTM_PL_VRAM:
-			ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
-			break;
-		}
-	}
+	ttm_resource_move_to_lru_tail(bo->resource, bulk);
 }
 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
 
-void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
-{
-	unsigned i;
-
-	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
-		struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
-		struct ttm_resource_manager *man;
-
-		if (!pos->first)
-			continue;
-
-		dma_resv_assert_held(pos->first->base.resv);
-		dma_resv_assert_held(pos->last->base.resv);
-
-		man = ttm_manager_type(pos->first->bdev, TTM_PL_TT);
-		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
-				    &pos->last->lru);
-	}
-
-	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
-		struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
-		struct ttm_resource_manager *man;
-
-		if (!pos->first)
-			continue;
-
-		dma_resv_assert_held(pos->first->base.resv);
-		dma_resv_assert_held(pos->last->base.resv);
-
-		man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM);
-		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
-				    &pos->last->lru);
-	}
-}
-EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
-
 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 				  struct ttm_resource *mem, bool evict,
 				  struct ttm_operation_ctx *ctx,
@@ -339,7 +259,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 		return ret;
 	}
 
-	ttm_bo_del_from_lru(bo);
 	list_del_init(&bo->ddestroy);
 	spin_unlock(&bo->bdev->lru_lock);
 	ttm_bo_cleanup_memtype_use(bo);
@@ -440,7 +359,7 @@ static void ttm_bo_release(struct kref *kref)
 		 */
 		if (bo->pin_count) {
 			bo->pin_count = 0;
-			ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
+			ttm_resource_move_to_lru_tail(bo->resource, NULL);
 		}
 
 		kref_init(&bo->kref);
@@ -453,7 +372,6 @@ static void ttm_bo_release(struct kref *kref)
 	}
 
 	spin_lock(&bo->bdev->lru_lock);
-	ttm_bo_del_from_lru(bo);
 	list_del(&bo->ddestroy);
 	spin_unlock(&bo->bdev->lru_lock);
 
@@ -637,15 +555,17 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
 			struct ww_acquire_ctx *ticket)
 {
 	struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
+	struct ttm_resource *res;
 	bool locked = false;
 	unsigned i;
 	int ret;
 
 	spin_lock(&bdev->lru_lock);
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
-		list_for_each_entry(bo, &man->lru[i], lru) {
+		list_for_each_entry(res, &man->lru[i], lru) {
 			bool busy;
 
+			bo = res->bo;
 			if (!ttm_bo_evict_swapout_allowable(bo, ctx, place,
 							    &locked, &busy)) {
 				if (busy && !busy_bo && ticket !=
@@ -663,7 +583,7 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
 		}
 
 		/* If the inner loop terminated early, we have our candidate */
-		if (&bo->lru != &man->lru[i])
+		if (&res->lru != &man->lru[i])
 			break;
 
 		bo = NULL;
@@ -837,9 +757,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	}
 
 error:
-	if (bo->resource->mem_type == TTM_PL_SYSTEM && !bo->pin_count)
-		ttm_bo_move_to_lru_tail_unlocked(bo);
-
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_mem_space);
@@ -1001,7 +918,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
 	bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
 
 	kref_init(&bo->kref);
-	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
 	bo->bdev = bdev;
 	bo->type = type;
@@ -1051,8 +967,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
 		return ret;
 	}
 
-	ttm_bo_move_to_lru_tail_unlocked(bo);
-
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_init_reserved);
@@ -1151,7 +1065,6 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
 		return 0;
 	}
 
-	ttm_bo_del_from_lru(bo);
 	/* TODO: Cleanup the locking */
 	spin_unlock(&bo->bdev->lru_lock);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2f57f824e6db..d10acd9f3a5b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -229,7 +229,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 
 	atomic_inc(&ttm_glob.bo_count);
 	INIT_LIST_HEAD(&fbo->base.ddestroy);
-	INIT_LIST_HEAD(&fbo->base.lru);
 	fbo->base.moving = NULL;
 	drm_vma_node_reset(&fbo->base.base.vma_node);
 
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 460953dcad11..a6e533c3ac56 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -132,6 +132,7 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
 {
 	struct ttm_resource_manager *man;
 	struct ttm_buffer_object *bo;
+	struct ttm_resource *res;
 	unsigned i, j;
 	int ret;
 
@@ -142,9 +143,10 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
 			continue;
 
 		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
-			list_for_each_entry(bo, &man->lru[j], lru) {
+			list_for_each_entry(res, &man->lru[j], lru) {
 				uint32_t num_pages;
 
+				bo = res->bo;
 				if (!bo->ttm ||
 				    bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
 				    bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 5df1c63373cf..bc2b71922235 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -29,6 +29,115 @@
 #include <drm/ttm/ttm_resource.h>
 #include <drm/ttm/ttm_bo_driver.h>
 
+/**
+ * ttm_bulk_move_init - initialize a bulk move structure
+ * @bulk: the structure to init
+ *
+ * For now just memset the structure to zero.
+ */
+void ttm_bulk_move_init(struct ttm_lru_bulk_move *bulk)
+{
+	memset(bulk, 0, sizeof(*bulk));
+}
+EXPORT_SYMBOL(ttm_bulk_move_init);
+
+/**
+ * ttm_bulk_move_lru_tail
+ *
+ * @bulk: bulk move structure
+ *
+ * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
+ * resource order never changes. Should be called with ttm_device::lru_lock held.
+ */
+void ttm_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
+{
+	unsigned i;
+
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+		struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
+		struct ttm_resource_manager *man;
+
+		if (!pos->first)
+			continue;
+
+		dma_resv_assert_held(pos->first->bo->base.resv);
+		dma_resv_assert_held(pos->last->bo->base.resv);
+
+		man = ttm_manager_type(pos->first->bo->bdev, TTM_PL_TT);
+		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
+				    &pos->last->lru);
+	}
+
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+		struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
+		struct ttm_resource_manager *man;
+
+		if (!pos->first)
+			continue;
+
+		dma_resv_assert_held(pos->first->bo->base.resv);
+		dma_resv_assert_held(pos->last->bo->base.resv);
+
+		man = ttm_manager_type(pos->first->bo->bdev, TTM_PL_VRAM);
+		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
+				    &pos->last->lru);
+	}
+}
+EXPORT_SYMBOL(ttm_bulk_move_lru_tail);
+
+/* Record a resource position in a bulk move structure */
+static void ttm_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
+				  struct ttm_resource *res)
+{
+	if (!pos->first)
+		pos->first = res;
+	pos->last = res;
+}
+
+/* Remove a resource from the LRU */
+static void ttm_resource_del_from_lru(struct ttm_resource *res)
+{
+	struct ttm_device *bdev = res->bo->bdev;
+
+	list_del_init(&res->lru);
+
+	if (bdev->funcs->del_from_lru_notify)
+		bdev->funcs->del_from_lru_notify(res->bo);
+}
+
+/* Move a resource to the LRU tail and track the bulk position */
+void ttm_resource_move_to_lru_tail(struct ttm_resource *res,
+				   struct ttm_lru_bulk_move *bulk)
+{
+	struct ttm_buffer_object *bo = res->bo;
+	struct ttm_device *bdev = bo->bdev;
+	struct ttm_resource_manager *man;
+
+	if (bo->pin_count) {
+		ttm_resource_del_from_lru(res);
+		return;
+	}
+
+	man = ttm_manager_type(bdev, res->mem_type);
+	list_move_tail(&res->lru, &man->lru[bo->priority]);
+
+	if (bdev->funcs->del_from_lru_notify)
+		bdev->funcs->del_from_lru_notify(bo);
+
+	if (!bulk)
+		return;
+
+	switch (res->mem_type) {
+	case TTM_PL_TT:
+		ttm_bulk_move_set_pos(&bulk->tt[bo->priority], res);
+		break;
+
+	case TTM_PL_VRAM:
+		ttm_bulk_move_set_pos(&bulk->vram[bo->priority], res);
+		break;
+	}
+}
+
 void ttm_resource_init(struct ttm_buffer_object *bo,
                        const struct ttm_place *place,
                        struct ttm_resource *res)
@@ -42,11 +151,21 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
 	res->bus.is_iomem = false;
 	res->bus.caching = ttm_cached;
 	res->bo = bo;
+	INIT_LIST_HEAD(&res->lru);
+
+	spin_lock(&bo->bdev->lru_lock);
+	ttm_resource_move_to_lru_tail(res, NULL);
+	spin_unlock(&bo->bdev->lru_lock);
 }
 EXPORT_SYMBOL(ttm_resource_init);
 
 void ttm_resource_fini(struct ttm_resource *res)
 {
+	struct ttm_device *bdev = res->bo->bdev;
+
+	spin_lock(&bdev->lru_lock);
+	ttm_resource_del_from_lru(res);
+	spin_unlock(&bdev->lru_lock);
 }
 EXPORT_SYMBOL(ttm_resource_fini);
 
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index f681bbdbc698..0928d8cfb45a 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -56,8 +56,6 @@ struct ttm_placement;
 
 struct ttm_place;
 
-struct ttm_lru_bulk_move;
-
 /**
  * enum ttm_bo_type
  *
@@ -95,7 +93,6 @@ struct ttm_tt;
  * @ttm: TTM structure holding system pages.
  * @evicted: Whether the object was evicted without user-space knowing.
  * @deleted: True if the object is only a zombie and already deleted.
- * @lru: List head for the lru list.
  * @ddestroy: List head for the delayed destroy list.
  * @swap: List head for swap LRU list.
  * @moving: Fence set when BO is moving
@@ -144,7 +141,6 @@ struct ttm_buffer_object {
 	 * Members protected by the bdev::lru_lock.
 	 */
 
-	struct list_head lru;
 	struct list_head ddestroy;
 
 	/**
@@ -308,7 +304,6 @@ void ttm_bo_put(struct ttm_buffer_object *bo);
  * ttm_bo_move_to_lru_tail
  *
  * @bo: The buffer object.
- * @mem: Resource object.
  * @bulk: optional bulk move structure to remember BO positions
  *
  * Move this BO to the tail of all lru lists used to lookup and reserve an
@@ -316,19 +311,8 @@ void ttm_bo_put(struct ttm_buffer_object *bo);
  * held, and is used to make a BO less likely to be considered for eviction.
  */
 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
-			     struct ttm_resource *mem,
 			     struct ttm_lru_bulk_move *bulk);
 
-/**
- * ttm_bo_bulk_move_lru_tail
- *
- * @bulk: bulk move structure
- *
- * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
- * BO order never changes. Should be called with ttm_global::lru_lock held.
- */
-void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk);
-
 /**
  * ttm_bo_lock_delayed_workqueue
  *
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 68d6069572aa..fba2f7d3d34e 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -45,33 +45,6 @@
 #include "ttm_tt.h"
 #include "ttm_pool.h"
 
-/**
- * struct ttm_lru_bulk_move_pos
- *
- * @first: first BO in the bulk move range
- * @last: last BO in the bulk move range
- *
- * Positions for a lru bulk move.
- */
-struct ttm_lru_bulk_move_pos {
-	struct ttm_buffer_object *first;
-	struct ttm_buffer_object *last;
-};
-
-/**
- * struct ttm_lru_bulk_move
- *
- * @tt: first/last lru entry for BOs in the TT domain
- * @vram: first/last lru entry for BOs in the VRAM domain
- * @swap: first/last lru entry for BOs on the swap list
- *
- * Helper structure for bulk moves on the LRU list.
- */
-struct ttm_lru_bulk_move {
-	struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
-	struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
-};
-
 /*
  * ttm_bo.c
  */
@@ -182,7 +155,7 @@ static inline void
 ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
 {
 	spin_lock(&bo->bdev->lru_lock);
-	ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
+	ttm_bo_move_to_lru_tail(bo, NULL);
 	spin_unlock(&bo->bdev->lru_lock);
 }
 
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 7fc42db688b8..56583b2392ed 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -26,9 +26,11 @@
 #define _TTM_RESOURCE_H_
 
 #include <linux/types.h>
+#include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/dma-buf-map.h>
 #include <linux/dma-fence.h>
+
 #include <drm/drm_print.h>
 #include <drm/ttm/ttm_caching.h>
 #include <drm/ttm/ttm_kmap_iter.h>
@@ -172,6 +174,33 @@ struct ttm_resource {
 	uint32_t placement;
 	struct ttm_bus_placement bus;
 	struct ttm_buffer_object *bo;
+	struct list_head lru;
+};
+
+/**
+ * struct ttm_lru_bulk_move_pos
+ *
+ * @first: first res in the bulk move range
+ * @last: last res in the bulk move range
+ *
+ * Positions for a lru bulk move.
+ */
+struct ttm_lru_bulk_move_pos {
+	struct ttm_resource *first;
+	struct ttm_resource *last;
+};
+
+/**
+ * struct ttm_lru_bulk_move
+ *
+ * @tt: first/last lru entry for resources in the TT domain
+ * @vram: first/last lru entry for resources in the VRAM domain
+ *
+ * Helper structure for bulk moves on the LRU list.
+ */
+struct ttm_lru_bulk_move {
+	struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
+	struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
 };
 
 /**
@@ -260,6 +289,12 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
 	man->move = NULL;
 }
 
+void ttm_bulk_move_init(struct ttm_lru_bulk_move *bulk);
+void ttm_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk);
+
+void ttm_resource_move_to_lru_tail(struct ttm_resource *res,
+				   struct ttm_lru_bulk_move *bulk);
+
 void ttm_resource_init(struct ttm_buffer_object *bo,
                        const struct ttm_place *place,
                        struct ttm_resource *res);
-- 
2.25.1



* [PATCH 4/4] drm/ttm: add resource iterator
  2021-06-10 11:05 [PATCH 1/4] drm/ttm: add a pointer to the allocating BO into ttm_resource Christian König
  2021-06-10 11:05 ` [PATCH 2/4] drm/ttm: add ttm_resource_fini Christian König
  2021-06-10 11:05 ` [PATCH 3/4] drm/ttm: move the LRU into resource handling Christian König
@ 2021-06-10 11:05 ` Christian König
  2021-06-11  5:34 ` [PATCH 1/4] drm/ttm: add a pointer to the allocating BO into ttm_resource Thomas Hellström (Intel)
  3 siblings, 0 replies; 7+ messages in thread
From: Christian König @ 2021-06-10 11:05 UTC
  To: thomas_os, matthew.auld, dri-devel

Instead of duplicating the LRU walk in different places, add an iterator
over all the resources in a resource manager.
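
Callers then look roughly like this (sketch based on the conversions
below; bdev->lru_lock must be held over the walk):

	struct ttm_resource_cursor cursor;
	struct ttm_resource *res;

	ttm_resource_manager_for_each_res(man, &cursor, res) {
		struct ttm_buffer_object *bo = res->bo;

		/* ... inspect or evict bo ... */
	}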

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c       | 42 +++++++++++++------------------
 drivers/gpu/drm/ttm/ttm_device.c   | 37 +++++++++++++--------------
 drivers/gpu/drm/ttm/ttm_resource.c | 30 ++++++++++++++++++++++
 include/drm/ttm/ttm_resource.h     | 23 +++++++++++++++++
 4 files changed, 88 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index e081d48aad76..c2408d53f85a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -555,38 +555,30 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
 			struct ww_acquire_ctx *ticket)
 {
 	struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
+	struct ttm_resource_cursor cursor;
 	struct ttm_resource *res;
 	bool locked = false;
-	unsigned i;
 	int ret;
 
 	spin_lock(&bdev->lru_lock);
-	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
-		list_for_each_entry(res, &man->lru[i], lru) {
-			bool busy;
-
-			bo = res->bo;
-			if (!ttm_bo_evict_swapout_allowable(bo, ctx, place,
-							    &locked, &busy)) {
-				if (busy && !busy_bo && ticket !=
-				    dma_resv_locking_ctx(bo->base.resv))
-					busy_bo = bo;
-				continue;
-			}
-
-			if (!ttm_bo_get_unless_zero(bo)) {
-				if (locked)
-					dma_resv_unlock(bo->base.resv);
-				continue;
-			}
-			break;
+	ttm_resource_manager_for_each_res(man, &cursor, res) {
+		bool busy;
+
+		if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
+						    &locked, &busy)) {
+			if (busy && !busy_bo && ticket !=
+			    dma_resv_locking_ctx(res->bo->base.resv))
+				busy_bo = res->bo;
+			continue;
 		}
 
-		/* If the inner loop terminated early, we have our candidate */
-		if (&res->lru != &man->lru[i])
-			break;
-
-		bo = NULL;
+		if (!ttm_bo_get_unless_zero(res->bo)) {
+			if (locked)
+				dma_resv_unlock(res->bo->base.resv);
+			continue;
+		}
+		bo = res->bo;
+		break;
 	}
 
 	if (!bo) {
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index a6e533c3ac56..8225575d2d0f 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -130,10 +130,11 @@ EXPORT_SYMBOL(ttm_global_swapout);
 int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
 		       gfp_t gfp_flags)
 {
+	struct ttm_resource_cursor cursor;
 	struct ttm_resource_manager *man;
 	struct ttm_buffer_object *bo;
 	struct ttm_resource *res;
-	unsigned i, j;
+	unsigned i;
 	int ret;
 
 	spin_lock(&bdev->lru_lock);
@@ -142,24 +143,22 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
 		if (!man || !man->use_tt)
 			continue;
 
-		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
-			list_for_each_entry(res, &man->lru[j], lru) {
-				uint32_t num_pages;
-
-				bo = res->bo;
-				if (!bo->ttm ||
-				    bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
-				    bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)
-					continue;
-
-				num_pages = bo->ttm->num_pages;
-				ret = ttm_bo_swapout(bo, ctx, gfp_flags);
-				/* ttm_bo_swapout has dropped the lru_lock */
-				if (!ret)
-					return num_pages;
-				if (ret != -EBUSY)
-					return ret;
-			}
+		ttm_resource_manager_for_each_res(man, &cursor, res) {
+			uint32_t num_pages;
+
+			bo = res->bo;
+			if (!bo->ttm ||
+			    bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
+			    bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)
+				continue;
+
+			num_pages = bo->ttm->num_pages;
+			ret = ttm_bo_swapout(bo, ctx, gfp_flags);
+			/* ttm_bo_swapout has dropped the lru_lock */
+			if (!ret)
+				return num_pages;
+			if (ret != -EBUSY)
+				return ret;
 		}
 	}
 	spin_unlock(&bdev->lru_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index bc2b71922235..53ef3462a26d 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -284,6 +284,36 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
 }
 EXPORT_SYMBOL(ttm_resource_manager_debug);
 
+struct ttm_resource *
+ttm_resource_manager_first(struct ttm_resource_manager *man,
+			   struct ttm_resource_cursor *cursor)
+{
+	struct ttm_resource *res;
+
+	for (cursor->priority = 0; cursor->priority < TTM_MAX_BO_PRIORITY;
+	     ++cursor->priority)
+		list_for_each_entry(res, &man->lru[cursor->priority], lru)
+			return res;
+
+	return NULL;
+}
+
+struct ttm_resource *
+ttm_resource_manager_next(struct ttm_resource_manager *man,
+			  struct ttm_resource_cursor *cursor,
+			  struct ttm_resource *res)
+{
+	list_for_each_entry_continue(res, &man->lru[cursor->priority], lru)
+		return res;
+
+	for (++cursor->priority; cursor->priority < TTM_MAX_BO_PRIORITY;
+	     ++cursor->priority)
+		list_for_each_entry(res, &man->lru[cursor->priority], lru)
+			return res;
+
+	return NULL;
+}
+
 static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
 					  struct dma_buf_map *dmap,
 					  pgoff_t i)
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 56583b2392ed..c854664be2b2 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -177,6 +177,17 @@ struct ttm_resource {
 	struct list_head lru;
 };
 
+/**
+ * struct ttm_resource_cursor
+ *
+ * @priority: the current priority
+ *
+ * Cursor to iterate over the resources in a manager.
+ */
+struct ttm_resource_cursor {
+	unsigned int priority;
+};
+
 /**
  * struct ttm_lru_bulk_move_pos
  *
@@ -314,6 +325,18 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
 void ttm_resource_manager_debug(struct ttm_resource_manager *man,
 				struct drm_printer *p);
 
+struct ttm_resource *
+ttm_resource_manager_first(struct ttm_resource_manager *man,
+			   struct ttm_resource_cursor *cursor);
+struct ttm_resource *
+ttm_resource_manager_next(struct ttm_resource_manager *man,
+			  struct ttm_resource_cursor *cursor,
+			  struct ttm_resource *res);
+
+#define ttm_resource_manager_for_each_res(man, cursor, res)		\
+	for (res = ttm_resource_manager_first(man, cursor); res;	\
+	     res = ttm_resource_manager_next(man, cursor, res))
+
 struct ttm_kmap_iter *
 ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
 			 struct io_mapping *iomap,
-- 
2.25.1



* Re: [PATCH 2/4] drm/ttm: add ttm_resource_fini
  2021-06-10 11:05 ` [PATCH 2/4] drm/ttm: add ttm_resource_fini Christian König
@ 2021-06-10 11:07   ` Christian König
  0 siblings, 0 replies; 7+ messages in thread
From: Christian König @ 2021-06-10 11:07 UTC
  To: thomas_os, matthew.auld, dri-devel

Ah, crap, forget this patch. I wanted to squash it into the next one.

On 10.06.21 13:05, Christian König wrote:
> For now that function is just a stub.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c  | 1 +
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 1 +
>   drivers/gpu/drm/nouveau/nouveau_ttm.c        | 1 +
>   drivers/gpu/drm/ttm/ttm_range_manager.c      | 1 +
>   drivers/gpu/drm/ttm/ttm_resource.c           | 5 +++++
>   drivers/gpu/drm/ttm/ttm_sys_manager.c        | 1 +
>   drivers/gpu/drm/vmwgfx/vmwgfx_thp.c          | 1 +
>   include/drm/ttm/ttm_resource.h               | 2 ++
>   8 files changed, 13 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> index 8e3f5da44e4f..95d1cd338cf4 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> @@ -187,6 +187,7 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
>   	spin_unlock(&mgr->lock);
>   	atomic64_add(res->num_pages, &mgr->available);
>   
> +	ttm_resource_fini(&node->base);
>   	kfree(node);
>   }
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> index 9a6df02477ce..9f0eb93123ec 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> @@ -510,6 +510,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
>   	atomic64_sub(usage, &mgr->usage);
>   	atomic64_sub(vis_usage, &mgr->vis_usage);
>   
> +	ttm_resource_fini(&node->base);
>   	kvfree(node);
>   }
>   
> diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
> index f4c2e46b6fe1..1969759ee2ee 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
> @@ -38,6 +38,7 @@
>   static void
>   nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
>   {
> +	ttm_resource_fini(reg);
>   	nouveau_mem_del(reg);
>   }
>   
> diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
> index 03395386e8a7..3636601fd4b0 100644
> --- a/drivers/gpu/drm/ttm/ttm_range_manager.c
> +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
> @@ -108,6 +108,7 @@ static void ttm_range_man_free(struct ttm_resource_manager *man,
>   	drm_mm_remove_node(&node->mm_nodes[0]);
>   	spin_unlock(&rman->lock);
>   
> +	ttm_resource_fini(&node->base);
>   	kfree(node);
>   }
>   
> diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
> index 7ff6194154fe..5df1c63373cf 100644
> --- a/drivers/gpu/drm/ttm/ttm_resource.c
> +++ b/drivers/gpu/drm/ttm/ttm_resource.c
> @@ -45,6 +45,11 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
>   }
>   EXPORT_SYMBOL(ttm_resource_init);
>   
> +void ttm_resource_fini(struct ttm_resource *res)
> +{
> +}
> +EXPORT_SYMBOL(ttm_resource_fini);
> +
>   int ttm_resource_alloc(struct ttm_buffer_object *bo,
>   		       const struct ttm_place *place,
>   		       struct ttm_resource **res_ptr)
> diff --git a/drivers/gpu/drm/ttm/ttm_sys_manager.c b/drivers/gpu/drm/ttm/ttm_sys_manager.c
> index 63aca52f75e1..4427bf6b076b 100644
> --- a/drivers/gpu/drm/ttm/ttm_sys_manager.c
> +++ b/drivers/gpu/drm/ttm/ttm_sys_manager.c
> @@ -23,6 +23,7 @@ static int ttm_sys_man_alloc(struct ttm_resource_manager *man,
>   static void ttm_sys_man_free(struct ttm_resource_manager *man,
>   			     struct ttm_resource *res)
>   {
> +	ttm_resource_fini(res);
>   	kfree(res);
>   }
>   
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
> index 2a3d3468e4e0..414d3713f250 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
> @@ -123,6 +123,7 @@ static void vmw_thp_put_node(struct ttm_resource_manager *man,
>   	drm_mm_remove_node(&node->mm_nodes[0]);
>   	spin_unlock(&rman->lock);
>   
> +	ttm_resource_fini(&node->base);
>   	kfree(node);
>   }
>   
> diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
> index 6d0b7a6d2169..7fc42db688b8 100644
> --- a/include/drm/ttm/ttm_resource.h
> +++ b/include/drm/ttm/ttm_resource.h
> @@ -263,6 +263,8 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
>   void ttm_resource_init(struct ttm_buffer_object *bo,
>                          const struct ttm_place *place,
>                          struct ttm_resource *res);
> +void ttm_resource_fini(struct ttm_resource *res);
> +
>   int ttm_resource_alloc(struct ttm_buffer_object *bo,
>   		       const struct ttm_place *place,
>   		       struct ttm_resource **res);



* Re: [PATCH 1/4] drm/ttm: add a pointer to the allocating BO into ttm_resource
  2021-06-10 11:05 [PATCH 1/4] drm/ttm: add a pointer to the allocating BO into ttm_resource Christian König
                   ` (2 preceding siblings ...)
  2021-06-10 11:05 ` [PATCH 4/4] drm/ttm: add resource iterator Christian König
@ 2021-06-11  5:34 ` Thomas Hellström (Intel)
  2021-06-11 11:30   ` Christian König
  3 siblings, 1 reply; 7+ messages in thread
From: Thomas Hellström (Intel) @ 2021-06-11  5:34 UTC
  To: Christian König, matthew.auld, dri-devel

Hi, Christian,

I know you have a lot on your plate, and that the drm community is a bit
lax about following the kernel patch submission guidelines, but now that
we're also spinning up a number of Intel developers on TTM, could we
please make a better effort with cover letters and commit messages so
that they understand what the purpose and end goal of the series is. A
reviewer shouldn't have to look at the last patch to try to get an
understanding of what the series is doing and why.

On 6/10/21 1:05 PM, Christian König wrote:
> We are going to need this for the next patch


> and it allows us to clean
> up amdgpu as well.

The amdgpu changes are not reflected in the commit title.


>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 47 ++++++++-------------
>   drivers/gpu/drm/ttm/ttm_resource.c          |  1 +
>   include/drm/ttm/ttm_resource.h              |  1 +
>   3 files changed, 19 insertions(+), 30 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> index 194f9eecf89c..8e3f5da44e4f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> @@ -26,23 +26,12 @@
>   
>   #include "amdgpu.h"
>   
> -struct amdgpu_gtt_node {
> -	struct ttm_buffer_object *tbo;
> -	struct ttm_range_mgr_node base;
> -};
> -
>   static inline struct amdgpu_gtt_mgr *
>   to_gtt_mgr(struct ttm_resource_manager *man)
>   {
>   	return container_of(man, struct amdgpu_gtt_mgr, manager);
>   }
>   
> -static inline struct amdgpu_gtt_node *
> -to_amdgpu_gtt_node(struct ttm_resource *res)
> -{
> -	return container_of(res, struct amdgpu_gtt_node, base.base);
> -}
> -
>   /**
>    * DOC: mem_info_gtt_total
>    *
> @@ -107,9 +96,9 @@ const struct attribute_group amdgpu_gtt_mgr_attr_group = {
>    */
>   bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
>   {
> -	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
> +	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
>   
> -	return drm_mm_node_allocated(&node->base.mm_nodes[0]);
> +	return drm_mm_node_allocated(&node->mm_nodes[0]);
>   }
>   
>   /**
> @@ -129,7 +118,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
>   {
>   	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
>   	uint32_t num_pages = PFN_UP(tbo->base.size);
> -	struct amdgpu_gtt_node *node;
> +	struct ttm_range_mgr_node *node;
>   	int r;
>   
>   	spin_lock(&mgr->lock);
> @@ -141,19 +130,17 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
>   	atomic64_sub(num_pages, &mgr->available);
>   	spin_unlock(&mgr->lock);
>   
> -	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
> +	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
>   	if (!node) {
>   		r = -ENOMEM;
>   		goto err_out;
>   	}
>   
> -	node->tbo = tbo;
> -	ttm_resource_init(tbo, place, &node->base.base);
> -
> +	ttm_resource_init(tbo, place, &node->base);
>   	if (place->lpfn) {
>   		spin_lock(&mgr->lock);
>   		r = drm_mm_insert_node_in_range(&mgr->mm,
> -						&node->base.mm_nodes[0],
> +						&node->mm_nodes[0],
>   						num_pages, tbo->page_alignment,
>   						0, place->fpfn, place->lpfn,
>   						DRM_MM_INSERT_BEST);
> @@ -161,14 +148,14 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
>   		if (unlikely(r))
>   			goto err_free;
>   
> -		node->base.base.start = node->base.mm_nodes[0].start;
> +		node->base.start = node->mm_nodes[0].start;
>   	} else {
> -		node->base.mm_nodes[0].start = 0;
> -		node->base.mm_nodes[0].size = node->base.base.num_pages;
> -		node->base.base.start = AMDGPU_BO_INVALID_OFFSET;
> +		node->mm_nodes[0].start = 0;
> +		node->mm_nodes[0].size = node->base.num_pages;
> +		node->base.start = AMDGPU_BO_INVALID_OFFSET;
>   	}
>   
> -	*res = &node->base.base;
> +	*res = &node->base;
>   	return 0;
>   
>   err_free:
> @@ -191,12 +178,12 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
>   static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
>   			       struct ttm_resource *res)
>   {
> -	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
> +	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
>   	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
>   
>   	spin_lock(&mgr->lock);
> -	if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
> -		drm_mm_remove_node(&node->base.mm_nodes[0]);
> +	if (drm_mm_node_allocated(&node->mm_nodes[0]))
> +		drm_mm_remove_node(&node->mm_nodes[0]);
>   	spin_unlock(&mgr->lock);
>   	atomic64_add(res->num_pages, &mgr->available);
>   
> @@ -228,14 +215,14 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man)
>   int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
>   {
>   	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
> -	struct amdgpu_gtt_node *node;
> +	struct ttm_range_mgr_node *node;
>   	struct drm_mm_node *mm_node;
>   	int r = 0;
>   
>   	spin_lock(&mgr->lock);
>   	drm_mm_for_each_node(mm_node, &mgr->mm) {
> -		node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
> -		r = amdgpu_ttm_recover_gart(node->tbo);
> +		node = container_of(mm_node, typeof(*node), mm_nodes[0]);
> +		r = amdgpu_ttm_recover_gart(node->base.bo);
>   		if (r)
>   			break;
>   	}
> diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
> index 2431717376e7..7ff6194154fe 100644
> --- a/drivers/gpu/drm/ttm/ttm_resource.c
> +++ b/drivers/gpu/drm/ttm/ttm_resource.c
> @@ -41,6 +41,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
>   	res->bus.offset = 0;
>   	res->bus.is_iomem = false;
>   	res->bus.caching = ttm_cached;
> +	res->bo = bo;
>   }
>   EXPORT_SYMBOL(ttm_resource_init);
>   
> diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
> index 140b6b9a8bbe..6d0b7a6d2169 100644
> --- a/include/drm/ttm/ttm_resource.h
> +++ b/include/drm/ttm/ttm_resource.h
> @@ -171,6 +171,7 @@ struct ttm_resource {
>   	uint32_t mem_type;
>   	uint32_t placement;
>   	struct ttm_bus_placement bus;
> +	struct ttm_buffer_object *bo;

Not that I'm against this change by itself, but this bo pointer is not
refcounted, and therefore needs a description of when it's needed and why.
What happens, for example, when the resource is moved to a ghost object,
or the bo is killed while the resource remains on an lru list (which
I understand was one of the main purposes of free-standing resources)?
Weak references need a guarantee that the object they point to is
alive. What is that guarantee?
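
(For example, the eviction loop in patch 3 only dereferences res->bo
under the lru_lock and then tries to pin it with something like

	if (!ttm_bo_get_unless_zero(res->bo))
		continue;

Is holding the lru_lock the intended rule for when that pointer may be
dereferenced?)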

Also, could we introduce new TTM structure members where they are first
used/referenced by TTM and not where they are used by amdgpu? Without
finding out in patch 3 that this member is needed to look up the bo from
an lru list, the correct response to this patch would have been: that bo
is amdgpu-specific and needs to be in a driver-private struct...


Thanks,

/Thomas


>   };
>   
>   /**


* Re: [PATCH 1/4] drm/ttm: add a pointer to the allocating BO into ttm_resource
  2021-06-11  5:34 ` [PATCH 1/4] drm/ttm: add a pointer to the allocating BO into ttm_resource Thomas Hellström (Intel)
@ 2021-06-11 11:30   ` Christian König
  0 siblings, 0 replies; 7+ messages in thread
From: Christian König @ 2021-06-11 11:30 UTC
  To: Thomas Hellström (Intel), matthew.auld, dri-devel



On 11.06.21 07:34, Thomas Hellström (Intel) wrote:
> Hi, Christian,
>
> I know you have a lot on your plate, and that the drm community is a
> bit lax about following the kernel patch submission guidelines, but
> now that we're also spinning up a number of Intel developers on TTM,
> could we please make a better effort with cover letters and commit
> messages so that they understand what the purpose and end goal of the
> series is. A reviewer shouldn't have to look at the last patch to try
> to get an understanding of what the series is doing and why.

Sorry, that was sent out this early unintentionally. See it more as an
RFC.

>
> On 6/10/21 1:05 PM, Christian König wrote:
>> We are going to need this for the next patch
>
>
>> and it allows us to clean
>> up amdgpu as well.
>
> The amdgpu changes are not reflected in the commit title.
>
>
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 47 ++++++++-------------
>>   drivers/gpu/drm/ttm/ttm_resource.c          |  1 +
>>   include/drm/ttm/ttm_resource.h              |  1 +
>>   3 files changed, 19 insertions(+), 30 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
>> index 194f9eecf89c..8e3f5da44e4f 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
>> @@ -26,23 +26,12 @@
>>     #include "amdgpu.h"
>>   -struct amdgpu_gtt_node {
>> -    struct ttm_buffer_object *tbo;
>> -    struct ttm_range_mgr_node base;
>> -};
>> -
>>   static inline struct amdgpu_gtt_mgr *
>>   to_gtt_mgr(struct ttm_resource_manager *man)
>>   {
>>       return container_of(man, struct amdgpu_gtt_mgr, manager);
>>   }
>>   -static inline struct amdgpu_gtt_node *
>> -to_amdgpu_gtt_node(struct ttm_resource *res)
>> -{
>> -    return container_of(res, struct amdgpu_gtt_node, base.base);
>> -}
>> -
>>   /**
>>    * DOC: mem_info_gtt_total
>>    *
>> @@ -107,9 +96,9 @@ const struct attribute_group 
>> amdgpu_gtt_mgr_attr_group = {
>>    */
>>   bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
>>   {
>> -    struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
>> +    struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
>>   -    return drm_mm_node_allocated(&node->base.mm_nodes[0]);
>> +    return drm_mm_node_allocated(&node->mm_nodes[0]);
>>   }
>>     /**
>> @@ -129,7 +118,7 @@ static int amdgpu_gtt_mgr_new(struct 
>> ttm_resource_manager *man,
>>   {
>>       struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
>>       uint32_t num_pages = PFN_UP(tbo->base.size);
>> -    struct amdgpu_gtt_node *node;
>> +    struct ttm_range_mgr_node *node;
>>       int r;
>>         spin_lock(&mgr->lock);
>> @@ -141,19 +130,17 @@ static int amdgpu_gtt_mgr_new(struct 
>> ttm_resource_manager *man,
>>       atomic64_sub(num_pages, &mgr->available);
>>       spin_unlock(&mgr->lock);
>>   -    node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
>> +    node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
>>       if (!node) {
>>           r = -ENOMEM;
>>           goto err_out;
>>       }
>>   -    node->tbo = tbo;
>> -    ttm_resource_init(tbo, place, &node->base.base);
>> -
>> +    ttm_resource_init(tbo, place, &node->base);
>>       if (place->lpfn) {
>>           spin_lock(&mgr->lock);
>>           r = drm_mm_insert_node_in_range(&mgr->mm,
>> -                        &node->base.mm_nodes[0],
>> +                        &node->mm_nodes[0],
>>                           num_pages, tbo->page_alignment,
>>                           0, place->fpfn, place->lpfn,
>>                           DRM_MM_INSERT_BEST);
>> @@ -161,14 +148,14 @@ static int amdgpu_gtt_mgr_new(struct 
>> ttm_resource_manager *man,
>>           if (unlikely(r))
>>               goto err_free;
>>   -        node->base.base.start = node->base.mm_nodes[0].start;
>> +        node->base.start = node->mm_nodes[0].start;
>>       } else {
>> -        node->base.mm_nodes[0].start = 0;
>> -        node->base.mm_nodes[0].size = node->base.base.num_pages;
>> -        node->base.base.start = AMDGPU_BO_INVALID_OFFSET;
>> +        node->mm_nodes[0].start = 0;
>> +        node->mm_nodes[0].size = node->base.num_pages;
>> +        node->base.start = AMDGPU_BO_INVALID_OFFSET;
>>       }
>>   -    *res = &node->base.base;
>> +    *res = &node->base;
>>       return 0;
>>     err_free:
>> @@ -191,12 +178,12 @@ static int amdgpu_gtt_mgr_new(struct 
>> ttm_resource_manager *man,
>>   static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
>>                      struct ttm_resource *res)
>>   {
>> -    struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
>> +    struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
>>       struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
>>         spin_lock(&mgr->lock);
>> -    if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
>> -        drm_mm_remove_node(&node->base.mm_nodes[0]);
>> +    if (drm_mm_node_allocated(&node->mm_nodes[0]))
>> +        drm_mm_remove_node(&node->mm_nodes[0]);
>>       spin_unlock(&mgr->lock);
>>       atomic64_add(res->num_pages, &mgr->available);
>>   @@ -228,14 +215,14 @@ uint64_t amdgpu_gtt_mgr_usage(struct 
>> ttm_resource_manager *man)
>>   int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
>>   {
>>       struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
>> -    struct amdgpu_gtt_node *node;
>> +    struct ttm_range_mgr_node *node;
>>       struct drm_mm_node *mm_node;
>>       int r = 0;
>>         spin_lock(&mgr->lock);
>>       drm_mm_for_each_node(mm_node, &mgr->mm) {
>> -        node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
>> -        r = amdgpu_ttm_recover_gart(node->tbo);
>> +        node = container_of(mm_node, typeof(*node), mm_nodes[0]);
>> +        r = amdgpu_ttm_recover_gart(node->base.bo);
>>           if (r)
>>               break;
>>       }
>> diff --git a/drivers/gpu/drm/ttm/ttm_resource.c 
>> b/drivers/gpu/drm/ttm/ttm_resource.c
>> index 2431717376e7..7ff6194154fe 100644
>> --- a/drivers/gpu/drm/ttm/ttm_resource.c
>> +++ b/drivers/gpu/drm/ttm/ttm_resource.c
>> @@ -41,6 +41,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
>>       res->bus.offset = 0;
>>       res->bus.is_iomem = false;
>>       res->bus.caching = ttm_cached;
>> +    res->bo = bo;
>>   }
>>   EXPORT_SYMBOL(ttm_resource_init);
>>   diff --git a/include/drm/ttm/ttm_resource.h 
>> b/include/drm/ttm/ttm_resource.h
>> index 140b6b9a8bbe..6d0b7a6d2169 100644
>> --- a/include/drm/ttm/ttm_resource.h
>> +++ b/include/drm/ttm/ttm_resource.h
>> @@ -171,6 +171,7 @@ struct ttm_resource {
>>       uint32_t mem_type;
>>       uint32_t placement;
>>       struct ttm_bus_placement bus;
>> +    struct ttm_buffer_object *bo;
>
> Not that I'm against this change by itself, but this bo pointer is not
> refcounted, and therefore needs a description of when it's needed and
> why. What happens, for example, when the resource is moved to a ghost
> object, or the bo is killed while the resource remains on an lru
> list (which I understand was one of the main purposes of
> free-standing resources)? Weak references need a guarantee that the
> object they point to is alive. What is that guarantee?

The basic idea is that we want to get rid of ghost objects and all the
related workarounds in the mid term. But yes, for the interim we probably
need more logic here to make sure the BO is not destroyed.

>
> Also, could we introduce new TTM structure members where they are first
> used/referenced by TTM and not where they are used by amdgpu? Without
> finding out in patch 3 that this member is needed to look up the bo
> from an lru list, the correct response to this patch would have been:
> that bo is amdgpu-specific and needs to be in a driver-private struct...

That was indeed not supposed to be like this.

The question I rather wanted to raise is whether it's ok to make the
resource object fully depend on the BO for allocation/destruction.

I've seen some code in the DG1 branch where a mock BO is used to
allocate some resource, but that approach won't work with this change.

On the other hand this fixes a long-standing and very annoying
problem we had.

Christian.

>
>
> Thanks,
>
> /Thomas
>
>
>>   };
>>     /**

