From: Matthew Brost <matthew.brost@intel.com>
To: <intel-xe@lists.freedesktop.org>
Cc: "Matthew Brost" <matthew.brost@intel.com>,
"Oak Zeng" <oak.zeng@intel.com>,
"Thomas Hellström" <thomas.hellstrom@linux.intel.com>
Subject: [PATCH v2 2/6] drm/xe: Add xe_vm_pgtable_update_op to xe_vma_ops
Date: Tue, 14 May 2024 17:40:06 -0700
Message-ID: <20240515004010.100091-3-matthew.brost@intel.com>
In-Reply-To: <20240515004010.100091-1-matthew.brost@intel.com>
Each xe_vma_op resolves to 0-3 pt_ops. Add storage for the pt_ops to
xe_vma_ops, dynamically allocated based on the number and types of
xe_vma_op entries in the xe_vma_ops list. Only the allocation is
implemented in this patch.

This will help with converting xe_vma_ops (multiple xe_vma_op) into an
atomic update unit.
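
A minimal sketch of the intended caller lifecycle, stitched together
from the hunks below (the tile_mask value and the free_ops label are
illustrative, not copied verbatim from any one call site):

	struct xe_vma_ops vops;
	struct dma_fence *fence;
	int err;

	/* Parsing each xe_vma_op bumps pt_update_ops[tile].num_ops */
	xe_vma_ops_incr_pt_update_ops(&vops, tile_mask);

	/* Allocate the pt_ops array for every tile with num_ops set */
	err = xe_vma_ops_alloc(&vops);
	if (err)
		goto free_ops;

	fence = ops_execute(vm, &vops);

free_ops:
	xe_vma_ops_fini(&vops);	/* kfree()s the per-tile ops arrays */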
Cc: Oak Zeng <oak.zeng@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
drivers/gpu/drm/xe/xe_pt_types.h | 12 ++++++
drivers/gpu/drm/xe/xe_vm.c | 66 +++++++++++++++++++++++++++++++-
drivers/gpu/drm/xe/xe_vm_types.h | 8 ++++
3 files changed, 84 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h
index cee70cb0f014..2093150f461e 100644
--- a/drivers/gpu/drm/xe/xe_pt_types.h
+++ b/drivers/gpu/drm/xe/xe_pt_types.h
@@ -74,4 +74,16 @@ struct xe_vm_pgtable_update {
u32 flags;
};
+/** struct xe_vm_pgtable_update_op - Page table update operation */
+struct xe_vm_pgtable_update_op {
+ /** @entries: entries to update for this operation */
+ struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
+ /** @num_entries: number of entries for this update operation */
+ u32 num_entries;
+ /** @bind: is a bind */
+ bool bind;
+ /** @rebind: is a rebind */
+ bool rebind;
+};
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index c5b1694b292f..17b43b567bf3 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -712,6 +712,42 @@ int xe_vm_userptr_check_repin(struct xe_vm *vm)
list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
}
+static int xe_vma_ops_alloc(struct xe_vma_ops *vops)
+{
+ int i;
+
+ for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) {
+ if (!vops->pt_update_ops[i].num_ops)
+ continue;
+
+ vops->pt_update_ops[i].ops =
+ kmalloc_array(vops->pt_update_ops[i].num_ops,
+ sizeof(*vops->pt_update_ops[i].ops),
+ GFP_KERNEL);
+ if (!vops->pt_update_ops[i].ops)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void xe_vma_ops_fini(struct xe_vma_ops *vops)
+{
+ int i;
+
+ for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
+ kfree(vops->pt_update_ops[i].ops);
+}
+
+static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask)
+{
+ int i;
+
+ for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
+ if (BIT(i) & tile_mask)
+ ++vops->pt_update_ops[i].num_ops;
+}
+
static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
u8 tile_mask)
{
@@ -739,6 +775,7 @@ static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
xe_vm_populate_rebind(op, vma, tile_mask);
list_add_tail(&op->link, &vops->list);
+ xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
return 0;
}
@@ -779,6 +816,10 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
goto free_ops;
}
+ err = xe_vma_ops_alloc(&vops);
+ if (err)
+ goto free_ops;
+
fence = ops_execute(vm, &vops);
if (IS_ERR(fence)) {
err = PTR_ERR(fence);
@@ -793,6 +834,7 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
list_del(&op->link);
kfree(op);
}
+ xe_vma_ops_fini(&vops);
return err;
}
@@ -814,12 +856,20 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma
if (err)
return ERR_PTR(err);
+ err = xe_vma_ops_alloc(&vops);
+ if (err) {
+ fence = ERR_PTR(err);
+ goto free_ops;
+ }
+
fence = ops_execute(vm, &vops);
+free_ops:
list_for_each_entry_safe(op, next_op, &vops.list, link) {
list_del(&op->link);
kfree(op);
}
+ xe_vma_ops_fini(&vops);
return fence;
}
@@ -2276,7 +2326,6 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
return err;
}
-
static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
struct drm_gpuva_ops *ops,
struct xe_sync_entry *syncs, u32 num_syncs,
@@ -2328,6 +2377,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
return PTR_ERR(vma);
op->map.vma = vma;
+ if (op->map.immediate || !xe_vm_in_fault_mode(vm))
+ xe_vma_ops_incr_pt_update_ops(vops,
+ op->tile_mask);
break;
}
case DRM_GPUVA_OP_REMAP:
@@ -2372,6 +2424,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
(ULL)op->remap.start,
(ULL)op->remap.range);
+ } else {
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
}
}
@@ -2408,13 +2462,16 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
(ULL)op->remap.start,
(ULL)op->remap.range);
+ } else {
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
}
}
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
break;
}
case DRM_GPUVA_OP_UNMAP:
case DRM_GPUVA_OP_PREFETCH:
- /* Nothing to do */
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
@@ -3261,11 +3318,16 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto unwind_ops;
}
+ err = xe_vma_ops_alloc(&vops);
+ if (err)
+ goto unwind_ops;
+
err = vm_bind_ioctl_ops_execute(vm, &vops);
unwind_ops:
if (err && err != -ENODATA)
vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
+ xe_vma_ops_fini(&vops);
for (i = args->num_binds - 1; i >= 0; --i)
if (ops[i])
drm_gpuva_ops_free(&vm->gpuvm, ops[i]);
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index ce1a63a5e3e7..211c88801182 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -21,6 +21,7 @@ struct xe_bo;
struct xe_sync_entry;
struct xe_user_fence;
struct xe_vm;
+struct xe_vm_pgtable_update_op;
#define XE_VMA_READ_ONLY DRM_GPUVA_USERBITS
#define XE_VMA_DESTROYED (DRM_GPUVA_USERBITS << 1)
@@ -368,6 +369,13 @@ struct xe_vma_ops {
struct xe_sync_entry *syncs;
/** @num_syncs: number of syncs */
u32 num_syncs;
+ /** @pt_update_ops: page table update operations */
+ struct {
+ /** @ops: operations */
+ struct xe_vm_pgtable_update_op *ops;
+ /** @num_ops: number of operations */
+ u32 num_ops;
+ } pt_update_ops[XE_MAX_TILES_PER_DEVICE];
};
#endif
--
2.34.1