From: Oak Zeng <oak.zeng@intel.com>
To: dri-devel@lists.freedesktop.org, intel-xe@lists.freedesktop.org
Cc: matthew.brost@intel.com, Thomas.Hellstrom@linux.intel.com,
	brian.welty@intel.com, himal.prasad.ghimiray@intel.com,
	krishnaiah.bommu@intel.com, niranjana.vishwanathapura@intel.com
Subject: [PATCH 15/23] drm/xe/svm: Implement functions to register and unregister mmu notifier
Date: Wed, 17 Jan 2024 17:12:15 -0500	[thread overview]
Message-ID: <20240117221223.18540-16-oak.zeng@intel.com> (raw)
In-Reply-To: <20240117221223.18540-1-oak.zeng@intel.com>

The xe driver registers an mmu interval notifier with core mm to monitor
vma changes. One mmu interval notifier is registered per svm range. The
mmu interval notifier has to be unregistered from a worker (see the next
patch in this series), so also initialize a kernel worker to unregister
the mmu interval notifier.
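
For context, here is a minimal sketch (not part of this patch) of how these
helpers are expected to fit together. The invalidate callback shown is only
an assumption of what the next patch in the series adds, and the caller, its
name xe_svm_range_track, and the use of system_unbound_wq are illustrative
only:

static bool xe_svm_range_invalidate(struct mmu_interval_notifier *mni,
				    const struct mmu_notifier_range *mn_range,
				    unsigned long cur_seq)
{
	struct xe_svm_range *range =
		container_of(mni, struct xe_svm_range, notifier);

	mmu_interval_set_seq(mni, cur_seq);

	/*
	 * A notifier cannot remove itself from its own callback, so on
	 * MUNMAP defer the removal to the worker initialized by
	 * xe_svm_range_register_mmu_notifier().
	 */
	if (mn_range->event == MMU_NOTIFY_UNMAP)
		queue_work(system_unbound_wq,
			   &range->unregister_notifier_work);

	return true;
}

/* Hypothetical caller, e.g. on the GPU page fault handling path. */
static int xe_svm_range_track(struct xe_svm_range *range)
{
	struct mm_struct *mm = range->svm->mm;
	int err;

	mmap_read_lock(mm);
	err = xe_svm_range_register_mmu_notifier(range);
	mmap_read_unlock(mm);

	return err;
}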

Signed-off-by: Oak Zeng <oak.zeng@intel.com>
Cc: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@intel.com>
Cc: Brian Welty <brian.welty@intel.com>
---
 drivers/gpu/drm/xe/xe_svm.h       | 14 ++++++
 drivers/gpu/drm/xe/xe_svm_range.c | 73 +++++++++++++++++++++++++++++++
 2 files changed, 87 insertions(+)

diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 6b93055934f8..90e665f2bfc6 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -52,16 +52,28 @@ struct xe_svm {
  * struct xe_svm_range - Represents a shared virtual address range.
  */
 struct xe_svm_range {
+	/** @svm: pointer to the xe_svm that this range belongs to */
+	struct xe_svm *svm;
+
 	/** @notifier: The mmu interval notifer used to keep track of CPU
 	 * side address range change. Driver will get a callback with this
 	 * notifier if anything changed from CPU side, such as range is
 	 * unmapped from CPU
 	 */
 	struct mmu_interval_notifier notifier;
+	bool mmu_notifier_registered;
 	/** @start: start address of this range, inclusive */
 	u64 start;
 	/** @end: end address of this range, exclusive */
 	u64 end;
+	/** @vma: the corresponding vma of this svm range
+	 *  The relationship between vma and svm range is 1:N,
+	 *  which means one vma can be split into multiple
+	 *  @xe_svm_range while one @xe_svm_range can have
+	 *  only one vma. An N:N mapping would complicate the
+	 *  code. Let's assume 1:N for now.
+	 */
+	struct vm_area_struct *vma;
 	/** @unregister_notifier_work: A worker used to unregister this notifier */
 	struct work_struct unregister_notifier_work;
 	/** @inode: used to link this range to svm's range_tree */
@@ -77,6 +89,8 @@ struct xe_svm_range *xe_svm_range_from_addr(struct xe_svm *svm,
 bool xe_svm_range_belongs_to_vma(struct mm_struct *mm,
 								struct xe_svm_range *range,
 								struct vm_area_struct *vma);
+void xe_svm_range_unregister_mmu_notifier(struct xe_svm_range *range);
+int xe_svm_range_register_mmu_notifier(struct xe_svm_range *range);
 
 int xe_svm_build_sg(struct hmm_range *range, struct sg_table *st);
 int xe_svm_devm_add(struct xe_tile *tile, struct xe_mem_region *mem);
diff --git a/drivers/gpu/drm/xe/xe_svm_range.c b/drivers/gpu/drm/xe/xe_svm_range.c
index b32c32f60315..286d5f7d6ecd 100644
--- a/drivers/gpu/drm/xe/xe_svm_range.c
+++ b/drivers/gpu/drm/xe/xe_svm_range.c
@@ -4,6 +4,7 @@
  */
 
 #include <linux/interval_tree.h>
+#include <linux/mmu_notifier.h>
 #include <linux/container_of.h>
 #include <linux/mm_types.h>
 #include <linux/mutex.h>
@@ -57,3 +58,75 @@ bool xe_svm_range_belongs_to_vma(struct mm_struct *mm,
 
 	return (vma1 == vma) && (vma2 == vma);
 }
+
+static const struct mmu_interval_notifier_ops xe_svm_mni_ops = {
+	.invalidate = NULL,
+};
+
+/**
+ * xe_svm_range_unregister_mmu_notifier() - unregister the mmu interval
+ * notifier of an svm range
+ *
+ * @range: svm range
+ */
+void xe_svm_range_unregister_mmu_notifier(struct xe_svm_range *range)
+{
+	if (!range->mmu_notifier_registered)
+		return;
+
+	mmu_interval_notifier_remove(&range->notifier);
+	range->mmu_notifier_registered = false;
+}
+
+static void xe_svm_unregister_notifier_work(struct work_struct *work)
+{
+	struct xe_svm_range *range;
+
+	range = container_of(work, struct xe_svm_range, unregister_notifier_work);
+
+	xe_svm_range_unregister_mmu_notifier(range);
+
+	/*
+	 * This is called from the mmu notifier MUNMAP event. When munmap is
+	 * called, this range is no longer valid. Remove it.
+	 */
+	mutex_lock(&range->svm->mutex);
+	interval_tree_remove(&range->inode, &range->svm->range_tree);
+	mutex_unlock(&range->svm->mutex);
+	kfree(range);
+}
+
+/**
+ * xe_svm_range_register_mmu_notifier() - register an mmu interval
+ * notifier to monitor vma changes
+ * @range: svm range to monitor
+ *
+ * This has to be called with mmap_read_lock held.
+ */
+int xe_svm_range_register_mmu_notifier(struct xe_svm_range *range)
+{
+	struct vm_area_struct *vma = range->vma;
+	struct mm_struct *mm = range->svm->mm;
+	u64 start, length;
+	int ret = 0;
+
+	if (range->mmu_notifier_registered)
+		return 0;
+
+	start = range->start;
+	length = range->end - start;
+	/* We are called with mmap_read_lock held, but registering the
+	 * mmu notifier requires mmap_write_lock.
+	 */
+	mmap_read_unlock(mm);
+	mmap_write_lock(mm);
+	ret = mmu_interval_notifier_insert_locked(&range->notifier, vma->vm_mm,
+						start, length, &xe_svm_mni_ops);
+	mmap_write_downgrade(mm);
+	if (ret)
+		return ret;
+
+	INIT_WORK(&range->unregister_notifier_work, xe_svm_unregister_notifier_work);
+	range->mmu_notifier_registered = true;
+	return ret;
+}
-- 
2.26.3


