All the mail mirrored from lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2 0/2] LoongArch: Add steal time support
@ 2024-04-30  1:45 Bibo Mao
  2024-04-30  1:45 ` [PATCH v2 1/2] LoongArch: KVM: Add steal time support in kvm side Bibo Mao
  2024-04-30  1:45 ` [PATCH v2 2/2] LoongArch: Add steal time support in guest side Bibo Mao
  0 siblings, 2 replies; 5+ messages in thread
From: Bibo Mao @ 2024-04-30  1:45 UTC (permalink / raw
  To: Tianrui Zhao, Huacai Chen
  Cc: Juergen Gross, kvm, loongarch, linux-kernel, x86, virtualization

Para-virt feature steal time is added on both the kvm and guest kernel sides.
It is similar to other architectures: the steal time structure comes from
guest memory, and a pseudo register is used to save/restore the base address
of the steal time structure, so that vm migration is supported as well.

---
v2:
  1. Add PARAVIRT_TIME_ACCOUNTING kconfig option in file
arch/loongarch/Kconfig
  2. Function name change such as replace pv_register_steal_time with
pv_enable_steal_time etc

---
Bibo Mao (2):
  LoongArch: KVM: Add steal time support in kvm side
  LoongArch: Add steal time support in guest side

 arch/loongarch/Kconfig                 |  11 +++
 arch/loongarch/include/asm/kvm_host.h  |   7 ++
 arch/loongarch/include/asm/kvm_para.h  |  10 ++
 arch/loongarch/include/asm/loongarch.h |   1 +
 arch/loongarch/include/asm/paravirt.h  |   5 +
 arch/loongarch/include/uapi/asm/kvm.h  |   4 +
 arch/loongarch/kernel/paravirt.c       | 131 +++++++++++++++++++++++++
 arch/loongarch/kernel/time.c           |   2 +
 arch/loongarch/kvm/exit.c              |  29 +++++-
 arch/loongarch/kvm/vcpu.c              | 120 ++++++++++++++++++++++
 10 files changed, 318 insertions(+), 2 deletions(-)


base-commit: 2c8159388952f530bd260e097293ccc0209240be
-- 
2.39.3


^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH v2 1/2] LoongArch: KVM: Add steal time support in kvm side
  2024-04-30  1:45 [PATCH v2 0/2] LoongArch: Add steal time support Bibo Mao
@ 2024-04-30  1:45 ` Bibo Mao
  2024-04-30  1:45 ` [PATCH v2 2/2] LoongArch: Add steal time support in guest side Bibo Mao
  1 sibling, 0 replies; 5+ messages in thread
From: Bibo Mao @ 2024-04-30  1:45 UTC (permalink / raw
  To: Tianrui Zhao, Huacai Chen
  Cc: Juergen Gross, kvm, loongarch, linux-kernel, x86, virtualization

Steal time feature is added here in kvm side, VM can search supported
features provided by KVM hypervisor, feature KVM_FEATURE_STEAL_TIME
is added here. Like x86, steal time structure is saved in guest memory,
one hypercall function KVM_HCALL_FUNC_NOTIFY is added to notify KVM to
enable the feature.

One cpu attr ioctl command KVM_LOONGARCH_VCPU_PVTIME_CTRL is added to
save and restore base address of steal time structure when VM is migrated.

Since it needs hypercall instruction emulation handling, and it is
dependent on this patchset:
https://lore.kernel.org/all/20240428100518.1642324-1-maobibo@loongson.cn/

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
 arch/loongarch/include/asm/kvm_host.h  |   7 ++
 arch/loongarch/include/asm/kvm_para.h  |  10 +++
 arch/loongarch/include/asm/loongarch.h |   1 +
 arch/loongarch/include/uapi/asm/kvm.h  |   4 +
 arch/loongarch/kvm/exit.c              |  29 +++++-
 arch/loongarch/kvm/vcpu.c              | 120 +++++++++++++++++++++++++
 6 files changed, 169 insertions(+), 2 deletions(-)

diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index 1921750d4b4c..30bda553c54d 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -30,6 +30,7 @@
 #define KVM_PRIVATE_MEM_SLOTS		0
 
 #define KVM_HALT_POLL_NS_DEFAULT	500000
+#define KVM_REQ_RECORD_STEAL		KVM_ARCH_REQ(1)
 
 #define KVM_GUESTDBG_VALID_MASK		(KVM_GUESTDBG_ENABLE | \
 			KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP)
@@ -198,6 +199,12 @@ struct kvm_vcpu_arch {
 	struct kvm_mp_state mp_state;
 	/* cpucfg */
 	u32 cpucfg[KVM_MAX_CPUCFG_REGS];
+	/* paravirt steal time */
+	struct {
+		u64 guest_addr;
+		u64 last_steal;
+		struct gfn_to_hva_cache cache;
+	} st;
 };
 
 static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h
index 56775554402a..5fb89e20432d 100644
--- a/arch/loongarch/include/asm/kvm_para.h
+++ b/arch/loongarch/include/asm/kvm_para.h
@@ -12,6 +12,7 @@
 #define KVM_HCALL_CODE_SWDBG		1
 #define KVM_HCALL_PV_SERVICE		HYPERCALL_CODE(HYPERVISOR_KVM, KVM_HCALL_CODE_PV_SERVICE)
 #define  KVM_HCALL_FUNC_PV_IPI		1
+#define  KVM_HCALL_FUNC_NOTIFY		2
 #define KVM_HCALL_SWDBG			HYPERCALL_CODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG)
 
 /*
@@ -21,6 +22,15 @@
 #define KVM_HCALL_INVALID_CODE		-1UL
 #define KVM_HCALL_INVALID_PARAMETER	-2UL
 
+#define KVM_STEAL_PHYS_VALID		BIT_ULL(0)
+#define KVM_STEAL_PHYS_MASK		GENMASK_ULL(63, 6)
+struct kvm_steal_time {
+	__u64 steal;
+	__u32 version;
+	__u32 flags;
+	__u32 pad[12];
+};
+
 /*
  * Hypercall interface for KVM hypervisor
  *
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 0ad36704cb4b..ab6a5e93c280 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -168,6 +168,7 @@
 #define  KVM_SIGNATURE			"KVM\0"
 #define CPUCFG_KVM_FEATURE		(CPUCFG_KVM_BASE + 4)
 #define  KVM_FEATURE_PV_IPI		BIT(1)
+#define  KVM_FEATURE_STEAL_TIME		BIT(2)
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
index 8f78b23672ac..286b5ce93a57 100644
--- a/arch/loongarch/include/uapi/asm/kvm.h
+++ b/arch/loongarch/include/uapi/asm/kvm.h
@@ -80,7 +80,11 @@ struct kvm_fpu {
 #define LOONGARCH_REG_64(TYPE, REG)	(TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
 #define KVM_IOC_CSRID(REG)		LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
 #define KVM_IOC_CPUCFG(REG)		LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
+
+/* Device Control API on vcpu fd */
 #define KVM_LOONGARCH_VCPU_CPUCFG	0
+#define KVM_LOONGARCH_VCPU_PVTIME_CTRL	1
+#define  KVM_LOONGARCH_VCPU_PVTIME_GPA	0
 
 struct kvm_debug_exit_arch {
 };
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index 680aeaa4aeeb..af0f1c46e4eb 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -209,7 +209,7 @@ int kvm_emu_idle(struct kvm_vcpu *vcpu)
 static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
 {
 	int rd, rj;
-	unsigned int index;
+	unsigned int index, ret;
 
 	rd = inst.reg2_format.rd;
 	rj = inst.reg2_format.rj;
@@ -232,7 +232,10 @@ static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
 		vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
 		break;
 	case CPUCFG_KVM_FEATURE:
-		vcpu->arch.gprs[rd] = KVM_FEATURE_PV_IPI;
+		ret = KVM_FEATURE_PV_IPI;
+		if (sched_info_on())
+			ret |= KVM_FEATURE_STEAL_TIME;
+		vcpu->arch.gprs[rd] = ret;
 		break;
 	default:
 		vcpu->arch.gprs[rd] = 0;
@@ -738,6 +741,25 @@ static int kvm_pv_send_ipi(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static int kvm_save_notify(struct kvm_vcpu *vcpu)
+{
+	unsigned long id, data;
+
+	id   = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
+	data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
+	switch (id) {
+	case KVM_FEATURE_STEAL_TIME:
+		vcpu->arch.st.guest_addr = data;
+		vcpu->arch.st.last_steal = current->sched_info.run_delay;
+		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
+		break;
+	default:
+		break;
+	};
+
+	return 0;
+};
+
 /*
  * hypercall emulation always return to guest, Caller should check retval.
  */
@@ -751,6 +773,9 @@ static void kvm_handle_pv_service(struct kvm_vcpu *vcpu)
 		kvm_pv_send_ipi(vcpu);
 		ret = KVM_HCALL_STATUS_SUCCESS;
 		break;
+	case KVM_HCALL_FUNC_NOTIFY:
+		ret = kvm_save_notify(vcpu);
+		break;
 	default:
 		ret = KVM_HCALL_INVALID_CODE;
 		break;
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 71a4d10b3ff6..4289a0f545fe 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -31,6 +31,113 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
 		       sizeof(kvm_vcpu_stats_desc),
 };
 
+static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
+{
+	struct kvm_steal_time __user *st;
+	struct gfn_to_hva_cache *ghc;
+	struct kvm_memslots *slots;
+	gpa_t gpa;
+	u64 steal;
+	u32 version;
+
+	ghc = &vcpu->arch.st.cache;
+	gpa = vcpu->arch.st.guest_addr;
+	if (!(gpa & KVM_STEAL_PHYS_VALID))
+		return;
+
+	gpa &= KVM_STEAL_PHYS_MASK;
+	slots = kvm_memslots(vcpu->kvm);
+	if (slots->generation != ghc->generation || gpa != ghc->gpa) {
+		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa,
+					sizeof(*st))) {
+			ghc->gpa = INVALID_GPA;
+			return;
+		}
+	}
+
+	st = (struct kvm_steal_time __user *)ghc->hva;
+	unsafe_get_user(version, &st->version, out);
+	if (version & 1)
+		version += 1;
+	version += 1;
+	unsafe_put_user(version, &st->version, out);
+	smp_wmb();
+
+	unsafe_get_user(steal, &st->steal, out);
+	steal += current->sched_info.run_delay -
+		vcpu->arch.st.last_steal;
+	vcpu->arch.st.last_steal = current->sched_info.run_delay;
+	unsafe_put_user(steal, &st->steal, out);
+
+	smp_wmb();
+	version += 1;
+	unsafe_put_user(version, &st->version, out);
+out:
+	mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
+}
+
+static bool kvm_pvtime_supported(void)
+{
+	return !!sched_info_on();
+}
+
+static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
+					struct kvm_device_attr *attr)
+{
+	u64 __user *user = (u64 __user *)attr->addr;
+	struct kvm *kvm = vcpu->kvm;
+	u64 gpa;
+	int ret = 0;
+	int idx;
+
+	if (!kvm_pvtime_supported() ||
+			attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+		return -ENXIO;
+
+	if (get_user(gpa, user))
+		return -EFAULT;
+
+	/* Check the address is in a valid memslot */
+	idx = srcu_read_lock(&kvm->srcu);
+	if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
+		ret = -EINVAL;
+	srcu_read_unlock(&kvm->srcu, idx);
+
+	if (!ret)
+		vcpu->arch.st.guest_addr = gpa;
+
+	return ret;
+}
+
+static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
+					struct kvm_device_attr *attr)
+{
+	u64 __user *user = (u64 __user *)attr->addr;
+	u64 gpa;
+
+	if (!kvm_pvtime_supported() ||
+			attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+		return -ENXIO;
+
+	gpa = vcpu->arch.st.guest_addr;
+	if (put_user(gpa, user))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
+					struct kvm_device_attr *attr)
+{
+	switch (attr->attr) {
+	case KVM_LOONGARCH_VCPU_PVTIME_GPA:
+		if (kvm_pvtime_supported())
+			return 0;
+	}
+
+	return -ENXIO;
+}
+
 /*
  * kvm_check_requests - check and handle pending vCPU requests
  *
@@ -48,6 +155,9 @@ static int kvm_check_requests(struct kvm_vcpu *vcpu)
 	if (kvm_dirty_ring_check_request(vcpu))
 		return RESUME_HOST;
 
+	if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
+		kvm_update_stolen_time(vcpu);
+
 	return RESUME_GUEST;
 }
 
@@ -670,6 +780,9 @@ static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
 	case KVM_LOONGARCH_VCPU_CPUCFG:
 		ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
 		break;
+	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+		ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
+		break;
 	default:
 		break;
 	}
@@ -702,6 +815,9 @@ static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
 	case KVM_LOONGARCH_VCPU_CPUCFG:
 		ret = kvm_loongarch_get_cpucfg_attr(vcpu, attr);
 		break;
+	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+		ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
+		break;
 	default:
 		break;
 	}
@@ -724,6 +840,9 @@ static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
 	case KVM_LOONGARCH_VCPU_CPUCFG:
 		ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
 		break;
+	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+		ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
+		break;
 	default:
 		break;
 	}
@@ -1082,6 +1201,7 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	/* Control guest page CCA attribute */
 	change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
+	kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
 
 	/* Don't bother restoring registers multiple times unless necessary */
 	if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH v2 2/2] LoongArch: Add steal time support in guest side
  2024-04-30  1:45 [PATCH v2 0/2] LoongArch: Add steal time support Bibo Mao
  2024-04-30  1:45 ` [PATCH v2 1/2] LoongArch: KVM: Add steal time support in kvm side Bibo Mao
@ 2024-04-30  1:45 ` Bibo Mao
  2024-04-30  8:23   ` Huacai Chen
  1 sibling, 1 reply; 5+ messages in thread
From: Bibo Mao @ 2024-04-30  1:45 UTC (permalink / raw
  To: Tianrui Zhao, Huacai Chen
  Cc: Juergen Gross, kvm, loongarch, linux-kernel, x86, virtualization

Percpu struct kvm_steal_time is added here; its size is 64 bytes and it
is also aligned to 64 bytes, so that the whole structure fits in one
physical page.

When vcpu is onlined, function pv_enable_steal_time() is called. This
function will pass guest physical address of struct kvm_steal_time and
tells hypervisor to enable steal time. When vcpu is offline, physical
address is set as 0 and tells hypervisor to disable steal time.

Here is output of vmstat on guest when there is workload on both host
and guest. It includes steal time stat information.

procs -----------memory---------- -----io---- -system-- ------cpu-----
 r  b   swpd   free  inact active   bi    bo   in   cs us sy id wa st
15  1      0 7583616 184112  72208    20    0  162   52 31  6 43  0 20
17  0      0 7583616 184704  72192    0     0 6318 6885  5 60  8  5 22
16  0      0 7583616 185392  72144    0     0 1766 1081  0 49  0  1 50
16  0      0 7583616 184816  72304    0     0 6300 6166  4 62 12  2 20
18  0      0 7583632 184480  72240    0     0 2814 1754  2 58  4  1 35

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
 arch/loongarch/Kconfig                |  11 +++
 arch/loongarch/include/asm/paravirt.h |   5 +
 arch/loongarch/kernel/paravirt.c      | 131 ++++++++++++++++++++++++++
 arch/loongarch/kernel/time.c          |   2 +
 4 files changed, 149 insertions(+)

diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 0a1540a8853e..f3a03c33a052 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -592,6 +592,17 @@ config PARAVIRT
 	  over full virtualization.  However, when run without a hypervisor
 	  the kernel is theoretically slower and slightly larger.
 
+config PARAVIRT_TIME_ACCOUNTING
+	bool "Paravirtual steal time accounting"
+	select PARAVIRT
+	help
+	  Select this option to enable fine granularity task steal time
+	  accounting. Time spent executing other tasks in parallel with
+	  the current vCPU is discounted from the vCPU power. To account for
+	  that, there can be a small performance impact.
+
+	  If in doubt, say N here.
+
 config ARCH_SUPPORTS_KEXEC
 	def_bool y
 
diff --git a/arch/loongarch/include/asm/paravirt.h b/arch/loongarch/include/asm/paravirt.h
index 58f7b7b89f2c..fe27fb5e82b8 100644
--- a/arch/loongarch/include/asm/paravirt.h
+++ b/arch/loongarch/include/asm/paravirt.h
@@ -17,11 +17,16 @@ static inline u64 paravirt_steal_clock(int cpu)
 }
 
 int pv_ipi_init(void);
+int __init pv_time_init(void);
 #else
 static inline int pv_ipi_init(void)
 {
 	return 0;
 }
 
+static inline int pv_time_init(void)
+{
+	return 0;
+}
 #endif // CONFIG_PARAVIRT
 #endif
diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c
index 9044ed62045c..3f83afc7e2d2 100644
--- a/arch/loongarch/kernel/paravirt.c
+++ b/arch/loongarch/kernel/paravirt.c
@@ -5,10 +5,13 @@
 #include <linux/jump_label.h>
 #include <linux/kvm_para.h>
 #include <asm/paravirt.h>
+#include <linux/reboot.h>
 #include <linux/static_call.h>
 
 struct static_key paravirt_steal_enabled;
 struct static_key paravirt_steal_rq_enabled;
+static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
+static int has_steal_clock;
 
 static u64 native_steal_clock(int cpu)
 {
@@ -17,6 +20,57 @@ static u64 native_steal_clock(int cpu)
 
 DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
 
+static bool steal_acc = true;
+static int __init parse_no_stealacc(char *arg)
+{
+	steal_acc = false;
+	return 0;
+}
+early_param("no-steal-acc", parse_no_stealacc);
+
+static u64 para_steal_clock(int cpu)
+{
+	u64 steal;
+	struct kvm_steal_time *src;
+	int version;
+
+	src = &per_cpu(steal_time, cpu);
+	do {
+
+		version = src->version;
+		/* Make sure that the version is read before the steal */
+		virt_rmb();
+		steal = src->steal;
+		/* Make sure that the steal is read before the next version */
+		virt_rmb();
+
+	} while ((version & 1) || (version != src->version));
+	return steal;
+}
+
+static int pv_enable_steal_time(void)
+{
+	int cpu = smp_processor_id();
+	struct kvm_steal_time *st;
+	unsigned long addr;
+
+	if (!has_steal_clock)
+		return -EPERM;
+
+	st = &per_cpu(steal_time, cpu);
+	addr = per_cpu_ptr_to_phys(st);
+
+	/* The whole structure kvm_steal_time should be one page */
+	if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) {
+		pr_warn("Illegal PV steal time addr %lx\n", addr);
+		return -EFAULT;
+	}
+
+	addr |= KVM_STEAL_PHYS_VALID;
+	kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr);
+	return 0;
+}
+
 #ifdef CONFIG_SMP
 static void pv_send_ipi_single(int cpu, unsigned int action)
 {
@@ -110,6 +164,32 @@ static void pv_init_ipi(void)
 	if (r < 0)
 		panic("SWI0 IRQ request failed\n");
 }
+
+static void pv_disable_steal_time(void)
+{
+	if (has_steal_clock)
+		kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0);
+}
+
+static int pv_time_cpu_online(unsigned int cpu)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	pv_enable_steal_time();
+	local_irq_restore(flags);
+	return 0;
+}
+
+static int pv_time_cpu_down_prepare(unsigned int cpu)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	pv_disable_steal_time();
+	local_irq_restore(flags);
+	return 0;
+}
 #endif
 
 static bool kvm_para_available(void)
@@ -149,3 +229,54 @@ int __init pv_ipi_init(void)
 
 	return 1;
 }
+
+static void pv_cpu_reboot(void *unused)
+{
+	pv_disable_steal_time();
+}
+
+static int pv_reboot_notify(struct notifier_block *nb, unsigned long code,
+		void *unused)
+{
+	on_each_cpu(pv_cpu_reboot, NULL, 1);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block pv_reboot_nb = {
+	.notifier_call  = pv_reboot_notify,
+};
+
+int __init pv_time_init(void)
+{
+	int feature;
+
+	if (!cpu_has_hypervisor)
+		return 0;
+	if (!kvm_para_available())
+		return 0;
+
+	feature = read_cpucfg(CPUCFG_KVM_FEATURE);
+	if (!(feature & KVM_FEATURE_STEAL_TIME))
+		return 0;
+
+	has_steal_clock = 1;
+	if (pv_enable_steal_time()) {
+		has_steal_clock = 0;
+		return 0;
+	}
+
+	register_reboot_notifier(&pv_reboot_nb);
+	static_call_update(pv_steal_clock, para_steal_clock);
+	static_key_slow_inc(&paravirt_steal_enabled);
+	if (steal_acc)
+		static_key_slow_inc(&paravirt_steal_rq_enabled);
+
+#ifdef CONFIG_SMP
+	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+			"loongarch/pvi_time:online",
+			pv_time_cpu_online, pv_time_cpu_down_prepare) < 0)
+		pr_err("Failed to install cpu hotplug callbacks\n");
+#endif
+	pr_info("Using stolen time PV\n");
+	return 0;
+}
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
index fd5354f9be7c..46d7d40c87e3 100644
--- a/arch/loongarch/kernel/time.c
+++ b/arch/loongarch/kernel/time.c
@@ -15,6 +15,7 @@
 
 #include <asm/cpu-features.h>
 #include <asm/loongarch.h>
+#include <asm/paravirt.h>
 #include <asm/time.h>
 
 u64 cpu_clock_freq;
@@ -214,4 +215,5 @@ void __init time_init(void)
 
 	constant_clockevent_init();
 	constant_clocksource_init();
+	pv_time_init();
 }
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH v2 2/2] LoongArch: Add steal time support in guest side
  2024-04-30  1:45 ` [PATCH v2 2/2] LoongArch: Add steal time support in guest side Bibo Mao
@ 2024-04-30  8:23   ` Huacai Chen
  2024-04-30  9:21     ` maobibo
  0 siblings, 1 reply; 5+ messages in thread
From: Huacai Chen @ 2024-04-30  8:23 UTC (permalink / raw
  To: Bibo Mao
  Cc: Tianrui Zhao, Juergen Gross, kvm, loongarch, linux-kernel, x86,
	virtualization

Hi, Bibo,

On Tue, Apr 30, 2024 at 9:45 AM Bibo Mao <maobibo@loongson.cn> wrote:
>
> Percpu struct kvm_steal_time is added here, its size is 64 bytes and
> also defined as 64 bytes, so that the whole structure is in one physical
> page.
>
> When vcpu is onlined, function pv_enable_steal_time() is called. This
> function will pass guest physical address of struct kvm_steal_time and
> tells hypervisor to enable steal time. When vcpu is offline, physical
> address is set as 0 and tells hypervisor to disable steal time.
>
> Here is output of vmstat on guest when there is workload on both host
> and guest. It includes steal time stat information.
>
> procs -----------memory---------- -----io---- -system-- ------cpu-----
>  r  b   swpd   free  inact active   bi    bo   in   cs us sy id wa st
> 15  1      0 7583616 184112  72208    20    0  162   52 31  6 43  0 20
> 17  0      0 7583616 184704  72192    0     0 6318 6885  5 60  8  5 22
> 16  0      0 7583616 185392  72144    0     0 1766 1081  0 49  0  1 50
> 16  0      0 7583616 184816  72304    0     0 6300 6166  4 62 12  2 20
> 18  0      0 7583632 184480  72240    0     0 2814 1754  2 58  4  1 35
>
> Signed-off-by: Bibo Mao <maobibo@loongson.cn>
> ---
>  arch/loongarch/Kconfig                |  11 +++
>  arch/loongarch/include/asm/paravirt.h |   5 +
>  arch/loongarch/kernel/paravirt.c      | 131 ++++++++++++++++++++++++++
>  arch/loongarch/kernel/time.c          |   2 +
>  4 files changed, 149 insertions(+)
>
> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
> index 0a1540a8853e..f3a03c33a052 100644
> --- a/arch/loongarch/Kconfig
> +++ b/arch/loongarch/Kconfig
> @@ -592,6 +592,17 @@ config PARAVIRT
>           over full virtualization.  However, when run without a hypervisor
>           the kernel is theoretically slower and slightly larger.
>
> +config PARAVIRT_TIME_ACCOUNTING
> +       bool "Paravirtual steal time accounting"
> +       select PARAVIRT
> +       help
> +         Select this option to enable fine granularity task steal time
> +         accounting. Time spent executing other tasks in parallel with
> +         the current vCPU is discounted from the vCPU power. To account for
> +         that, there can be a small performance impact.
> +
> +         If in doubt, say N here.
> +
Can we use a hidden selection manner, which means:

config PARAVIRT_TIME_ACCOUNTING
       def_bool PARAVIRT

Because I think we needn't give too many choices to users (and bring
much more effort to test).

PowerPC even hide all the PARAVIRT config options...
see arch/powerpc/platforms/pseries/Kconfig

Huacai

>  config ARCH_SUPPORTS_KEXEC
>         def_bool y
>
> diff --git a/arch/loongarch/include/asm/paravirt.h b/arch/loongarch/include/asm/paravirt.h
> index 58f7b7b89f2c..fe27fb5e82b8 100644
> --- a/arch/loongarch/include/asm/paravirt.h
> +++ b/arch/loongarch/include/asm/paravirt.h
> @@ -17,11 +17,16 @@ static inline u64 paravirt_steal_clock(int cpu)
>  }
>
>  int pv_ipi_init(void);
> +int __init pv_time_init(void);
>  #else
>  static inline int pv_ipi_init(void)
>  {
>         return 0;
>  }
>
> +static inline int pv_time_init(void)
> +{
> +       return 0;
> +}
>  #endif // CONFIG_PARAVIRT
>  #endif
> diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c
> index 9044ed62045c..3f83afc7e2d2 100644
> --- a/arch/loongarch/kernel/paravirt.c
> +++ b/arch/loongarch/kernel/paravirt.c
> @@ -5,10 +5,13 @@
>  #include <linux/jump_label.h>
>  #include <linux/kvm_para.h>
>  #include <asm/paravirt.h>
> +#include <linux/reboot.h>
>  #include <linux/static_call.h>
>
>  struct static_key paravirt_steal_enabled;
>  struct static_key paravirt_steal_rq_enabled;
> +static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
> +static int has_steal_clock;
>
>  static u64 native_steal_clock(int cpu)
>  {
> @@ -17,6 +20,57 @@ static u64 native_steal_clock(int cpu)
>
>  DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
>
> +static bool steal_acc = true;
> +static int __init parse_no_stealacc(char *arg)
> +{
> +       steal_acc = false;
> +       return 0;
> +}
> +early_param("no-steal-acc", parse_no_stealacc);
> +
> +static u64 para_steal_clock(int cpu)
> +{
> +       u64 steal;
> +       struct kvm_steal_time *src;
> +       int version;
> +
> +       src = &per_cpu(steal_time, cpu);
> +       do {
> +
> +               version = src->version;
> +               /* Make sure that the version is read before the steal */
> +               virt_rmb();
> +               steal = src->steal;
> +               /* Make sure that the steal is read before the next version */
> +               virt_rmb();
> +
> +       } while ((version & 1) || (version != src->version));
> +       return steal;
> +}
> +
> +static int pv_enable_steal_time(void)
> +{
> +       int cpu = smp_processor_id();
> +       struct kvm_steal_time *st;
> +       unsigned long addr;
> +
> +       if (!has_steal_clock)
> +               return -EPERM;
> +
> +       st = &per_cpu(steal_time, cpu);
> +       addr = per_cpu_ptr_to_phys(st);
> +
> +       /* The whole structure kvm_steal_time should be one page */
> +       if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) {
> +               pr_warn("Illegal PV steal time addr %lx\n", addr);
> +               return -EFAULT;
> +       }
> +
> +       addr |= KVM_STEAL_PHYS_VALID;
> +       kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr);
> +       return 0;
> +}
> +
>  #ifdef CONFIG_SMP
>  static void pv_send_ipi_single(int cpu, unsigned int action)
>  {
> @@ -110,6 +164,32 @@ static void pv_init_ipi(void)
>         if (r < 0)
>                 panic("SWI0 IRQ request failed\n");
>  }
> +
> +static void pv_disable_steal_time(void)
> +{
> +       if (has_steal_clock)
> +               kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0);
> +}
> +
> +static int pv_time_cpu_online(unsigned int cpu)
> +{
> +       unsigned long flags;
> +
> +       local_irq_save(flags);
> +       pv_enable_steal_time();
> +       local_irq_restore(flags);
> +       return 0;
> +}
> +
> +static int pv_time_cpu_down_prepare(unsigned int cpu)
> +{
> +       unsigned long flags;
> +
> +       local_irq_save(flags);
> +       pv_disable_steal_time();
> +       local_irq_restore(flags);
> +       return 0;
> +}
>  #endif
>
>  static bool kvm_para_available(void)
> @@ -149,3 +229,54 @@ int __init pv_ipi_init(void)
>
>         return 1;
>  }
> +
> +static void pv_cpu_reboot(void *unused)
> +{
> +       pv_disable_steal_time();
> +}
> +
> +static int pv_reboot_notify(struct notifier_block *nb, unsigned long code,
> +               void *unused)
> +{
> +       on_each_cpu(pv_cpu_reboot, NULL, 1);
> +       return NOTIFY_DONE;
> +}
> +
> +static struct notifier_block pv_reboot_nb = {
> +       .notifier_call  = pv_reboot_notify,
> +};
> +
> +int __init pv_time_init(void)
> +{
> +       int feature;
> +
> +       if (!cpu_has_hypervisor)
> +               return 0;
> +       if (!kvm_para_available())
> +               return 0;
> +
> +       feature = read_cpucfg(CPUCFG_KVM_FEATURE);
> +       if (!(feature & KVM_FEATURE_STEAL_TIME))
> +               return 0;
> +
> +       has_steal_clock = 1;
> +       if (pv_enable_steal_time()) {
> +               has_steal_clock = 0;
> +               return 0;
> +       }
> +
> +       register_reboot_notifier(&pv_reboot_nb);
> +       static_call_update(pv_steal_clock, para_steal_clock);
> +       static_key_slow_inc(&paravirt_steal_enabled);
> +       if (steal_acc)
> +               static_key_slow_inc(&paravirt_steal_rq_enabled);
> +
> +#ifdef CONFIG_SMP
> +       if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
> +                       "loongarch/pvi_time:online",
> +                       pv_time_cpu_online, pv_time_cpu_down_prepare) < 0)
> +               pr_err("Failed to install cpu hotplug callbacks\n");
> +#endif
> +       pr_info("Using stolen time PV\n");
> +       return 0;
> +}
> diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
> index fd5354f9be7c..46d7d40c87e3 100644
> --- a/arch/loongarch/kernel/time.c
> +++ b/arch/loongarch/kernel/time.c
> @@ -15,6 +15,7 @@
>
>  #include <asm/cpu-features.h>
>  #include <asm/loongarch.h>
> +#include <asm/paravirt.h>
>  #include <asm/time.h>
>
>  u64 cpu_clock_freq;
> @@ -214,4 +215,5 @@ void __init time_init(void)
>
>         constant_clockevent_init();
>         constant_clocksource_init();
> +       pv_time_init();
>  }
> --
> 2.39.3
>
>

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v2 2/2] LoongArch: Add steal time support in guest side
  2024-04-30  8:23   ` Huacai Chen
@ 2024-04-30  9:21     ` maobibo
  0 siblings, 0 replies; 5+ messages in thread
From: maobibo @ 2024-04-30  9:21 UTC (permalink / raw
  To: Huacai Chen
  Cc: Tianrui Zhao, Juergen Gross, kvm, loongarch, linux-kernel, x86,
	virtualization



On 2024/4/30 下午4:23, Huacai Chen wrote:
> Hi, Bibo,
> 
> On Tue, Apr 30, 2024 at 9:45 AM Bibo Mao <maobibo@loongson.cn> wrote:
>>
>> Percpu struct kvm_steal_time is added here, its size is 64 bytes and
>> also defined as 64 bytes, so that the whole structure is in one physical
>> page.
>>
>> When vcpu is onlined, function pv_enable_steal_time() is called. This
>> function will pass guest physical address of struct kvm_steal_time and
>> tells hypervisor to enable steal time. When vcpu is offline, physical
>> address is set as 0 and tells hypervisor to disable steal time.
>>
>> Here is output of vmstat on guest when there is workload on both host
>> and guest. It includes steal time stat information.
>>
>> procs -----------memory---------- -----io---- -system-- ------cpu-----
>>   r  b   swpd   free  inact active   bi    bo   in   cs us sy id wa st
>> 15  1      0 7583616 184112  72208    20    0  162   52 31  6 43  0 20
>> 17  0      0 7583616 184704  72192    0     0 6318 6885  5 60  8  5 22
>> 16  0      0 7583616 185392  72144    0     0 1766 1081  0 49  0  1 50
>> 16  0      0 7583616 184816  72304    0     0 6300 6166  4 62 12  2 20
>> 18  0      0 7583632 184480  72240    0     0 2814 1754  2 58  4  1 35
>>
>> Signed-off-by: Bibo Mao <maobibo@loongson.cn>
>> ---
>>   arch/loongarch/Kconfig                |  11 +++
>>   arch/loongarch/include/asm/paravirt.h |   5 +
>>   arch/loongarch/kernel/paravirt.c      | 131 ++++++++++++++++++++++++++
>>   arch/loongarch/kernel/time.c          |   2 +
>>   4 files changed, 149 insertions(+)
>>
>> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
>> index 0a1540a8853e..f3a03c33a052 100644
>> --- a/arch/loongarch/Kconfig
>> +++ b/arch/loongarch/Kconfig
>> @@ -592,6 +592,17 @@ config PARAVIRT
>>            over full virtualization.  However, when run without a hypervisor
>>            the kernel is theoretically slower and slightly larger.
>>
>> +config PARAVIRT_TIME_ACCOUNTING
>> +       bool "Paravirtual steal time accounting"
>> +       select PARAVIRT
>> +       help
>> +         Select this option to enable fine granularity task steal time
>> +         accounting. Time spent executing other tasks in parallel with
>> +         the current vCPU is discounted from the vCPU power. To account for
>> +         that, there can be a small performance impact.
>> +
>> +         If in doubt, say N here.
>> +
> Can we use a hidden selection manner, which means:
> 
> config PARAVIRT_TIME_ACCOUNTING
>         def_bool PARAVIRT
> 
> Because I think we needn't give too many choices to users (and bring
> much more effort to test).
> 
> PowerPC even hide all the PARAVIRT config options...
> see arch/powerpc/platforms/pseries/Kconfig

I do not know either :(  It is just being used for the first time; maybe
there is a negative effect or potential issue, so I just think it is better
if it can be selected by users for easier debugging. After it matures, the
hidden selection manner can be used.

Regards
Bibo Mao
> 
> Huacai
> 
>>   config ARCH_SUPPORTS_KEXEC
>>          def_bool y
>>
>> diff --git a/arch/loongarch/include/asm/paravirt.h b/arch/loongarch/include/asm/paravirt.h
>> index 58f7b7b89f2c..fe27fb5e82b8 100644
>> --- a/arch/loongarch/include/asm/paravirt.h
>> +++ b/arch/loongarch/include/asm/paravirt.h
>> @@ -17,11 +17,16 @@ static inline u64 paravirt_steal_clock(int cpu)
>>   }
>>
>>   int pv_ipi_init(void);
>> +int __init pv_time_init(void);
>>   #else
>>   static inline int pv_ipi_init(void)
>>   {
>>          return 0;
>>   }
>>
>> +static inline int pv_time_init(void)
>> +{
>> +       return 0;
>> +}
>>   #endif // CONFIG_PARAVIRT
>>   #endif
>> diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c
>> index 9044ed62045c..3f83afc7e2d2 100644
>> --- a/arch/loongarch/kernel/paravirt.c
>> +++ b/arch/loongarch/kernel/paravirt.c
>> @@ -5,10 +5,13 @@
>>   #include <linux/jump_label.h>
>>   #include <linux/kvm_para.h>
>>   #include <asm/paravirt.h>
>> +#include <linux/reboot.h>
>>   #include <linux/static_call.h>
>>
>>   struct static_key paravirt_steal_enabled;
>>   struct static_key paravirt_steal_rq_enabled;
>> +static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
>> +static int has_steal_clock;
>>
>>   static u64 native_steal_clock(int cpu)
>>   {
>> @@ -17,6 +20,57 @@ static u64 native_steal_clock(int cpu)
>>
>>   DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
>>
>> +static bool steal_acc = true;
>> +static int __init parse_no_stealacc(char *arg)
>> +{
>> +       steal_acc = false;
>> +       return 0;
>> +}
>> +early_param("no-steal-acc", parse_no_stealacc);
>> +
>> +static u64 para_steal_clock(int cpu)
>> +{
>> +       u64 steal;
>> +       struct kvm_steal_time *src;
>> +       int version;
>> +
>> +       src = &per_cpu(steal_time, cpu);
>> +       do {
>> +
>> +               version = src->version;
>> +               /* Make sure that the version is read before the steal */
>> +               virt_rmb();
>> +               steal = src->steal;
>> +               /* Make sure that the steal is read before the next version */
>> +               virt_rmb();
>> +
>> +       } while ((version & 1) || (version != src->version));
>> +       return steal;
>> +}
>> +
>> +static int pv_enable_steal_time(void)
>> +{
>> +       int cpu = smp_processor_id();
>> +       struct kvm_steal_time *st;
>> +       unsigned long addr;
>> +
>> +       if (!has_steal_clock)
>> +               return -EPERM;
>> +
>> +       st = &per_cpu(steal_time, cpu);
>> +       addr = per_cpu_ptr_to_phys(st);
>> +
>> +       /* The whole structure kvm_steal_time should be one page */
>> +       if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) {
>> +               pr_warn("Illegal PV steal time addr %lx\n", addr);
>> +               return -EFAULT;
>> +       }
>> +
>> +       addr |= KVM_STEAL_PHYS_VALID;
>> +       kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr);
>> +       return 0;
>> +}
>> +
>>   #ifdef CONFIG_SMP
>>   static void pv_send_ipi_single(int cpu, unsigned int action)
>>   {
>> @@ -110,6 +164,32 @@ static void pv_init_ipi(void)
>>          if (r < 0)
>>                  panic("SWI0 IRQ request failed\n");
>>   }
>> +
>> +static void pv_disable_steal_time(void)
>> +{
>> +       if (has_steal_clock)
>> +               kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0);
>> +}
>> +
>> +static int pv_time_cpu_online(unsigned int cpu)
>> +{
>> +       unsigned long flags;
>> +
>> +       local_irq_save(flags);
>> +       pv_enable_steal_time();
>> +       local_irq_restore(flags);
>> +       return 0;
>> +}
>> +
>> +static int pv_time_cpu_down_prepare(unsigned int cpu)
>> +{
>> +       unsigned long flags;
>> +
>> +       local_irq_save(flags);
>> +       pv_disable_steal_time();
>> +       local_irq_restore(flags);
>> +       return 0;
>> +}
>>   #endif
>>
>>   static bool kvm_para_available(void)
>> @@ -149,3 +229,54 @@ int __init pv_ipi_init(void)
>>
>>          return 1;
>>   }
>> +
>> +static void pv_cpu_reboot(void *unused)
>> +{
>> +       pv_disable_steal_time();
>> +}
>> +
>> +static int pv_reboot_notify(struct notifier_block *nb, unsigned long code,
>> +               void *unused)
>> +{
>> +       on_each_cpu(pv_cpu_reboot, NULL, 1);
>> +       return NOTIFY_DONE;
>> +}
>> +
>> +static struct notifier_block pv_reboot_nb = {
>> +       .notifier_call  = pv_reboot_notify,
>> +};
>> +
>> +int __init pv_time_init(void)
>> +{
>> +       int feature;
>> +
>> +       if (!cpu_has_hypervisor)
>> +               return 0;
>> +       if (!kvm_para_available())
>> +               return 0;
>> +
>> +       feature = read_cpucfg(CPUCFG_KVM_FEATURE);
>> +       if (!(feature & KVM_FEATURE_STEAL_TIME))
>> +               return 0;
>> +
>> +       has_steal_clock = 1;
>> +       if (pv_enable_steal_time()) {
>> +               has_steal_clock = 0;
>> +               return 0;
>> +       }
>> +
>> +       register_reboot_notifier(&pv_reboot_nb);
>> +       static_call_update(pv_steal_clock, para_steal_clock);
>> +       static_key_slow_inc(&paravirt_steal_enabled);
>> +       if (steal_acc)
>> +               static_key_slow_inc(&paravirt_steal_rq_enabled);
>> +
>> +#ifdef CONFIG_SMP
>> +       if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
>> +                       "loongarch/pvi_time:online",
>> +                       pv_time_cpu_online, pv_time_cpu_down_prepare) < 0)
>> +               pr_err("Failed to install cpu hotplug callbacks\n");
>> +#endif
>> +       pr_info("Using stolen time PV\n");
>> +       return 0;
>> +}
>> diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
>> index fd5354f9be7c..46d7d40c87e3 100644
>> --- a/arch/loongarch/kernel/time.c
>> +++ b/arch/loongarch/kernel/time.c
>> @@ -15,6 +15,7 @@
>>
>>   #include <asm/cpu-features.h>
>>   #include <asm/loongarch.h>
>> +#include <asm/paravirt.h>
>>   #include <asm/time.h>
>>
>>   u64 cpu_clock_freq;
>> @@ -214,4 +215,5 @@ void __init time_init(void)
>>
>>          constant_clockevent_init();
>>          constant_clocksource_init();
>> +       pv_time_init();
>>   }
>> --
>> 2.39.3
>>
>>


^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2024-04-30  9:22 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2024-04-30  1:45 [PATCH v2 0/2] LoongArch: Add steal time support Bibo Mao
2024-04-30  1:45 ` [PATCH v2 1/2] LoongArch: KVM: Add steal time support in kvm side Bibo Mao
2024-04-30  1:45 ` [PATCH v2 2/2] LoongArch: Add steal time support in guest side Bibo Mao
2024-04-30  8:23   ` Huacai Chen
2024-04-30  9:21     ` maobibo

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.