From: Fuad Tabba <tabba@google.com>
To: kvmarm@lists.linux.dev
Cc: maz@kernel.org, will@kernel.org, qperret@google.com,
	tabba@google.com,  seanjc@google.com, alexandru.elisei@arm.com,
	catalin.marinas@arm.com,  philmd@linaro.org, james.morse@arm.com,
	suzuki.poulose@arm.com,  oliver.upton@linux.dev,
	mark.rutland@arm.com, broonie@kernel.org,  joey.gouly@arm.com,
	rananta@google.com
Subject: [PATCH v1 07/44] KVM: arm64: Support TLB invalidation in guest context
Date: Wed, 27 Mar 2024 17:34:54 +0000	[thread overview]
Message-ID: <20240327173531.1379685-8-tabba@google.com> (raw)
In-Reply-To: <20240327173531.1379685-1-tabba@google.com>

From: Will Deacon <will@kernel.org>

Typically, TLB invalidation of guest stage-2 mappings under nVHE is
performed by a hypercall originating from the host. For the
invalidation to take effect, __tlb_switch_to_{guest,host}() therefore
swizzle the active stage-2 context around the TLBI instruction.
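
For example, prior to this patch, __kvm_tlb_flush_vmid() wraps its
TLBI in exactly such a VMID swizzle (condensed from the diff below):

	__tlb_switch_to_guest(mmu, &cxt, false); /* load the guest VMID */

	__tlbi(vmalls12e1is);		/* TLBI runs against the guest VMID */
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);	/* restore the host VMID */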

With guest-to-host memory sharing and unsharing hypercalls
originating from the guest under pKVM, there is a need to support
both guest and host VMID invalidations issued from guest context.

Replace the __tlb_switch_to_{guest,host}() functions with a more general
{enter,exit}_vmid_context() implementation which supports being invoked
from guest context and acts as a no-op if the target context matches the
running context.
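
The context-matching logic at the top of enter_vmid_context() then
reduces to the following (a condensed sketch of the diff below; the
WARN_ON() sanity check, barriers and the ARM64_WORKAROUND_SPECULATIVE_AT
handling are elided):

	vcpu = host_ctxt->__hyp_running_vcpu; /* non-NULL in guest context */
	cxt->mmu = NULL;

	if (vcpu) {				/* guest context */
		if (mmu == vcpu->arch.hw_mmu)
			return;			/* target == running: no-op */
		cxt->mmu = vcpu->arch.hw_mmu;
		__load_host_stage2();		/* switch to the host VMID */
	} else {				/* host context */
		if (mmu == host_s2_mmu)
			return;			/* target == running: no-op */
		cxt->mmu = host_s2_mmu;
		__load_stage2(mmu, kern_hyp_va(mmu->arch));
	}

On the way back, exit_vmid_context() restores the saved context only
when cxt->mmu is non-NULL, i.e. only when enter_vmid_context()
actually switched.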

Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/kvm/hyp/nvhe/tlb.c | 114 +++++++++++++++++++++++++++-------
 1 file changed, 90 insertions(+), 24 deletions(-)

diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index a60fb13e2192..05a66b2ed76d 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -11,13 +11,23 @@
 #include <nvhe/mem_protect.h>
 
 struct tlb_inv_context {
-	u64		tcr;
+	struct kvm_s2_mmu	*mmu;
+	u64			tcr;
+	u64			sctlr;
 };
 
-static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
-				  struct tlb_inv_context *cxt,
-				  bool nsh)
+static void enter_vmid_context(struct kvm_s2_mmu *mmu,
+			       struct tlb_inv_context *cxt,
+			       bool nsh)
 {
+	struct kvm_s2_mmu *host_s2_mmu = &host_mmu.arch.mmu;
+	struct kvm_cpu_context *host_ctxt;
+	struct kvm_vcpu *vcpu;
+
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+	vcpu = host_ctxt->__hyp_running_vcpu;
+	cxt->mmu = NULL;
+
 	/*
 	 * We have two requirements:
 	 *
@@ -40,20 +50,52 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
 	else
 		dsb(ish);
 
+	/*
+	 * If we're already in the desired context, then there's nothing
+	 * to do.
+	 */
+	if (vcpu) {
+		/* We're in guest context */
+		if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu))
+			return;
+
+		cxt->mmu = vcpu->arch.hw_mmu;
+	} else {
+		/* We're in host context */
+		if (mmu == host_s2_mmu)
+			return;
+
+		cxt->mmu = host_s2_mmu;
+	}
+
 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		u64 val;
 
 		/*
 		 * For CPUs that are affected by ARM 1319367, we need to
-		 * avoid a host Stage-1 walk while we have the guest's
-		 * VMID set in the VTTBR in order to invalidate TLBs.
-		 * We're guaranteed that the S1 MMU is enabled, so we can
-		 * simply set the EPD bits to avoid any further TLB fill.
+		 * avoid a Stage-1 walk with the old VMID while we have
+		 * the new VMID set in the VTTBR in order to invalidate TLBs.
+		 * We're guaranteed that the host S1 MMU is enabled, so
+		 * we can simply set the EPD bits to avoid any further
+		 * TLB fill. For guests, we ensure that the S1 MMU is
+		 * temporarily enabled in the next context.
 		 */
 		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
 		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
 		write_sysreg_el1(val, SYS_TCR);
 		isb();
+
+		if (vcpu) {
+			val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
+			if (!(val & SCTLR_ELx_M)) {
+				val |= SCTLR_ELx_M;
+				write_sysreg_el1(val, SYS_SCTLR);
+				isb();
+			}
+		} else {
+			/* The host S1 MMU is always enabled. */
+			cxt->sctlr = SCTLR_ELx_M;
+		}
 	}
 
 	/*
@@ -62,20 +104,44 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
 	 * ensuring that we always have an ISB, but not two ISBs back
 	 * to back.
 	 */
-	__load_stage2(mmu, kern_hyp_va(mmu->arch));
+	if (vcpu)
+		__load_host_stage2();
+	else
+		__load_stage2(mmu, kern_hyp_va(mmu->arch));
+
 	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
-static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
+static void exit_vmid_context(struct tlb_inv_context *cxt)
 {
-	__load_host_stage2();
+	struct kvm_s2_mmu *mmu = cxt->mmu;
+	struct kvm_cpu_context *host_ctxt;
+	struct kvm_vcpu *vcpu;
+
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+	vcpu = host_ctxt->__hyp_running_vcpu;
+
+	if (!mmu)
+		return;
+
+	if (vcpu)
+		__load_stage2(mmu, kern_hyp_va(mmu->arch));
+	else
+		__load_host_stage2();
 
 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
-		/* Ensure write of the host VMID */
+		/* Ensure write of the old VMID */
 		isb();
-		/* Restore the host's TCR_EL1 */
+
+		if (!(cxt->sctlr & SCTLR_ELx_M)) {
+			write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
+			isb();
+		}
+
 		write_sysreg_el1(cxt->tcr, SYS_TCR);
 	}
+
+	cxt->mmu = NULL;
 }
 
 void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
@@ -84,7 +150,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
 	struct tlb_inv_context cxt;
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt, false);
+	enter_vmid_context(mmu, &cxt, false);
 
 	/*
 	 * We could do so much better if we had the VA as well.
@@ -105,7 +171,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
 	dsb(ish);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
@@ -114,7 +180,7 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
 	struct tlb_inv_context cxt;
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt, true);
+	enter_vmid_context(mmu, &cxt, true);
 
 	/*
 	 * We could do so much better if we had the VA as well.
@@ -135,7 +201,7 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
 	dsb(nsh);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
@@ -152,7 +218,7 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
 	start = round_down(start, stride);
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt, false);
+	enter_vmid_context(mmu, &cxt, false);
 
 	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
 
@@ -161,7 +227,7 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
 	dsb(ish);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
@@ -169,13 +235,13 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 	struct tlb_inv_context cxt;
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt, false);
+	enter_vmid_context(mmu, &cxt, false);
 
 	__tlbi(vmalls12e1is);
 	dsb(ish);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
@@ -183,19 +249,19 @@ void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
 	struct tlb_inv_context cxt;
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt, false);
+	enter_vmid_context(mmu, &cxt, false);
 
 	__tlbi(vmalle1);
 	asm volatile("ic iallu");
 	dsb(nsh);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_flush_vm_context(void)
 {
-	/* Same remark as in __tlb_switch_to_guest() */
+	/* Same remark as in enter_vmid_context() */
 	dsb(ish);
 	__tlbi(alle1is);
 	dsb(ish);
-- 
2.44.0.478.gd926399ef9-goog

