From: mark gross <mark.gross@intel.com>
To: speck@linutronix.de
Subject: [MODERATED] [PATCH 2/2] more sampling fun 2
Date: Thu, 16 Jan 2020 14:16:07 -0800
Message-ID: <c5bae80efe4694c81d9cbbce633a2228086a330c.1582152322.git.mgross@linux.intel.com>

From: mark gross <mark.gross@intel.com>
Subject: [PATCH 2/2] WIP SRBDS mitigation enabling.

SRBDS is an MDS-like speculative side channel that can leak bits from
the RNG across cores and threads. New microcode serializes processor
access during the execution of RDRAND and RDSEED and ensures that the
shared buffer is overwritten before it is released for reuse.

We subdivide processors that are vulnerable to SRBDS into two classes:
X86_BUG_SRBDS:          models that are always vulnerable
X86_BUG_SRBDS_TSX:      models that are vulnerable only when TSX is enabled.

The latter are not vulnerable to SRBDS if TSX is disabled on all cores.
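
For illustration only (not part of the patch): a user-space sketch using
the GCC/clang <cpuid.h> helper that checks whether RTM (TSX) is currently
enumerated, i.e. whether an X86_BUG_SRBDS_TSX class part is actually
exposed. When TSX is disabled through MSR_IA32_TSX_CTRL with the
CPUID-clear bit set, RTM reads as 0 here.

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID.(EAX=7, ECX=0):EBX bit 11 enumerates RTM (TSX). */
	if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) &&
	    (ebx & (1u << 11)))
		puts("RTM enumerated: TSX-only SRBDS parts are exposed");
	else
		puts("RTM not enumerated");
	return 0;
}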

The mitigation is enabled by default on affected processors. It slows
down RDRAND and RDSEED, and therefore /dev/urandom: the latency of these
instructions increases by about 10x. We don't expect this to be
noticeable in most workloads.
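
For illustration only: a rough user-space probe of RDRAND latency, the
kind of measurement behind the 10x figure above. It assumes an x86-64
compiler providing <x86intrin.h> and <immintrin.h> and a build with
-mrdrnd; the result is a ballpark cycle count, not a calibrated
benchmark.

#include <stdio.h>
#include <x86intrin.h>
#include <immintrin.h>

int main(void)
{
	enum { ITERS = 100000 };
	unsigned long long val, start, end;

	start = __rdtsc();
	for (int i = 0; i < ITERS; i++)
		while (!_rdrand64_step(&val))
			;	/* RDRAND can transiently fail (CF=0); retry */
	end = __rdtsc();

	printf("average cycles per RDRAND: %llu\n", (end - start) / ITERS);
	return 0;
}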

This patch:
* allows the administrator to turn the mitigation off when desired,
  using either mitigations=off or srbds=off on the kernel command line.
* exports the vulnerability status via sysfs (a small reader sketch
  follows below).
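
For illustration only: a tiny user-space reader for the sysfs file this
patch adds. The attribute name below matches the DEVICE_ATTR added in
drivers/base/cpu.c; the directory is the standard vulnerabilities
location.

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/vulnerabilities/special_register_data_sampling";
	char line[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "Mitigation: bus lock when using RDRAND or RDSEED" */
	fclose(f);
	return 0;
}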

Signed-off-by: mark gross <mgross@linux.intel.com>
Reviewed-by: tony luck <tony.luck@intel.com>
---
 arch/x86/include/asm/cpufeatures.h |  3 ++
 arch/x86/include/asm/msr-index.h   |  4 ++
 arch/x86/kernel/cpu/bugs.c         | 84 ++++++++++++++++++++++++++++++
 arch/x86/kernel/cpu/common.c       | 24 +++++++++
 arch/x86/kernel/cpu/cpu.h          | 10 ++++
 arch/x86/kernel/cpu/intel.c        |  2 +
 drivers/base/cpu.c                 |  8 +++
 7 files changed, 135 insertions(+)

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index f3327cb56edf..e7d032542d63 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -360,6 +360,7 @@
 #define X86_FEATURE_AVX512_4FMAPS	(18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
 #define X86_FEATURE_FSRM		(18*32+ 4) /* Fast Short Rep Mov */
 #define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */
+#define X86_FEATURE_SRBDS_CTRL		(18*32+ 9) /* "" SRBDS mitigation MSR available */
 #define X86_FEATURE_MD_CLEAR		(18*32+10) /* VERW clears CPU buffers */
 #define X86_FEATURE_TSX_FORCE_ABORT	(18*32+13) /* "" TSX_FORCE_ABORT */
 #define X86_FEATURE_PCONFIG		(18*32+18) /* Intel PCONFIG */
@@ -404,5 +405,7 @@
 #define X86_BUG_SWAPGS			X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
 #define X86_BUG_TAA			X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
 #define X86_BUG_ITLB_MULTIHIT		X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
+#define X86_BUG_SRBDS			X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
+#define X86_BUG_SRBDS_TSX		X86_BUG(25) /* CPU may leak RNG bits if not mitigated when TSX is enabled */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index ebe1685e92dd..f1603f247f52 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -119,6 +119,10 @@
 #define TSX_CTRL_RTM_DISABLE		BIT(0)	/* Disable RTM feature */
 #define TSX_CTRL_CPUID_CLEAR		BIT(1)	/* Disable TSX enumeration */
 
+/* SRBDS support */
+#define MSR_IA32_MCU_OPT_CTRL		0x00000123
+#define SRBDS_MITG_DIS			BIT(0)
+
 #define MSR_IA32_SYSENTER_CS		0x00000174
 #define MSR_IA32_SYSENTER_ESP		0x00000175
 #define MSR_IA32_SYSENTER_EIP		0x00000176
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index ed54b3b21c39..db3ec7755ff2 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -41,6 +41,7 @@ static void __init l1tf_select_mitigation(void);
 static void __init mds_select_mitigation(void);
 static void __init mds_print_mitigation(void);
 static void __init taa_select_mitigation(void);
+static void __init srbds_select_mitigation(void);
 
 /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
 u64 x86_spec_ctrl_base;
@@ -108,6 +109,7 @@ void __init check_bugs(void)
 	l1tf_select_mitigation();
 	mds_select_mitigation();
 	taa_select_mitigation();
+	srbds_select_mitigation();
 
 	/*
 	 * As MDS and TAA mitigations are inter-related, print MDS
@@ -397,6 +399,69 @@ static int __init tsx_async_abort_parse_cmdline(char *str)
 }
 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
 
+#undef pr_fmt
+#define pr_fmt(fmt)	"SRBDS: " fmt
+
+enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;
+static const char * const srbds_strings[] = {
+	[SRBDS_MITIGATION_OFF]		= "Vulnerable",
+	[SRBDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: no microcode",
+	[SRBDS_MITIGATION_FULL]		= "Mitigation: bus lock when using RDRAND or RDSEED",
+};
+
+void srbds_configure_mitigation(void)
+{
+	u64 mcu_ctrl;
+
+	if (!boot_cpu_has_bug(X86_BUG_SRBDS) && !boot_cpu_has_bug(X86_BUG_SRBDS_TSX))
+		return;
+
+	if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
+		return;
+
+	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+	if (srbds_mitigation == SRBDS_MITIGATION_FULL)
+		mcu_ctrl &= ~SRBDS_MITG_DIS;
+	else if (srbds_mitigation == SRBDS_MITIGATION_OFF)
+		mcu_ctrl |= SRBDS_MITG_DIS;
+
+	if (boot_cpu_has_bug(X86_BUG_SRBDS_TSX) && !boot_cpu_has(X86_FEATURE_RTM))
+		mcu_ctrl |= SRBDS_MITG_DIS;
+
+	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+}
+
+static void __init srbds_select_mitigation(void)
+{
+	if (!boot_cpu_has_bug(X86_BUG_SRBDS) &&
+	    !boot_cpu_has_bug(X86_BUG_SRBDS_TSX))
+		return;
+
+	if (cpu_mitigations_off()) {
+		srbds_mitigation = SRBDS_MITIGATION_OFF;
+		goto out;
+	}
+
+	if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
+		srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
+
+out:
+	srbds_configure_mitigation();
+}
+
+static int __init srbds_parse_cmdline(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	if (!strcmp(str, "off"))
+		srbds_mitigation = SRBDS_MITIGATION_OFF;
+
+	return 0;
+}
+
+early_param("srbds", srbds_parse_cmdline);
+
 #undef pr_fmt
 #define pr_fmt(fmt)     "Spectre V1 : " fmt
 
@@ -1528,6 +1593,11 @@ static char *ibpb_state(void)
 	return "";
 }
 
+static ssize_t srbds_show_state(char *buf)
+{
+	return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
+}
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
 			       char *buf, unsigned int bug)
 {
@@ -1572,6 +1642,10 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 	case X86_BUG_ITLB_MULTIHIT:
 		return itlb_multihit_show_state(buf);
 
+	case X86_BUG_SRBDS:
+	case X86_BUG_SRBDS_TSX:
+		return srbds_show_state(buf);
+
 	default:
 		break;
 	}
@@ -1618,4 +1692,14 @@ ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr
 {
 	return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
 }
+
+ssize_t cpu_show_special_register_data_sampling(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	if (boot_cpu_has_bug(X86_BUG_SRBDS))
+		return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
+	else if (boot_cpu_has_bug(X86_BUG_SRBDS_TSX))
+		return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS_TSX);
+	else
+		return sprintf(buf, "Not affected\n");
+}
 #endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c0519be5f563..050c5c05b500 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1007,6 +1007,8 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #define NO_SWAPGS		BIT(6)
 #define NO_ITLB_MULTIHIT	BIT(7)
 #define NO_SPECTRE_V2		BIT(8)
+#define SRBDS			BIT(9)
+#define SRBDS_TSX		BIT(10)
 
 #define VULNWL(_vendor, _family, _model, __stepping,  _whitelist)	\
 	{{ X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }, __stepping}
@@ -1014,6 +1016,9 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #define VULNWL_INTEL(model, whitelist)		\
 	VULNWL(INTEL, 6, INTEL_FAM6_##model, X86_STEPPING_ANY, whitelist)
 
+#define VULNWL_INTEL_STEPPING(model, stepping, whitelist)		\
+	VULNWL(INTEL, 6, INTEL_FAM6_##model, stepping, whitelist)
+
 #define VULNWL_AMD(family, whitelist)		\
 	VULNWL(AMD, family, X86_MODEL_ANY, X86_STEPPING_ANY, whitelist)
 
@@ -1042,6 +1047,19 @@ static const struct x86_cpu_id_ext cpu_vuln_whitelist[] __initconst = {
 
 	VULNWL_INTEL(CORE_YONAH,		NO_SSB),
 
+	VULNWL_INTEL(IVYBRIDGE,		SRBDS), /*06_3A*/
+	VULNWL_INTEL(HASWELL,		SRBDS), /*06_3C*/
+	VULNWL_INTEL(HASWELL_L,		SRBDS), /*06_45*/
+	VULNWL_INTEL(HASWELL_G,		SRBDS), /*06_46*/
+	VULNWL_INTEL(BROADWELL_G,	SRBDS), /*06_47*/
+	VULNWL_INTEL(BROADWELL,		SRBDS), /*06_3D*/
+	VULNWL_INTEL(SKYLAKE_L,		SRBDS), /*06_4E*/
+	VULNWL_INTEL(SKYLAKE,		SRBDS), /*06_5E*/
+VULNWL_INTEL_STEPPING(KABYLAKE_L, (BIT(0xB)-1),		SRBDS), /* 06_8E steppings <= 0xA */
+VULNWL_INTEL_STEPPING(KABYLAKE_L, BIT(0xB)|BIT(0xC),	SRBDS_TSX), /* 06_8E steppings 0xB-0xC, only if TSX enabled */
+VULNWL_INTEL_STEPPING(KABYLAKE, (BIT(0xC)-1),		SRBDS), /* 06_9E steppings <= 0xB */
+VULNWL_INTEL_STEPPING(KABYLAKE, BIT(0xC)|BIT(0xD),	SRBDS_TSX), /* 06_9E steppings 0xC-0xD, only if TSX enabled */
+
 	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
 	VULNWL_INTEL(ATOM_AIRMONT_NP,		NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
 
@@ -1124,6 +1142,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	if (!cpu_matches(NO_SWAPGS))
 		setup_force_cpu_bug(X86_BUG_SWAPGS);
 
+	if (cpu_matches(SRBDS))
+		setup_force_cpu_bug(X86_BUG_SRBDS);
+
+	if (cpu_matches(SRBDS_TSX))
+		setup_force_cpu_bug(X86_BUG_SRBDS_TSX);
+
 	/*
 	 * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when:
 	 *	- TSX is supported or
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 37fdefd14f28..f2b3fd4d4274 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -44,7 +44,17 @@ struct _tlb_table {
 extern const struct cpu_dev *const __x86_cpu_dev_start[],
 			    *const __x86_cpu_dev_end[];
 
+enum srbds_mitigations {
+	SRBDS_MITIGATION_OFF,
+	SRBDS_MITIGATION_UCODE_NEEDED,
+	SRBDS_MITIGATION_FULL,
+};
+
+extern __ro_after_init enum srbds_mitigations srbds_mitigation;
+void srbds_configure_mitigation(void);
+
 #ifdef CONFIG_CPU_SUP_INTEL
+
 enum tsx_ctrl_states {
 	TSX_CTRL_ENABLE,
 	TSX_CTRL_DISABLE,
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index be82cd5841c3..1b083a2a415b 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -684,6 +684,8 @@ static void init_intel(struct cpuinfo_x86 *c)
 		tsx_enable();
 	if (tsx_ctrl_state == TSX_CTRL_DISABLE)
 		tsx_disable();
+
+	srbds_configure_mitigation();
 }
 
 #ifdef CONFIG_X86_32
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 6265871a4af2..d69e094e790c 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -567,6 +567,12 @@ ssize_t __weak cpu_show_itlb_multihit(struct device *dev,
 	return sprintf(buf, "Not affected\n");
 }
 
+ssize_t __weak cpu_show_special_register_data_sampling(struct device *dev,
+						       struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
@@ -575,6 +581,7 @@ static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
 static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
 static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
 static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
+static DEVICE_ATTR(special_register_data_sampling, 0444, cpu_show_special_register_data_sampling, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_meltdown.attr,
@@ -585,6 +592,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_mds.attr,
 	&dev_attr_tsx_async_abort.attr,
 	&dev_attr_itlb_multihit.attr,
+	&dev_attr_special_register_data_sampling.attr,
 	NULL
 };
 
-- 
2.17.1
