bugs.c

/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *     - Rafael R. Reilova (moved everything from head.S),
 *       <rreilova@ececs.uc.edu>
 *     - Channing Corn (tests & fixes),
 *     - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/intel-family.h>
#include <asm/e820.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);

/*
 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
 * writes to SPEC_CTRL contain whatever reserved bits have been set.
 */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 x86_amd_ls_cfg_base;
u64 x86_amd_ls_cfg_ssbd_mask;
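
/*
 * Boot-time entry point: identify the boot CPU, cache the SPEC_CTRL base
 * value and select the Spectre v2, Speculative Store Bypass and L1TF
 * mitigations before alternatives are patched in.
 */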
void __init check_bugs(void)
{
	identify_boot_cpu();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Allow STIBP in MSR_SPEC_CTRL if supported */
	if (boot_cpu_has(X86_FEATURE_STIBP))
		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

	/* Select the proper spectre mitigation before patching alternatives */
	spectre_v2_select_mitigation();

	/*
	 * Select proper mitigation for any exposure to the Speculative Store
	 * Bypass vulnerability.
	 */
	ssb_select_mitigation();

	l1tf_select_mitigation();

#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for an i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");

	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();

	/*
	 * Make sure the first 2MB area is not mapped by huge pages.
	 * There are typically fixed size MTRRs in there and overlapping
	 * MTRRs into large pages causes slowdowns.
	 *
	 * Right now we don't do that with gbpages because there seems
	 * very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}

/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_AMD,
};

static const char *spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE_MINIMAL]		= "Vulnerable: Minimal generic ASM retpoline",
	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline",
	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
};

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
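
/*
 * Transfer the SPEC_CTRL and virtual SSBD state between host and guest
 * around VM entry/exit; @setguest selects whether the guest or the host
 * values are written.
 */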
void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
	struct thread_info *ti = current_thread_info();

	/* Is MSR_SPEC_CTRL implemented ? */
	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		/*
		 * Restrict guest_spec_ctrl to supported values. Clear the
		 * modifiable bits in the host base value and OR in the
		 * modifiable bits from the guest value.
		 */
		guestval = hostval & ~x86_spec_ctrl_mask;
		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

		/* SSBD controlled in MSR_SPEC_CTRL */
		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

		if (hostval != guestval) {
			msrval = setguest ? guestval : hostval;
			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
		}
	}

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculative_store_bypass_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
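
/*
 * Engage SSBD on AMD: use the VIRT_SPEC_CTRL MSR when available,
 * otherwise the family-specific bit in MSR_AMD64_LS_CFG.
 */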
static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#ifdef RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static void __init spec2_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		pr_info("%s selected on command line.\n", reason);
}

static void __init spec2_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		pr_info("%s selected on command line.\n", reason);
}

static inline bool retp_compiler(void)
{
	return __is_defined(RETPOLINE);
}

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}

static const struct {
	const char *option;
	enum spectre_v2_mitigation_cmd cmd;
	bool secure;
} mitigation_options[] = {
	{ "off",               SPECTRE_V2_CMD_NONE,              false },
	{ "on",                SPECTRE_V2_CMD_FORCE,             true },
	{ "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
	{ "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
	{ "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "auto",              SPECTRE_V2_CMD_AUTO,              false },
};
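
/* Parse the "spectre_v2=" and "nospectre_v2" kernel command line options. */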
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	char arg[20];
	int ret, i;
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
		return SPECTRE_V2_CMD_NONE;
	else {
		ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
		if (ret < 0)
			return SPECTRE_V2_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
			if (!match_option(arg, ret, mitigation_options[i].option))
				continue;
			cmd = mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPECTRE_V2_CMD_AUTO;
		}
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
	    !IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
		return SPECTRE_V2_CMD_AUTO;
	}

	if (mitigation_options[i].secure)
		spec2_print_if_secure(mitigation_options[i].option);
	else
		spec2_print_if_insecure(mitigation_options[i].option);

	return cmd;
}
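
/*
 * Select and enable the Spectre v2 mitigation based on the command line
 * choice, the CPU vendor and whether the kernel was built with retpolines.
 */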
static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_AMD:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_amd;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_generic;
		break;
	case SPECTRE_V2_CMD_RETPOLINE:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	}
	pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
	return;

retpoline_auto:
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
	retpoline_amd:
		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
			goto retpoline_generic;
		}
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
					 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	} else {
	retpoline_generic:
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
					 SPECTRE_V2_RETPOLINE_MINIMAL;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	}

	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If spectre v2 protection has been enabled, unconditionally fill
	 * RSB during a context switch; this protects against two independent
	 * issues:
	 *
	 *	- RSB underflow (and switch to BTB) on Skylake+
	 *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
	 */
	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

	/* Initialize Indirect Branch Prediction Barrier if supported */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
		pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
	}

	/*
	 * Retpoline means the kernel is safe because it has no indirect
	 * branches. But firmware isn't, so use IBRS to protect that.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char *ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] = {
	{ "auto",    SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",      SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",     SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",   SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};
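
/* Parse the "spec_store_bypass_disable=" and "nospec_store_bypass_disable" options. */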
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}
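
/*
 * Pick the Speculative Store Bypass mitigation mode and, for the global
 * "disable" case, program the vendor specific SSBD control right away.
 */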
static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
		 * a completely different MSR and bit dependent on family.
		 */
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_INTEL:
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
			break;
		case X86_VENDOR_AMD:
			x86_amd_ssb_disable();
			break;
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculation prctl: " fmt
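
/* Per-task SSBD control backend for prctl(PR_SET_SPECULATION_CTRL). */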
static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	bool update;

	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
		break;
	default:
		return -ERANGE;
	}

	/*
	 * If being set on non-current task, delay setting the CPU
	 * mitigation until it is next scheduled.
	 */
	if (task == current && update)
		speculative_store_bypass_update_current();

	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif
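
/* Report the task's SSBD state for prctl(PR_GET_SPECULATION_CTRL). */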
static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	default:
		return -ENODEV;
	}
}
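
/*
 * Propagate the boot CPU's SPEC_CTRL base value and, when SSB is globally
 * disabled, the AMD SSBD setting to application processors as they come up.
 */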
void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}

#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/*
 * These CPUs all support 44 bits of physical address space internally in
 * the cache but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the topmost address bit for the inversion of
 * non-present PTEs. When the installed memory reaches into the topmost
 * address bit due to memory holes, which has been observed on machines
 * which report 36 physical address bits and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44 bits internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44 bits if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
	if (c->x86 != 6)
		return;

	switch (c->x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_HASWELL_CORE:
	case INTEL_FAM6_HASWELL_ULT:
	case INTEL_FAM6_HASWELL_GT3E:
	case INTEL_FAM6_BROADWELL_CORE:
	case INTEL_FAM6_BROADWELL_GT3E:
	case INTEL_FAM6_SKYLAKE_MOBILE:
	case INTEL_FAM6_SKYLAKE_DESKTOP:
	case INTEL_FAM6_KABYLAKE_MOBILE:
	case INTEL_FAM6_KABYLAKE_DESKTOP:
		if (c->x86_cache_bits < 44)
			c->x86_cache_bits = 44;
		break;
	}
}
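
/*
 * L1TF mitigation: enable PTE inversion on affected CPUs, unless installed
 * memory reaches beyond half the maximum physical address, in which case
 * inversion alone cannot cover all of it and only a warning is emitted.
 */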
static void __init l1tf_select_mitigation(void)
{
	u64 half_pa;

	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;

	override_cache_bits(&boot_cpu_data);

#if CONFIG_PGTABLE_LEVELS == 2
	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
	return;
#endif

	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
	if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
			half_pa);
		pr_info("However, doing so will make a part of your RAM unusable.\n");
		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
		return;
	}

	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}

#undef pr_fmt

#ifdef CONFIG_SYSFS
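
/* Common backend for the /sys/devices/system/cpu/vulnerabilities/<bug> files. */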
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sprintf(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_KAISER))
			return sprintf(buf, "Mitigation: PTI\n");
		break;

	case X86_BUG_SPECTRE_V1:
		return sprintf(buf, "Mitigation: __user pointer sanitization\n");

	case X86_BUG_SPECTRE_V2:
		return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
			       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
			       spectre_v2_module_string());

	case X86_BUG_SPEC_STORE_BYPASS:
		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return sprintf(buf, "Mitigation: Page Table Inversion\n");
		break;

	default:
		break;
	}

	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}
#endif