feature-fixups.c

/*
 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
 *
 * Modifications for ppc64:
 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 * Copyright 2008 Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/setup.h>
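
/*
 * One entry in a feature fixup table. mask/value select which feature
 * bits the entry applies to; start_off/end_off bound the code to be
 * patched, and alt_start_off/alt_end_off bound the alternative code to
 * patch in. All offsets are stored relative to the entry itself, see
 * calc_addr() below.
 */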
struct fixup_entry {
	unsigned long	mask;
	unsigned long	value;
	long		start_off;
	long		end_off;
	long		alt_start_off;
	long		alt_end_off;
};

static unsigned int *calc_addr(struct fixup_entry *fcur, long offset)
{
	/*
	 * We store the offset to the code as a negative offset from
	 * the start of the alt_entry, to support the VDSO. This
	 * routine converts that back into an actual address.
	 */
	return (unsigned int *)((unsigned long)fcur + offset);
}
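
/*
 * Copy one instruction from the alternative section (*src) to its final
 * location (dest). Relative branches that stay within the alternative
 * section keep their offsets; branches that leave it are re-targeted with
 * translate_branch() so they still reach their original destination from
 * the patched location. Returns non-zero if a branch can't be translated.
 */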
static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
				 unsigned int *alt_start, unsigned int *alt_end)
{
	unsigned int instr;

	instr = *src;

	if (instr_is_relative_branch(*src)) {
		unsigned int *target = (unsigned int *)branch_target(src);

		/* Branch within the section doesn't need translating */
		if (target < alt_start || target > alt_end) {
			instr = translate_branch(dest, src);
			if (!instr)
				return 1;
		}
	}

	patch_instruction(dest, instr);

	return 0;
}
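
/*
 * Apply a single fixup_entry. If the feature word matches the entry's
 * mask/value the code is left untouched. Otherwise the alternative
 * instructions are copied over the original ones and any remaining space
 * is filled with nops. Returns non-zero if the alternative section is
 * larger than the section it replaces, or if an instruction can't be
 * patched.
 */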
static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
	unsigned int *start, *end, *alt_start, *alt_end, *src, *dest;

	start = calc_addr(fcur, fcur->start_off);
	end = calc_addr(fcur, fcur->end_off);
	alt_start = calc_addr(fcur, fcur->alt_start_off);
	alt_end = calc_addr(fcur, fcur->alt_end_off);

	if ((alt_end - alt_start) > (end - start))
		return 1;

	if ((value & fcur->mask) == fcur->value)
		return 0;

	src = alt_start;
	dest = start;

	for (; src < alt_end; src++, dest++) {
		if (patch_alt_instruction(src, dest, alt_start, alt_end))
			return 1;
	}

	for (; dest < end; dest++)
		patch_instruction(dest, PPC_INST_NOP);

	return 0;
}
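
/*
 * Walk a table of fixup_entry structures between fixup_start and
 * fixup_end, applying each one against the given feature value and
 * warning about (but otherwise skipping) any entry that fails to patch.
 */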
void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	struct fixup_entry *fcur, *fend;

	fcur = fixup_start;
	fend = fixup_end;

	for (; fcur < fend; fcur++) {
		if (patch_feature_section(value, fcur)) {
			WARN_ON(1);
			printk("Unable to patch feature section at %p - %p" \
				" with %p - %p\n",
				calc_addr(fcur, fcur->start_off),
				calc_addr(fcur, fcur->end_off),
				calc_addr(fcur, fcur->alt_start_off),
				calc_addr(fcur, fcur->alt_end_off));
		}
	}
}

#ifdef CONFIG_PPC_BOOK3S_64
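/*
 * For each entry between __start___rfi_flush_fixup and
 * __stop___rfi_flush_fixup (each a relative offset to its patch site),
 * write a three-instruction sequence chosen from the enabled L1D flush
 * types: a branch to the fallback flush, the ori-form speculation
 * barrier / L1D flush pair, and/or an mtspr to TRIG2. Slots not used by
 * the selected types remain nops.
 */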
void do_rfi_flush_fixups(enum l1d_flush_type types)
{
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___rfi_flush_fixup);
	end = PTRRELOC(&__stop___rfi_flush_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */

	if (types & L1D_FLUSH_FALLBACK)
		/* b .+16 to fallback flush */
		instrs[0] = 0x48000010;

	i = 0;
	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);
		patch_instruction(dest, instrs[0]);
		patch_instruction(dest + 1, instrs[1]);
		patch_instruction(dest + 2, instrs[2]);
	}

	printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i);
}
#endif /* CONFIG_PPC_BOOK3S_64 */
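
/*
 * If the CPU has CPU_FTR_LWSYNC, patch each fixup site between
 * fixup_start and fixup_end (each entry is a relative offset to its
 * patch site) with an lwsync instruction; otherwise leave the existing
 * code alone.
 */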
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	long *start, *end;
	unsigned int *dest;

	if (!(value & CPU_FTR_LWSYNC))
		return;

	start = fixup_start;
	end = fixup_end;

	for (; start < end; start++) {
		dest = (void *)start + *start;
		patch_instruction(dest, PPC_INST_LWSYNC);
	}
}
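
/*
 * When a relocatable 64-bit kernel is running away from its link address
 * (PHYSICAL_START != 0), copy everything from _stext up to
 * __end_interrupts (the exception vectors) from the running image down
 * to KERNELBASE, one instruction at a time via patch_instruction(), so
 * the low-memory vectors are populated.
 */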
void do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
	int *src, *dest;
	unsigned long length;

	if (PHYSICAL_START == 0)
		return;

	src = (int *)(KERNELBASE + PHYSICAL_START);
	dest = (int *)KERNELBASE;
	length = (__end_interrupts - _stext) / sizeof(int);

	while (length--) {
		patch_instruction(dest, *src);
		src++;
		dest++;
	}
#endif
}

#ifdef CONFIG_FTR_FIXUP_SELFTEST
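
/*
 * Boot-time selftests for the patching code above. Most tests build a
 * fixup_entry by hand around small code stubs assembled elsewhere
 * (ftr_fixup_test1 and friends) and run patch_feature_section() on them;
 * the later ones simply check that the fixups applied during boot
 * produced the expected code. Results are compared with memcmp() against
 * pre-built "expected" copies.
 */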
#define check(x)	\
	if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__);

/* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
static struct fixup_entry fixup;

static long calc_offset(struct fixup_entry *entry, unsigned int *p)
{
	return (unsigned long)p - (unsigned long)entry;
}

static void test_basic_patching(void)
{
	extern unsigned int ftr_fixup_test1;
	extern unsigned int end_ftr_fixup_test1;
	extern unsigned int ftr_fixup_test1_orig;
	extern unsigned int ftr_fixup_test1_expected;
	int size = &end_ftr_fixup_test1 - &ftr_fixup_test1;

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test1 + 1);
	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test1 + 2);
	fixup.alt_start_off = fixup.alt_end_off = 0;

	/* Sanity check */
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(8, &fixup);
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(&ftr_fixup_test1, &ftr_fixup_test1_orig, size);
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
	patch_feature_section(~8, &fixup);
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);
}

static void test_alternative_patching(void)
{
	extern unsigned int ftr_fixup_test2;
	extern unsigned int end_ftr_fixup_test2;
	extern unsigned int ftr_fixup_test2_orig;
	extern unsigned int ftr_fixup_test2_alt;
	extern unsigned int ftr_fixup_test2_expected;
	int size = &end_ftr_fixup_test2 - &ftr_fixup_test2;

	fixup.value = fixup.mask = 0xF;
	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test2 + 1);
	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test2 + 2);
	fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test2_alt);
	fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test2_alt + 1);

	/* Sanity check */
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(0xF, &fixup);
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(&ftr_fixup_test2, &ftr_fixup_test2_orig, size);
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
	patch_feature_section(~0xF, &fixup);
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);
}

static void test_alternative_case_too_big(void)
{
	extern unsigned int ftr_fixup_test3;
	extern unsigned int end_ftr_fixup_test3;
	extern unsigned int ftr_fixup_test3_orig;
	extern unsigned int ftr_fixup_test3_alt;
	int size = &end_ftr_fixup_test3 - &ftr_fixup_test3;

	fixup.value = fixup.mask = 0xC;
	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test3 + 1);
	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test3 + 2);
	fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test3_alt);
	fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test3_alt + 2);

	/* Sanity check */
	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);

	/* Expect nothing to be patched, and the error returned to us */
	check(patch_feature_section(0xF, &fixup) == 1);
	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(0, &fixup) == 1);
	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(~0xF, &fixup) == 1);
	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
}

static void test_alternative_case_too_small(void)
{
	extern unsigned int ftr_fixup_test4;
	extern unsigned int end_ftr_fixup_test4;
	extern unsigned int ftr_fixup_test4_orig;
	extern unsigned int ftr_fixup_test4_alt;
	extern unsigned int ftr_fixup_test4_expected;
	int size = &end_ftr_fixup_test4 - &ftr_fixup_test4;
	unsigned long flag;

	/* Check a high-bit flag */
	flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
	fixup.value = fixup.mask = flag;
	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test4 + 1);
	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test4 + 5);
	fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test4_alt);
	fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test4_alt + 2);

	/* Sanity check */
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(flag, &fixup);
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(&ftr_fixup_test4, &ftr_fixup_test4_orig, size);
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
	patch_feature_section(~flag, &fixup);
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);
}

static void test_alternative_case_with_branch(void)
{
	extern unsigned int ftr_fixup_test5;
	extern unsigned int end_ftr_fixup_test5;
	extern unsigned int ftr_fixup_test5_expected;
	int size = &end_ftr_fixup_test5 - &ftr_fixup_test5;

	check(memcmp(&ftr_fixup_test5, &ftr_fixup_test5_expected, size) == 0);
}

static void test_alternative_case_with_external_branch(void)
{
	extern unsigned int ftr_fixup_test6;
	extern unsigned int end_ftr_fixup_test6;
	extern unsigned int ftr_fixup_test6_expected;
	int size = &end_ftr_fixup_test6 - &ftr_fixup_test6;

	check(memcmp(&ftr_fixup_test6, &ftr_fixup_test6_expected, size) == 0);
}

static void test_cpu_macros(void)
{
	extern u8 ftr_fixup_test_FTR_macros;
	extern u8 ftr_fixup_test_FTR_macros_expected;
	unsigned long size = &ftr_fixup_test_FTR_macros_expected -
			     &ftr_fixup_test_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(&ftr_fixup_test_FTR_macros,
		     &ftr_fixup_test_FTR_macros_expected, size) == 0);
}

static void test_fw_macros(void)
{
#ifdef CONFIG_PPC64
	extern u8 ftr_fixup_test_FW_FTR_macros;
	extern u8 ftr_fixup_test_FW_FTR_macros_expected;
	unsigned long size = &ftr_fixup_test_FW_FTR_macros_expected -
			     &ftr_fixup_test_FW_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(&ftr_fixup_test_FW_FTR_macros,
		     &ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
#endif
}

static void test_lwsync_macros(void)
{
	extern u8 lwsync_fixup_test;
	extern u8 end_lwsync_fixup_test;
	extern u8 lwsync_fixup_test_expected_LWSYNC;
	extern u8 lwsync_fixup_test_expected_SYNC;
	unsigned long size = &end_lwsync_fixup_test -
			     &lwsync_fixup_test;

	/* The fixups have already been done for us during boot */
	if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
		check(memcmp(&lwsync_fixup_test,
			     &lwsync_fixup_test_expected_LWSYNC, size) == 0);
	} else {
		check(memcmp(&lwsync_fixup_test,
			     &lwsync_fixup_test_expected_SYNC, size) == 0);
	}
}

static int __init test_feature_fixups(void)
{
	printk(KERN_DEBUG "Running feature fixup self-tests ...\n");

	test_basic_patching();
	test_alternative_patching();
	test_alternative_case_too_big();
	test_alternative_case_too_small();
	test_alternative_case_with_branch();
	test_alternative_case_with_external_branch();
	test_cpu_macros();
	test_fw_macros();
	test_lwsync_macros();

	return 0;
}
late_initcall(test_feature_fixups);

#endif /* CONFIG_FTR_FIXUP_SELFTEST */