
/*
 * arch/sh/kernel/traps_64.c
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 * Copyright (C) 2003, 2004 Paul Mundt
 * Copyright (C) 2003, 2004 Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/alignment.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/fpu.h>
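
/*
 * On SH-5 the execution mode is encoded in the PC: an SHmedia PC is a
 * 4-byte-aligned address with bit 0 set ((pc & 3) == 1), while an SHcompact
 * PC has bit 0 clear.  read_opcode() below relies on this convention to
 * decide how to fetch the faulting instruction.
 */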
static int read_opcode(reg_size_t pc, insn_size_t *result_opcode, int from_user_mode)
{
	int get_user_error;
	unsigned long aligned_pc;
	insn_size_t opcode;

	if ((pc & 3) == 1) {
		/* SHmedia */
		aligned_pc = pc & ~3;
		if (from_user_mode) {
			if (!access_ok(VERIFY_READ, aligned_pc, sizeof(insn_size_t))) {
				get_user_error = -EFAULT;
			} else {
				get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
				*result_opcode = opcode;
			}
			return get_user_error;
		} else {
			/* If the fault was in the kernel, we can either read
			 * this directly, or if not, we fault.
			 */
			*result_opcode = *(insn_size_t *)aligned_pc;
			return 0;
		}
	} else if ((pc & 1) == 0) {
		/* SHcompact */
		/* TODO : provide handling for this.  We don't really support
		   user-mode SHcompact yet, and for a kernel fault, this would
		   have to come from a module built for SHcompact.  */
		return -EFAULT;
	} else {
		/* misaligned */
		return -EFAULT;
	}
}

static int address_is_sign_extended(__u64 a)
{
	__u64 b;
#if (NEFF == 32)
	b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
	return (b == a) ? 1 : 0;
#else
#error "Sign extend check only works for NEFF==32"
#endif
}
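
/*
 * Field layout of the SHmedia load/store opcodes handled here, as decoded by
 * the shifts and masks below:
 *   bits 31..26  major opcode
 *   bits 25..20  base register
 *   bits 19..10  10-bit signed displacement, scaled by the access width
 *                (displacement forms); the indexed forms instead carry the
 *                index register in bits 15..10
 *   bits  9..4   destination/source register
 */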

/* return -1 for fault, 0 for OK */
static int generate_and_check_address(struct pt_regs *regs,
				      insn_size_t opcode,
				      int displacement_not_indexed,
				      int width_shift,
				      __u64 *address)
{
	__u64 base_address, addr;
	int basereg;

	switch (1 << width_shift) {
	case 1: inc_unaligned_byte_access(); break;
	case 2: inc_unaligned_word_access(); break;
	case 4: inc_unaligned_dword_access(); break;
	case 8: inc_unaligned_multi_access(); break;
	}

	basereg = (opcode >> 20) & 0x3f;
	base_address = regs->regs[basereg];
	if (displacement_not_indexed) {
		__s64 displacement;
		displacement = (opcode >> 10) & 0x3ff;
		displacement = sign_extend64(displacement, 9);
		addr = (__u64)((__s64)base_address + (displacement << width_shift));
	} else {
		__u64 offset;
		int offsetreg;
		offsetreg = (opcode >> 10) & 0x3f;
		offset = regs->regs[offsetreg];
		addr = base_address + offset;
	}

	/* Check sign extended */
	if (!address_is_sign_extended(addr))
		return -1;

	/* Check accessible.  For misaligned access in the kernel, assume the
	   address is always accessible (and if not, just fault when the
	   load/store gets done.) */
	if (user_mode(regs)) {
		inc_unaligned_user_access();

		if (addr >= TASK_SIZE)
			return -1;
	} else
		inc_unaligned_kernel_access();

	*address = addr;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, addr);
	unaligned_fixups_notify(current, opcode, regs);

	return 0;
}
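
/*
 * Byte-at-a-time helpers for misaligned 16-bit kernel accesses: copying
 * through unsigned char pointers only ever issues byte-sized accesses,
 * which cannot themselves be misaligned.
 */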
static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
	unsigned short x;
	unsigned char *p, *q;
	p = (unsigned char *) (int) address;
	q = (unsigned char *) &x;
	q[0] = p[0];
	q[1] = p[1];

	if (do_sign_extend) {
		*result = (__u64)(__s64) *(short *) &x;
	} else {
		*result = (__u64) x;
	}
}

static void misaligned_kernel_word_store(__u64 address, __u64 value)
{
	unsigned short x;
	unsigned char *p, *q;
	p = (unsigned char *) (int) address;
	q = (unsigned char *) &x;
	x = (__u16) value;
	p[0] = q[0];
	p[1] = q[1];
}
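
/*
 * Emulate a misaligned load.  User addresses are range-checked and read via
 * __copy_user() so a bad pointer simply faults the task; kernel addresses are
 * accessed directly (the byte-copy helper above for words, LDLO/LDHI pairs
 * below for wider accesses), since a kernel-mode fault here is a genuine bug.
 */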
static int misaligned_load(struct pt_regs *regs,
			   insn_size_t opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_sign_extend)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0)
		return error;

	destreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		switch (width_shift) {
		case 1:
			if (do_sign_extend) {
				regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
			} else {
				regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
			}
			break;
		case 2:
			regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
			break;
		case 3:
			regs->regs[destreg] = buffer;
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	} else {
		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
		__u64 lo, hi;

		switch (width_shift) {
		case 1:
			misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
			break;
		case 2:
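			/*
			 * LDLO.L/LDHI.L fetch, respectively, the low and high
			 * portions of a longword that straddles an alignment
			 * boundary; OR-ing the two partial results
			 * reconstructs the full misaligned value.
			 */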
  198. asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
  199. asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
  200. regs->regs[destreg] = lo | hi;
  201. break;
  202. case 3:
  203. asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
  204. asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
  205. regs->regs[destreg] = lo | hi;
  206. break;
  207. default:
  208. printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
  209. width_shift, (unsigned long) regs->pc);
  210. break;
  211. }
  212. }
  213. return 0;
  214. }

static int misaligned_store(struct pt_regs *regs,
			    insn_size_t opcode,
			    int displacement_not_indexed,
			    int width_shift)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0)
		return error;

	srcreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		switch (width_shift) {
		case 1:
			*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
			break;
		case 2:
			*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
			break;
		case 3:
			buffer = regs->regs[srcreg];
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
	} else {
		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
		__u64 val = regs->regs[srcreg];

		switch (width_shift) {
		case 1:
			misaligned_kernel_word_store(address, val);
			break;
		case 2:
			asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
			break;
		case 3:
			asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;
}

/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
   error. */
static int misaligned_fpu_load(struct pt_regs *regs,
			       insn_size_t opcode,
			       int displacement_not_indexed,
			       int width_shift,
			       int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0)
		return error;

	destreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		__u32 buflo, bufhi;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number. */
		if (last_task_used_math == current) {
			enable_fpu();
			save_fpu(current);
			disable_fpu();
			last_task_used_math = NULL;
			regs->sr |= SR_FD;
		}

		buflo = *(__u32*) &buffer;
		bufhi = *(1 + (__u32*) &buffer);

		switch (width_shift) {
		case 2:
			current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
			break;
		case 3:
			if (do_paired_load) {
				current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
				current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
			} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
				current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
				current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
#else
				current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
				current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
		return 0;
	} else {
		die ("Misaligned FPU load inside kernel", regs, 0);
		return -1;
	}
}

static int misaligned_fpu_store(struct pt_regs *regs,
				insn_size_t opcode,
				int displacement_not_indexed,
				int width_shift,
				int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0)
		return error;

	srcreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		/* Initialise these to NaNs. */
		__u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number. */
		if (last_task_used_math == current) {
			enable_fpu();
			save_fpu(current);
			disable_fpu();
			last_task_used_math = NULL;
			regs->sr |= SR_FD;
		}

		switch (width_shift) {
		case 2:
			buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
			break;
		case 3:
			if (do_paired_load) {
				buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
				bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
			} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
				bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg];
				buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#else
				buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
				bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		*(__u32*) &buffer = buflo;
		*(1 + (__u32*) &buffer) = bufhi;
		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		return 0;
	} else {
		die ("Misaligned FPU store inside kernel", regs, 0);
		return -1;
	}
}
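
/*
 * Dispatch on the major opcode field (bits 31..26).  The case labels are
 * written as a top-byte value shifted right by two, e.g. (0x84>>2) == 0x21
 * matches any instruction whose most significant byte is 0x84..0x87.
 */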
static int misaligned_fixup(struct pt_regs *regs)
{
	insn_size_t opcode;
	int error;
	int major, minor;
	unsigned int user_action;

	user_action = unaligned_user_action();
	if (!(user_action & UM_FIXUP))
		return -1;

	error = read_opcode(regs->pc, &opcode, user_mode(regs));
	if (error < 0) {
		return error;
	}

	major = (opcode >> 26) & 0x3f;
	minor = (opcode >> 16) & 0xf;

	switch (major) {
	case (0x84>>2): /* LD.W */
		error = misaligned_load(regs, opcode, 1, 1, 1);
		break;
	case (0xb0>>2): /* LD.UW */
		error = misaligned_load(regs, opcode, 1, 1, 0);
		break;
	case (0x88>>2): /* LD.L */
		error = misaligned_load(regs, opcode, 1, 2, 1);
		break;
	case (0x8c>>2): /* LD.Q */
		error = misaligned_load(regs, opcode, 1, 3, 0);
		break;

	case (0xa4>>2): /* ST.W */
		error = misaligned_store(regs, opcode, 1, 1);
		break;
	case (0xa8>>2): /* ST.L */
		error = misaligned_store(regs, opcode, 1, 2);
		break;
	case (0xac>>2): /* ST.Q */
		error = misaligned_store(regs, opcode, 1, 3);
		break;

	case (0x40>>2): /* indexed loads */
		switch (minor) {
		case 0x1: /* LDX.W */
			error = misaligned_load(regs, opcode, 0, 1, 1);
			break;
		case 0x5: /* LDX.UW */
			error = misaligned_load(regs, opcode, 0, 1, 0);
			break;
		case 0x2: /* LDX.L */
			error = misaligned_load(regs, opcode, 0, 2, 1);
			break;
		case 0x3: /* LDX.Q */
			error = misaligned_load(regs, opcode, 0, 3, 0);
			break;
		default:
			error = -1;
			break;
		}
		break;

	case (0x60>>2): /* indexed stores */
		switch (minor) {
		case 0x1: /* STX.W */
			error = misaligned_store(regs, opcode, 0, 1);
			break;
		case 0x2: /* STX.L */
			error = misaligned_store(regs, opcode, 0, 2);
			break;
		case 0x3: /* STX.Q */
			error = misaligned_store(regs, opcode, 0, 3);
			break;
		default:
			error = -1;
			break;
		}
		break;

	case (0x94>>2): /* FLD.S */
		error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
		break;
	case (0x98>>2): /* FLD.P */
		error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
		break;
	case (0x9c>>2): /* FLD.D */
		error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
		break;

	case (0x1c>>2): /* floating indexed loads */
		switch (minor) {
		case 0x8: /* FLDX.S */
			error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
			break;
		case 0xd: /* FLDX.P */
			error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
			break;
		case 0x9: /* FLDX.D */
			error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
			break;
		default:
			error = -1;
			break;
		}
		break;

	case (0xb4>>2): /* FST.S */
		error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
		break;
	case (0xb8>>2): /* FST.P */
		error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
		break;
	case (0xbc>>2): /* FST.D */
		error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
		break;

	case (0x3c>>2): /* floating indexed stores */
		switch (minor) {
		case 0x8: /* FSTX.S */
			error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
			break;
		case 0xd: /* FSTX.P */
			error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
			break;
		case 0x9: /* FSTX.D */
			error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
			break;
		default:
			error = -1;
			break;
		}
		break;

	default:
		/* Fault */
		error = -1;
		break;
	}

	if (error < 0) {
		return error;
	} else {
		regs->pc += 4; /* Skip the instruction that's just been emulated */
		return 0;
	}
}

static void do_unhandled_exception(int signr, char *str, unsigned long error,
				   struct pt_regs *regs)
{
	if (user_mode(regs))
		force_sig(signr, current);

	die_if_no_fixup(str, regs, error);
}

#define DO_ERROR(signr, str, name) \
asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
{ \
	do_unhandled_exception(signr, str, error_code, regs); \
}

DO_ERROR(SIGILL, "illegal slot instruction", illegal_slot_inst)
DO_ERROR(SIGSEGV, "address error (exec)", address_error_exec)

#if defined(CONFIG_SH64_ID2815_WORKAROUND)

#define OPCODE_INVALID      0
#define OPCODE_USER_VALID   1
#define OPCODE_PRIV_VALID   2

/* getcon/putcon - requires checking which control register is referenced. */
#define OPCODE_CTRL_REG     3

/* Table of valid opcodes for SHmedia mode.
   Form a 10-bit value by concatenating the major/minor opcodes i.e.
   opcode[31:26,19:16].  The 6 MSBs of this value index into the following
   array.  The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
   LSBs==4'b0000 etc). */
static unsigned long shmedia_opcode_table[64] = {
	0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
	0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
	0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
	0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};
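
/*
 * Lookup example: an opcode with major == 0x00 and minor == 0x1 selects
 * shmedia_opcode_table[0] == 0x55554044, and the bit-pair
 * (0x55554044 >> (0x1 << 1)) & 3 == 1 == OPCODE_USER_VALID.
 */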

/* Workaround SH5-101 cut2 silicon defect #2815 :
   in some situations, inter-mode branches from SHcompact -> SHmedia
   which should take ITLBMISS or EXECPROT exceptions at the target
   falsely take RESINST at the target instead. */
void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
	insn_size_t opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
	unsigned long pc, aligned_pc;
	unsigned long index, shift;
	unsigned long major, minor, combined;
	unsigned long reserved_field;
	int opcode_state;
	int get_user_error;
	int signr = SIGILL;
	char *exception_name = "reserved_instruction";

	pc = regs->pc;

	/* SHcompact is not handled */
	if (unlikely((pc & 3) == 0))
		goto out;

	/* SHmedia : check for defect.  This requires executable vmas
	   to be readable too. */
	aligned_pc = pc & ~3;
	if (!access_ok(VERIFY_READ, aligned_pc, sizeof(insn_size_t)))
		get_user_error = -EFAULT;
	else
		get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);

	if (get_user_error < 0) {
		/*
		 * Error trying to read opcode.  This typically means a
		 * real fault, not a RESINST any more.  So change the
		 * codes.
		 */
		exception_name = "address error (exec)";
		signr = SIGSEGV;
		goto out;
	}

	/* These bits are currently reserved as zero in all valid opcodes */
	reserved_field = opcode & 0xf;
	if (unlikely(reserved_field))
		goto out;	/* invalid opcode */

	major = (opcode >> 26) & 0x3f;
	minor = (opcode >> 16) & 0xf;
	combined = (major << 4) | minor;
	index = major;
	shift = minor << 1;
	opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;

	switch (opcode_state) {
	case OPCODE_INVALID:
		/* Trap. */
		break;
	case OPCODE_USER_VALID:
		/*
		 * Restart the instruction: the branch to the instruction
		 * will now be from an RTE not from SHcompact so the
		 * silicon defect won't be triggered.
		 */
		return;
	case OPCODE_PRIV_VALID:
		if (!user_mode(regs)) {
			/*
			 * Should only ever get here if a module has
			 * SHcompact code inside it.  If so, the same fix
			 * up is needed.
			 */
			return; /* same reason */
		}
		/*
		 * Otherwise, user mode trying to execute a privileged
		 * instruction - fall through to trap.
		 */
		break;
	case OPCODE_CTRL_REG:
		/* If in privileged mode, return as above. */
		if (!user_mode(regs))
			return;

		/* In user mode ... */
		if (combined == 0x9f) { /* GETCON */
			unsigned long regno = (opcode >> 20) & 0x3f;

			if (regno >= 62)
				return;

			/* reserved/privileged control register => trap */
		} else if (combined == 0x1bf) { /* PUTCON */
			unsigned long regno = (opcode >> 4) & 0x3f;

			if (regno >= 62)
				return;

			/* reserved/privileged control register => trap */
		}
		break;
	default:
		/* Fall through to trap. */
		break;
	}

out:
	do_unhandled_exception(signr, exception_name, error_code, regs);
}

#else /* CONFIG_SH64_ID2815_WORKAROUND */

/* If the workaround isn't needed, this is just a straightforward reserved
   instruction */
DO_ERROR(SIGILL, "reserved instruction", reserved_inst)

#endif /* CONFIG_SH64_ID2815_WORKAROUND */

/* Called with interrupts disabled */
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
	die_if_kernel("exception", regs, ex);
}

asmlinkage int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
{
	/* Syscall debug */
	printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);

	die_if_kernel("unknown trapa", regs, scId);

	return -ENOSYS;
}

/* Implement misaligned load/store handling for kernel (and optionally for user
   mode too).  Limitation : only SHmedia mode code is handled - there is no
   handling at all for misaligned accesses occurring in SHcompact code yet. */

asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
{
	if (misaligned_fixup(regs) < 0)
		do_unhandled_exception(SIGSEGV, "address error(load)",
				       error_code, regs);
}

asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
{
	if (misaligned_fixup(regs) < 0)
		do_unhandled_exception(SIGSEGV, "address error(store)",
				       error_code, regs);
}

asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
	u64 peek_real_address_q(u64 addr);
	u64 poke_real_address_q(u64 addr, u64 val);
	unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
	unsigned long long exp_cause;

	/* It's not worth ioremapping the debug module registers for the amount
	   of access we make to them - just go direct to their physical
	   addresses. */
	exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
	if (exp_cause & ~4)
		printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
			(unsigned long)(exp_cause & 0xffffffff));
	show_state();
	/* Clear all DEBUGINT causes */
	poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}

void per_cpu_trap_init(void)
{
	/* Nothing to do for now, VBR initialization later. */
}