ptrace.c

/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	siginfo_t info = {
		.si_signo	= SIGTRAP,
		.si_errno	= 0,
		.si_code	= TRAP_HWBKPT,
		.si_addr	= (void __user *)(bkpt->trigger),
	};

#ifdef CONFIG_COMPAT
	int i;

	if (!is_compat_task())
		goto send_sig;

	for (i = 0; i < ARM_MAX_BRP; ++i) {
		if (current->thread.debug.hbp_break[i] == bp) {
			info.si_errno = (i << 1) + 1;
			break;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; ++i) {
		if (current->thread.debug.hbp_watch[i] == bp) {
			info.si_errno = -((i << 1) + 1);
			break;
		}
	}

send_sig:
#endif
	force_sig_info(SIGTRAP, &info, current);
}

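/*
 * For a compat task, si_errno thus encodes the virtual register number of
 * the slot that fired: breakpoint slot i reports (i << 1) + 1 (e.g. slot 0
 * reports 1), while watchpoint slot i reports the negated value (slot 0
 * reports -1), matching the numbering used by the compat GETHBPREGS
 * interface further down in this file.
 */
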
/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP)
			bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP)
			bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP) {
			tsk->thread.debug.hbp_break[idx] = bp;
			err = 0;
		}
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP) {
			tsk->thread.debug.hbp_watch[idx] = bp;
			err = 0;
		}
		break;
	}

	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}

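/*
 * The resulting info word therefore packs the debug architecture version
 * into bits [15:8] and the number of available slots into bits [7:0]:
 *
 *	info = (debug_monitors_arch() << 8) | num_slots;
 */
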
static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? bp->attr.bp_addr : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

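/*
 * Layout of the NT_ARM_HW_BREAK/NT_ARM_HW_WATCH regset, as serialised by
 * hw_break_get()/hw_break_set() below: a u32 resource-info word plus a u32
 * of padding, followed by one (u64 address, u32 control, u32 padding)
 * record per debug register slot; the sizes above name those fields.
 */
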
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	uregs = &target->thread.fpsimd_state.user_fpsimd;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_fpsimd_state newstate =
		target->thread.fpsimd_state.user_fpsimd;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
	if (ret)
		return ret;

	target->thread.fpsimd_state.user_fpsimd = newstate;
	fpsimd_flush_task_state(target);
	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.tp_value;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls = target->thread.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.tp_value = tls;
	return ret;
}

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   void *kbuf, void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &syscallno, 0, -1);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32 bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

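/*
 * As a userspace usage sketch (not part of this file): a native debugger
 * reads one of these regsets with PTRACE_GETREGSET, passing the NT_* note
 * type as the address argument and an iovec describing the destination
 * buffer, e.g.:
 *
 *	struct user_pt_regs regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 */
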
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		switch (idx) {
		case 15:
			reg = task_pt_regs(target)->pc;
			break;
		case 16:
			reg = task_pt_regs(target)->pstate;
			break;
		case 17:
			reg = task_pt_regs(target)->orig_x0;
			break;
		default:
			reg = task_pt_regs(target)->regs[idx];
		}

		if (kbuf) {
			memcpy(kbuf, &reg, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_to_user(ubuf, &reg, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}
			ubuf += sizeof(reg);
		}
	}

	return ret;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}
			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}
	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
				  VFP_STATE_SIZE - sizeof(compat_ulong_t));

	if (count && !ret) {
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
		ret = put_user(fpscr, (compat_ulong_t __user *)ubuf);
	}

	return ret;
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret;

	if (pos + count > VFP_STATE_SIZE)
		return -EIO;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 VFP_STATE_SIZE - sizeof(compat_ulong_t));

	if (count && !ret) {
		ret = get_user(fpscr, (compat_ulong_t __user *)ubuf);
		uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
		uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
	}

	fpsimd_flush_task_state(target);
	return ret;
}

static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, void *kbuf, void __user *ubuf)
{
	compat_ulong_t tls = (compat_ulong_t)target->thread.tp_value;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.tp_value = tls;
	return ret;
}

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;
	mm_segment_t old_fs = get_fs();

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	set_fs(KERNEL_DS);
	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);
	set_fs(old_fs);

	return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}

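/*
 * For example, virtual register 1 is the address and register 2 the control
 * word for breakpoint slot 0, while registers -1 and -2 are the address and
 * control word for watchpoint slot 0; all four map to idx 0 here, and the
 * sign and low bit of num are decoded by the callers below.
 */
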
static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);

	debug_arch	= debug_monitors_arch();
	wp_len		= 8;
	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	*kdata = reg;
	return 0;
}

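/*
 * The shifts above build the 32-bit resource word byte by byte: bits
 * [31:24] hold the debug architecture, [23:16] the maximum watchpoint
 * length (fixed at 8 bytes here), [15:8] the number of watchpoint slots
 * and [7:0] the number of breakpoint slots.
 */
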
static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}
	set_fs(old_fs);

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;
	mm_segment_t old_fs = get_fs();

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	set_fs(KERNEL_DS);
	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
	set_fs(old_fs);

	return ret;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
	 * used to denote syscall entry/exit:
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		regs->syscallno = ~0UL;

	regs->regs[regno] = saved_reg;
}

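/*
 * A tracer stopped at the resulting syscall trap can therefore read x7
 * (r12 for a compat tracee) to distinguish the two stops: it reads as
 * PTRACE_SYSCALL_ENTER (0) on entry and PTRACE_SYSCALL_EXIT (1) on exit,
 * and the original register value is restored before the task resumes.
 */
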
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
	/* Do the secure computing check first; failures should be fast. */
	if (secure_computing() == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}

/*
 * Bits which are always architecturally RES0 per ARM DDI 0487A.h
 * Userspace cannot use these until they have an architectural meaning.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \
	 GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20, 20))

static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= COMPAT_PSR_E_BIT;
		else
			regs->pstate &= ~COMPAT_PSR_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & COMPAT_PSR_A_BIT) == 0 &&
	    (regs->pstate & COMPAT_PSR_I_BIT) == 0 &&
	    (regs->pstate & COMPAT_PSR_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= COMPAT_PSR_N_BIT | COMPAT_PSR_Z_BIT |
			COMPAT_PSR_C_BIT | COMPAT_PSR_V_BIT |
			COMPAT_PSR_Q_BIT | COMPAT_PSR_IT_MASK |
			COMPAT_PSR_GE_MASK | COMPAT_PSR_E_BIT |
			COMPAT_PSR_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
		regs->pstate &= ~DBG_SPSR_SS;

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}