regset.c

/*
 * FPU register's regset abstraction, for ptrace, core dumps, etc.
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>

/*
 * The xstateregs_active() routine is the same as the regset_fpregs_active() routine,
 * as the "regset->n" for the xstate regset will be updated based on the feature
 * capabilities supported by the xsave.
 */
int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return target_fpu->fpstate_active ? regset->n : 0;
}

int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return (cpu_has_fxsr && target_fpu->fpstate_active) ? regset->n : 0;
}
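
/*
 * Note: a regset's ->active() hook tells the ptrace/core-dump layer how
 * much of the regset holds interesting state for @target; returning
 * regset->n means the whole regset is available, 0 means there is nothing
 * worth dumping for this task.
 */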

int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;

	if (!cpu_has_fxsr)
		return -ENODEV;

	fpu__activate_fpstate_read(fpu);
	fpstate_sanitize_xstate(fpu);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &fpu->state.fxsave, 0, -1);
}

int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	fpu__activate_fpstate_write(fpu);
	fpstate_sanitize_xstate(fpu);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &fpu->state.fxsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;

	/*
	 * Update the header bits in the xsave header, indicating the
	 * presence of FP and SSE state.
	 */
	if (cpu_has_xsave)
		fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;

	return ret;
}

int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xregs_state *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	fpu__activate_fpstate_read(fpu);

	xsave = &fpu->state.xsave;

	/*
	 * Copy the 48 bytes defined by the software first into the xstate
	 * memory layout in the thread struct, so that we can copy the entire
	 * xstateregs to the user using one user_regset_copyout().
	 */
	memcpy(&xsave->i387.sw_reserved,
	       xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));

	/*
	 * Copy the xstate memory layout.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);

	return ret;
}

int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xregs_state *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	fpu__activate_fpstate_write(fpu);

	xsave = &fpu->state.xsave;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);

	/*
	 * xcomp_bv must be 0 when using the uncompacted format.
	 */
	if (!ret && xsave->header.xcomp_bv)
		ret = -EINVAL;

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	xsave->i387.mxcsr &= mxcsr_feature_mask;
	xsave->header.xfeatures &= xfeatures_mask;

	/*
	 * These bits must be zero.
	 */
	memset(&xsave->header.reserved, 0, 48);

	/*
	 * In case of failure, mark all states as init:
	 */
	if (ret)
		fpstate_init(&fpu->state);

	return ret;
}
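
/*
 * In both setters above, masking MXCSR against mxcsr_feature_mask matters
 * because FXRSTOR/XRSTOR raise #GP if reserved MXCSR bits are set, and that
 * restore runs in kernel context; a ptracer must not be able to plant FPU
 * state that would fault on the next context switch.
 */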

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */

static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}
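
/*
 * Example: a full i387 tag word of 0xfffe (register 0 tagged "special" (10),
 * all other registers "empty" (11)) compacts to the abridged FXSR tag 0x01:
 * only bit 0 is set, meaning only register 0 is not empty.
 */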

#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3
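
/*
 * The FP_EXP_TAG_* values follow the two-bit per-register encoding of the
 * full i387 tag word: 00 = valid, 01 = zero, 10 = special (NaN, infinity,
 * denormal, unsupported), 11 = empty.
 *
 * twd_fxsr_to_i387() below rebuilds that full tag word from the abridged
 * FXSR tag: a set bit only means "not empty", so each register's exponent
 * and significand are inspected to choose between valid, zero and special.
 * The "(i - tos) & 7" indexing translates physical register number i into
 * its stack-relative slot in st_space, using the top-of-stack field from
 * the status word.
 */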

static inline u32 twd_fxsr_to_i387(struct fxregs_state *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			st = FPREG_ADDR(fxsave, (i - tos) & 7);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}
	return ret;
}

/*
 * FXSR floating point environment conversions.
 */

void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave;
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	/*
	 * These should actually be ds/cs at FPU exception time, but
	 * that information is not available in 64-bit mode.
	 */
	env->fcs = task_pt_regs(tsk)->cs;
	if (tsk == current) {
		savesegment(ds, env->fos);
	} else {
		env->fos = tsk->thread.ds;
	}
	env->fos |= 0xffff0000;
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}

void convert_to_fxsr(struct task_struct *tsk,
		     const struct user_i387_ia32_struct *env)
{
	struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave;
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}
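
/*
 * Note on the register copies in the two converters above: struct _fpreg is
 * the packed 10-byte (80-bit) register image of the legacy i387
 * environment, while struct _fpxreg is the 16-byte slot used in the FXSAVE
 * area (10 data bytes plus padding).  Copying sizeof(one element) in each
 * direction therefore moves only the 10 significant bytes and leaves the
 * FXSAVE padding untouched.
 */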

int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;

	fpu__activate_fpstate_read(fpu);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &fpu->state.fsave, 0,
					   -1);

	fpstate_sanitize_xstate(fpu);

	if (kbuf && pos == 0 && count == sizeof(env)) {
		convert_from_fxsr(kbuf, target);
		return 0;
	}

	convert_from_fxsr(&env, target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}

int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;
	int ret;

	fpu__activate_fpstate_write(fpu);
	fpstate_sanitize_xstate(fpu);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &fpu->state.fsave, 0,
					  -1);

	if (pos > 0 || count < sizeof(env))
		convert_from_fxsr(&env, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (!ret)
		convert_to_fxsr(target, &env);

	/*
	 * Update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (cpu_has_xsave)
		fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FP;

	return ret;
}

/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;
	int fpvalid;

	fpvalid = fpu->fpstate_active;
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      ufpu, NULL);

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
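
/*
 * For reference: a minimal userspace sketch of how these handlers get
 * exercised, assuming a stopped tracee with pid "pid".  xstateregs_get()
 * backs the NT_X86_XSTATE regset, so reading the tracee's XSAVE image looks
 * roughly like:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <elf.h>
 *
 *	unsigned char xsave_buf[4096];	// size is an assumption; CPUID
 *					// leaf 0xd reports the real size
 *	struct iovec iov = { xsave_buf, sizeof(xsave_buf) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_X86_XSTATE, &iov);
 *	// on success, iov.iov_len holds the number of bytes copied
 */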