gaccess.h

/*
 * access guest memory
 *
 * Copyright IBM Corp. 2008, 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include "kvm-s390.h"

/**
 * kvm_s390_real_to_abs - convert guest real address to guest absolute address
 * @vcpu: guest virtual cpu
 * @gra: guest real address
 *
 * Returns the guest absolute address that corresponds to the passed guest real
 * address @gra of a virtual guest cpu by applying its prefix.
 */
static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
                                                 unsigned long gra)
{
        unsigned long prefix = kvm_s390_get_prefix(vcpu);

        if (gra < 2 * PAGE_SIZE)
                gra += prefix;
        else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
                gra -= prefix;
        return gra;
}
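
/*
 * Worked example (added for illustration; the prefix value is hypothetical):
 * with a guest prefix of 0x30000, real addresses 0x0-0x1fff map to absolute
 * 0x30000-0x31fff, real addresses 0x30000-0x31fff map to absolute 0x0-0x1fff,
 * and all other real addresses are returned unchanged:
 *
 *      kvm_s390_real_to_abs(vcpu, 0x0);        // -> 0x30000
 *      kvm_s390_real_to_abs(vcpu, 0x30500);    // -> 0x500
 *      kvm_s390_real_to_abs(vcpu, 0x51000);    // -> 0x51000
 */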

/**
 * kvm_s390_logical_to_effective - convert guest logical to effective address
 * @vcpu: guest virtual cpu
 * @ga: guest logical address
 *
 * Convert a guest vcpu logical address to a guest vcpu effective address by
 * applying the rules of the vcpu's addressing mode defined by PSW bits 31
 * and 32 (extended/basic addressing mode).
 *
 * Depending on the vcpu's addressing mode the upper 40 bits (24 bit addressing
 * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing mode)
 * of @ga will be zeroed and the remaining bits will be returned.
 */
static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
                                                          unsigned long ga)
{
        psw_t *psw = &vcpu->arch.sie_block->gpsw;

        if (psw_bits(*psw).eaba == PSW_AMODE_64BIT)
                return ga;
        if (psw_bits(*psw).eaba == PSW_AMODE_31BIT)
                return ga & ((1UL << 31) - 1);
        return ga & ((1UL << 24) - 1);
}
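
/*
 * Worked example (added for illustration): in 31-bit addressing mode only the
 * low 31 bits of the logical address are kept, so a wrapped address such as
 * 0x123456789 becomes the effective address 0x23456789; in 24-bit mode the
 * same value would be truncated to 0x456789.
 */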

/*
 * put_guest_lc, read_guest_lc and write_guest_lc are guest access functions
 * which shall only be used to access the lowcore of a vcpu.
 * These functions should be used for e.g. interrupt handlers where no
 * guest memory access protection facilities, like key or low address
 * protection, are applicable.
 * At a later point guest vcpu lowcore access should happen via pinned
 * prefix pages, so that these pages can be accessed directly via the
 * kernel mapping. All of these *_lc functions can be removed then.
 */

/**
 * put_guest_lc - write a simple variable to a guest vcpu's lowcore
 * @vcpu: virtual cpu
 * @x: value to copy to guest
 * @gra: vcpu's destination guest real address
 *
 * Copies a simple value from kernel space to a guest vcpu's lowcore.
 * The size of the variable may be 1, 2, 4 or 8 bytes. The destination
 * must be located in the vcpu's lowcore. Otherwise the result is undefined.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * Note: an error indicates that either the kernel is out of memory or
 * the guest memory mapping is broken. In any case the best solution
 * would be to terminate the guest.
 * It is wrong to inject a guest exception.
 */
#define put_guest_lc(vcpu, x, gra)                              \
({                                                              \
        struct kvm_vcpu *__vcpu = (vcpu);                       \
        __typeof__(*(gra)) __x = (x);                           \
        unsigned long __gpa;                                    \
                                                                \
        __gpa = (unsigned long)(gra);                           \
        __gpa += kvm_s390_get_prefix(__vcpu);                   \
        kvm_write_guest(__vcpu->kvm, __gpa, &__x, sizeof(__x)); \
})
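
/*
 * Usage sketch (added for illustration; assumes the __LC_EXT_PARAMS lowcore
 * offset from <asm/lowcore.h> and a hypothetical u32 value ext_params): @gra
 * is passed as a pointer-typed lowcore offset so the macro picks up the size
 * of the field to write.
 *
 *      rc = put_guest_lc(vcpu, ext_params, (u32 *)__LC_EXT_PARAMS);
 *      if (rc)
 *              return rc;      // -EFAULT: consider terminating the guest
 */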

/**
 * write_guest_lc - copy data from kernel space to guest vcpu's lowcore
 * @vcpu: virtual cpu
 * @gra: vcpu's destination guest real address
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * Copy data from kernel space to guest vcpu's lowcore. The entire range must
 * be located within the vcpu's lowcore, otherwise the result is undefined.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * Note: an error indicates that either the kernel is out of memory or
 * the guest memory mapping is broken. In any case the best solution
 * would be to terminate the guest.
 * It is wrong to inject a guest exception.
 */
static inline __must_check
int write_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
                   unsigned long len)
{
        unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);

        return kvm_write_guest(vcpu->kvm, gpa, data, len);
}

/**
 * read_guest_lc - copy data from guest vcpu's lowcore to kernel space
 * @vcpu: virtual cpu
 * @gra: vcpu's source guest real address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy data from guest vcpu's lowcore to kernel space. The entire range must
 * be located within the vcpu's lowcore, otherwise the result is undefined.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * Note: an error indicates that either the kernel is out of memory or
 * the guest memory mapping is broken. In any case the best solution
 * would be to terminate the guest.
 * It is wrong to inject a guest exception.
 */
static inline __must_check
int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
                  unsigned long len)
{
        unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);

        return kvm_read_guest(vcpu->kvm, gpa, data, len);
}
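
/*
 * Usage sketch (added for illustration; assumes the __LC_EXT_OLD_PSW and
 * __LC_EXT_NEW_PSW lowcore offsets from <asm/lowcore.h>): an interrupt
 * injection path can save the old PSW to and load the new PSW from the
 * guest lowcore via the *_lc helpers.
 *
 *      rc = write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
 *                          &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 *      rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
 *                          &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 */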

int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
                            ar_t ar, unsigned long *gpa, int write);
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
                    unsigned long length, int is_write);

int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
                 unsigned long len, int write);

int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
                      void *data, unsigned long len, int write);

/**
 * write_guest - copy data from kernel space to guest space
 * @vcpu: virtual cpu
 * @ga: guest address
 * @ar: access register
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @data (kernel space) to @ga (guest address).
 * In order to copy data to guest space the PSW of the vcpu is inspected:
 * If DAT is off data will be copied to guest real or absolute memory.
 * If DAT is on data will be copied to the address space as specified by
 * the address space bits of the PSW:
 * Primary, secondary, home space or access register mode.
 * The addressing mode of the PSW is also inspected, so that address wrap
 * around is taken into account for 24-, 31- and 64-bit addressing mode,
 * if the data to be copied crosses page boundaries in guest address space.
 * In addition, low address and DAT protection are inspected before copying
 * any data (key protection is currently not implemented).
 *
 * This function modifies the 'struct kvm_s390_pgm_info pgm' member of @vcpu.
 * In case of an access exception (e.g. protection exception) pgm will contain
 * all data necessary so that a subsequent call to 'kvm_s390_inject_prog_vcpu()'
 * will inject a correct exception into the guest.
 * If no access exception happened, the contents of pgm are undefined when
 * this function returns.
 *
 * Returns: - zero on success
 *          - a negative value if e.g. the guest mapping is broken or in
 *            case of out-of-memory. In this case the contents of pgm are
 *            undefined. Also parts of @data may have been copied to guest
 *            space.
 *          - a positive value if an access exception happened. In this case
 *            the returned value is the program interruption code and the
 *            contents of pgm may be used to inject an exception into the
 *            guest. No data has been copied to guest space.
 *
 * Note: in case an access exception is recognized no data has been copied to
 *       guest space (this is also true if the data to be copied would cross
 *       one or more page boundaries in guest space).
 *       Therefore this function may be used for nullifying and suppressing
 *       instruction emulation.
 *       It may also be used for terminating instructions, if it is undefined
 *       whether data has been changed in guest space in case of an exception.
 */
static inline __must_check
int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
                unsigned long len)
{
        return access_guest(vcpu, ga, ar, data, len, 1);
}
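
/*
 * Usage sketch (added for illustration; ga, ar and operand are placeholder
 * locals): instruction handlers typically forward the return value to
 * kvm_s390_inject_prog_cond() from kvm-s390.h, which injects a program
 * interrupt for positive return values and passes zero or negative values
 * straight through.
 *
 *      rc = write_guest(vcpu, ga, ar, &operand, sizeof(operand));
 *      if (rc)
 *              return kvm_s390_inject_prog_cond(vcpu, rc);
 */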

/**
 * read_guest - copy data from guest space to kernel space
 * @vcpu: virtual cpu
 * @ga: guest address
 * @ar: access register
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @ga (guest address) to @data (kernel space).
 *
 * The behaviour of read_guest is identical to write_guest, except that
 * data will be copied from guest space to kernel space.
 */
static inline __must_check
int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
               unsigned long len)
{
        return access_guest(vcpu, ga, ar, data, len, 0);
}

/**
 * write_guest_abs - copy data from kernel space to guest space absolute
 * @vcpu: virtual cpu
 * @gpa: guest physical (absolute) address
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @data (kernel space) to @gpa (guest absolute address).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest low address and key protection are not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to guest memory.
 */
static inline __must_check
int write_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
                    unsigned long len)
{
        return kvm_write_guest(vcpu->kvm, gpa, data, len);
}

/**
 * read_guest_abs - copy data from guest space absolute to kernel space
 * @vcpu: virtual cpu
 * @gpa: guest physical (absolute) address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @gpa (guest absolute address) to @data (kernel space).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest key protection is not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to kernel space.
 */
static inline __must_check
int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
                   unsigned long len)
{
        return kvm_read_guest(vcpu->kvm, gpa, data, len);
}

/**
 * write_guest_real - copy data from kernel space to guest space real
 * @vcpu: virtual cpu
 * @gra: guest real address
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @data (kernel space) to @gra (guest real address).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest low address and key protection are not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to guest memory.
 */
static inline __must_check
int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
                     unsigned long len)
{
        return access_guest_real(vcpu, gra, data, len, 1);
}

/**
 * read_guest_real - copy data from guest space real to kernel space
 * @vcpu: virtual cpu
 * @gra: guest real address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @gra (guest real address) to @data (kernel space).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest key protection is not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to kernel space.
 */
static inline __must_check
int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
                    unsigned long len)
{
        return access_guest_real(vcpu, gra, data, len, 0);
}

void ipte_lock(struct kvm_vcpu *vcpu);
void ipte_unlock(struct kvm_vcpu *vcpu);
int ipte_lock_held(struct kvm_vcpu *vcpu);
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);

#endif /* __KVM_S390_GACCESS_H */