regmap.c
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/emulate.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
  21. #include <linux/mm.h>
  22. #include <linux/kvm_host.h>
  23. #include <asm/kvm_emulate.h>
  24. #include <asm/ptrace.h>
  25. #define VCPU_NR_MODES 6
  26. #define REG_OFFSET(_reg) \
  27. (offsetof(struct user_pt_regs, _reg) / sizeof(unsigned long))
  28. #define USR_REG_OFFSET(R) REG_OFFSET(compat_usr(R))
  29. static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
  30. /* USR Registers */
  31. {
  32. USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
  33. USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
  34. USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
  35. USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
  36. USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
  37. REG_OFFSET(pc)
  38. },
  39. /* FIQ Registers */
  40. {
  41. USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
  42. USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
  43. USR_REG_OFFSET(6), USR_REG_OFFSET(7),
  44. REG_OFFSET(compat_r8_fiq), /* r8 */
  45. REG_OFFSET(compat_r9_fiq), /* r9 */
  46. REG_OFFSET(compat_r10_fiq), /* r10 */
  47. REG_OFFSET(compat_r11_fiq), /* r11 */
  48. REG_OFFSET(compat_r12_fiq), /* r12 */
  49. REG_OFFSET(compat_sp_fiq), /* r13 */
  50. REG_OFFSET(compat_lr_fiq), /* r14 */
  51. REG_OFFSET(pc)
  52. },
  53. /* IRQ Registers */
  54. {
  55. USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
  56. USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
  57. USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
  58. USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
  59. USR_REG_OFFSET(12),
  60. REG_OFFSET(compat_sp_irq), /* r13 */
  61. REG_OFFSET(compat_lr_irq), /* r14 */
  62. REG_OFFSET(pc)
  63. },
  64. /* SVC Registers */
  65. {
  66. USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
  67. USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
  68. USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
  69. USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
  70. USR_REG_OFFSET(12),
  71. REG_OFFSET(compat_sp_svc), /* r13 */
  72. REG_OFFSET(compat_lr_svc), /* r14 */
  73. REG_OFFSET(pc)
  74. },
  75. /* ABT Registers */
  76. {
  77. USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
  78. USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
  79. USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
  80. USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
  81. USR_REG_OFFSET(12),
  82. REG_OFFSET(compat_sp_abt), /* r13 */
  83. REG_OFFSET(compat_lr_abt), /* r14 */
  84. REG_OFFSET(pc)
  85. },
  86. /* UND Registers */
  87. {
  88. USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
  89. USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
  90. USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
  91. USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
  92. USR_REG_OFFSET(12),
  93. REG_OFFSET(compat_sp_und), /* r13 */
  94. REG_OFFSET(compat_lr_und), /* r14 */
  95. REG_OFFSET(pc)
  96. },
  97. };
  98. /*
  99. * Return a pointer to the register number valid in the current mode of
  100. * the virtual CPU.
  101. */
  102. unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
  103. {
  104. unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
  105. unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
  106. switch (mode) {
  107. case COMPAT_PSR_MODE_USR ... COMPAT_PSR_MODE_SVC:
  108. mode &= ~PSR_MODE32_BIT; /* 0 ... 3 */
  109. break;
  110. case COMPAT_PSR_MODE_ABT:
  111. mode = 4;
  112. break;
  113. case COMPAT_PSR_MODE_UND:
  114. mode = 5;
  115. break;
  116. case COMPAT_PSR_MODE_SYS:
  117. mode = 0; /* SYS maps to USR */
  118. break;
  119. default:
  120. BUG();
  121. }
  122. return reg_array + vcpu_reg_offsets[mode][reg_num];
  123. }
  124. /*
  125. * Return the SPSR for the current mode of the virtual CPU.
  126. */
  127. unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu)
  128. {
  129. unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
  130. switch (mode) {
  131. case COMPAT_PSR_MODE_SVC:
  132. mode = KVM_SPSR_SVC;
  133. break;
  134. case COMPAT_PSR_MODE_ABT:
  135. mode = KVM_SPSR_ABT;
  136. break;
  137. case COMPAT_PSR_MODE_UND:
  138. mode = KVM_SPSR_UND;
  139. break;
  140. case COMPAT_PSR_MODE_IRQ:
  141. mode = KVM_SPSR_IRQ;
  142. break;
  143. case COMPAT_PSR_MODE_FIQ:
  144. mode = KVM_SPSR_FIQ;
  145. break;
  146. default:
  147. BUG();
  148. }
  149. return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[mode];
  150. }