guestdbg.c

/*
 * kvm guest debug support
 *
 * Copyright IBM Corp. 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
 */
#include <linux/kvm_host.h>
#include <linux/errno.h>
#include "kvm-s390.h"
#include "gaccess.h"

/*
 * Extends the address range given by *start and *stop to include the address
 * range starting with estart and the length len. Takes care of overflowing
 * intervals and tries to minimize the overall interval size.
 */
static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len)
{
        u64 estop;

        if (len > 0)
                len--;
        else
                len = 0;

        estop = estart + len;

        /* 0-0 range represents "not set" */
        if ((*start == 0) && (*stop == 0)) {
                *start = estart;
                *stop = estop;
        } else if (*start <= *stop) {
                /* increase the existing range */
                if (estart < *start)
                        *start = estart;
                if (estop > *stop)
                        *stop = estop;
        } else {
                /* "overflowing" interval, whereby *start > *stop */
                if (estart <= *stop) {
                        if (estop > *stop)
                                *stop = estop;
                } else if (estop > *start) {
                        if (estart < *start)
                                *start = estart;
                }
                /* minimize the range */
                else if ((estop - *stop) < (*start - estart))
                        *stop = estop;
                else
                        *start = estart;
        }
}
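
/*
 * Worked example (editor's note, not from the original source): with
 * *start = 0xffffffffffffff00 and *stop = 0xff, the tracked range wraps
 * around and covers [0xffffffffffffff00, U64_MAX] plus [0, 0xff]. Extending
 * it by estart = 0x200, len = 0x10 hits neither sub-range, so the code
 * compares which boundary is cheaper to move and bumps *stop to 0x20f,
 * keeping the overall interval as small as possible.
 */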

#define MAX_INST_SIZE 6
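
/*
 * Editor's note: the largest s390 instruction is 6 bytes, so widening each
 * breakpoint's PER range by MAX_INST_SIZE below the target address ensures
 * that the instruction-fetch event for the instruction immediately before
 * (or containing) the breakpoint address is still reported.
 */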
static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
{
        unsigned long start, len;
        u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
        u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
        u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
        int i;

        if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||
            vcpu->arch.guestdbg.hw_bp_info == NULL)
                return;

        /*
         * If the guest is not interested in branching events, we can safely
         * limit them to the PER address range.
         */
        if (!(*cr9 & PER_EVENT_BRANCH))
                *cr9 |= PER_CONTROL_BRANCH_ADDRESS;
        *cr9 |= PER_EVENT_IFETCH | PER_EVENT_BRANCH;

        for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
                start = vcpu->arch.guestdbg.hw_bp_info[i].addr;
                len = vcpu->arch.guestdbg.hw_bp_info[i].len;

                /*
                 * The instruction in front of the desired bp has to
                 * report instruction-fetching events
                 */
                if (start < MAX_INST_SIZE) {
                        len += start;
                        start = 0;
                } else {
                        start -= MAX_INST_SIZE;
                        len += MAX_INST_SIZE;
                }

                extend_address_range(cr10, cr11, start, len);
        }
}

static void enable_all_hw_wp(struct kvm_vcpu *vcpu)
{
        unsigned long start, len;
        u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
        u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
        u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
        int i;

        if (vcpu->arch.guestdbg.nr_hw_wp <= 0 ||
            vcpu->arch.guestdbg.hw_wp_info == NULL)
                return;

        /*
         * If the host uses storage alteration for special address spaces,
         * enable all events and give them all to the guest.
         */
        if (*cr9 & PER_EVENT_STORE && *cr9 & PER_CONTROL_ALTERATION) {
                *cr9 &= ~PER_CONTROL_ALTERATION;
                *cr10 = 0;
                *cr11 = PSW_ADDR_INSN;
        } else {
                *cr9 &= ~PER_CONTROL_ALTERATION;
                *cr9 |= PER_EVENT_STORE;

                for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
                        start = vcpu->arch.guestdbg.hw_wp_info[i].addr;
                        len = vcpu->arch.guestdbg.hw_wp_info[i].len;

                        extend_address_range(cr10, cr11, start, len);
                }
        }
}
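
/*
 * While guest debugging is active, the host takes over CR0/CR9-CR11 for its
 * own PER configuration (see kvm_s390_patch_guest_per_regs() below). The
 * guest's own values are saved and restored around SIE entry, so the guest
 * never observes the patched control registers.
 */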
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu)
{
        vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0];
        vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9];
        vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10];
        vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11];
}

void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu)
{
        vcpu->arch.sie_block->gcr[0] = vcpu->arch.guestdbg.cr0;
        vcpu->arch.sie_block->gcr[9] = vcpu->arch.guestdbg.cr9;
        vcpu->arch.sie_block->gcr[10] = vcpu->arch.guestdbg.cr10;
        vcpu->arch.sie_block->gcr[11] = vcpu->arch.guestdbg.cr11;
}

void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu)
{
        /*
         * TODO: if guest psw has per enabled, otherwise 0s!
         * This reduces the amount of reported events.
         * Need to intercept all psw changes!
         */

        if (guestdbg_sstep_enabled(vcpu)) {
                /* disable timer (clock-comparator) interrupts */
                vcpu->arch.sie_block->gcr[0] &= ~0x800ul;
                vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH;
                vcpu->arch.sie_block->gcr[10] = 0;
                vcpu->arch.sie_block->gcr[11] = PSW_ADDR_INSN;
        }

        if (guestdbg_hw_bp_enabled(vcpu)) {
                enable_all_hw_bp(vcpu);
                enable_all_hw_wp(vcpu);
        }

        /* TODO: Instruction-fetching-nullification not allowed for now */
        if (vcpu->arch.sie_block->gcr[9] & PER_EVENT_NULLIFICATION)
                vcpu->arch.sie_block->gcr[9] &= ~PER_EVENT_NULLIFICATION;
}

#define MAX_WP_SIZE 100
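
/*
 * Convert one breakpoint entry of type KVM_HW_WP_WRITE into the kernel
 * watchpoint representation and snapshot the guest memory it covers (via
 * read_guest_abs()), so that any_wp_changed() can later detect a write by
 * comparing against old_data.
 */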
static int __import_wp_info(struct kvm_vcpu *vcpu,
                            struct kvm_hw_breakpoint *bp_data,
                            struct kvm_hw_wp_info_arch *wp_info)
{
        int ret = 0;

        wp_info->len = bp_data->len;
        wp_info->addr = bp_data->addr;
        wp_info->phys_addr = bp_data->phys_addr;
        wp_info->old_data = NULL;

        if (wp_info->len < 0 || wp_info->len > MAX_WP_SIZE)
                return -EINVAL;

        wp_info->old_data = kmalloc(bp_data->len, GFP_KERNEL);
        if (!wp_info->old_data)
                return -ENOMEM;

        /* try to backup the original value */
        ret = read_guest_abs(vcpu, wp_info->phys_addr, wp_info->old_data,
                             wp_info->len);
        if (ret) {
                kfree(wp_info->old_data);
                wp_info->old_data = NULL;
        }

        return ret;
}

#define MAX_BP_COUNT 50
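
/*
 * Import the KVM_SET_GUEST_DEBUG breakpoint array in two passes: copy the
 * raw array from userspace, count watchpoints and breakpoints, allocate one
 * kernel array per type, then fill both. On failure, the temporary copy and
 * both per-type arrays are freed.
 */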
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
                            struct kvm_guest_debug *dbg)
{
        int ret = 0, nr_wp = 0, nr_bp = 0, i, size;
        struct kvm_hw_breakpoint *bp_data = NULL;
        struct kvm_hw_wp_info_arch *wp_info = NULL;
        struct kvm_hw_bp_info_arch *bp_info = NULL;

        if (dbg->arch.nr_hw_bp <= 0 || !dbg->arch.hw_bp)
                return 0;
        else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT)
                return -EINVAL;

        size = dbg->arch.nr_hw_bp * sizeof(struct kvm_hw_breakpoint);
        bp_data = kmalloc(size, GFP_KERNEL);
        if (!bp_data) {
                ret = -ENOMEM;
                goto error;
        }

        if (copy_from_user(bp_data, dbg->arch.hw_bp, size)) {
                ret = -EFAULT;
                goto error;
        }

        for (i = 0; i < dbg->arch.nr_hw_bp; i++) {
                switch (bp_data[i].type) {
                case KVM_HW_WP_WRITE:
                        nr_wp++;
                        break;
                case KVM_HW_BP:
                        nr_bp++;
                        break;
                default:
                        break;
                }
        }

        size = nr_wp * sizeof(struct kvm_hw_wp_info_arch);
        if (size > 0) {
                wp_info = kmalloc(size, GFP_KERNEL);
                if (!wp_info) {
                        ret = -ENOMEM;
                        goto error;
                }
        }

        size = nr_bp * sizeof(struct kvm_hw_bp_info_arch);
        if (size > 0) {
                bp_info = kmalloc(size, GFP_KERNEL);
                if (!bp_info) {
                        ret = -ENOMEM;
                        goto error;
                }
        }

        for (nr_wp = 0, nr_bp = 0, i = 0; i < dbg->arch.nr_hw_bp; i++) {
                switch (bp_data[i].type) {
                case KVM_HW_WP_WRITE:
                        ret = __import_wp_info(vcpu, &bp_data[i],
                                               &wp_info[nr_wp]);
                        if (ret)
                                goto error;
                        nr_wp++;
                        break;
                case KVM_HW_BP:
                        bp_info[nr_bp].len = bp_data[i].len;
                        bp_info[nr_bp].addr = bp_data[i].addr;
                        nr_bp++;
                        break;
                }
        }

        vcpu->arch.guestdbg.nr_hw_bp = nr_bp;
        vcpu->arch.guestdbg.hw_bp_info = bp_info;
        vcpu->arch.guestdbg.nr_hw_wp = nr_wp;
        vcpu->arch.guestdbg.hw_wp_info = wp_info;
        /* bp_data was only a temporary copy; don't leak it on success */
        kfree(bp_data);
        return 0;

error:
        kfree(bp_data);
        kfree(wp_info);
        kfree(bp_info);
        return ret;
}

void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_hw_wp_info_arch *hw_wp_info = NULL;

        for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
                hw_wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
                kfree(hw_wp_info->old_data);
                hw_wp_info->old_data = NULL;
        }
        kfree(vcpu->arch.guestdbg.hw_wp_info);
        vcpu->arch.guestdbg.hw_wp_info = NULL;

        kfree(vcpu->arch.guestdbg.hw_bp_info);
        vcpu->arch.guestdbg.hw_bp_info = NULL;

        vcpu->arch.guestdbg.nr_hw_wp = 0;
        vcpu->arch.guestdbg.nr_hw_bp = 0;
}

static inline int in_addr_range(u64 addr, u64 a, u64 b)
{
        if (a <= b)
                return (addr >= a) && (addr <= b);
        else
                /* "overflowing" interval */
                return (addr >= a) || (addr <= b);
}
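
/*
 * Note: this is the membership test for the same wrap-around encoding that
 * extend_address_range() produces, where a > b means the interval covers
 * [a, U64_MAX] plus [0, b].
 */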

#define end_of_range(bp_info) ((bp_info)->addr + (bp_info)->len - 1)

static struct kvm_hw_bp_info_arch *find_hw_bp(struct kvm_vcpu *vcpu,
                                              unsigned long addr)
{
        struct kvm_hw_bp_info_arch *bp_info = vcpu->arch.guestdbg.hw_bp_info;
        int i;

        if (vcpu->arch.guestdbg.nr_hw_bp == 0)
                return NULL;

        for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
                /* addr is directly the start or in the range of a bp */
                if (addr == bp_info->addr)
                        goto found;
                if (bp_info->len > 0 &&
                    in_addr_range(addr, bp_info->addr, end_of_range(bp_info)))
                        goto found;

                bp_info++;
        }

        return NULL;
found:
        return bp_info;
}
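
/*
 * A PER storage-alteration event only tells us that something inside the
 * combined CR10/CR11 range was written, not which watchpoint (if any) was
 * hit. Re-read each watchpoint's guest memory and diff it against the
 * snapshot taken at import time to find the first one that actually changed.
 */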
static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_hw_wp_info_arch *wp_info = NULL;
        void *temp = NULL;

        if (vcpu->arch.guestdbg.nr_hw_wp == 0)
                return NULL;

        for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
                wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
                if (!wp_info || !wp_info->old_data || wp_info->len <= 0)
                        continue;

                temp = kmalloc(wp_info->len, GFP_KERNEL);
                if (!temp)
                        continue;

                /* refetch the wp data and compare it to the old value */
                if (!read_guest_abs(vcpu, wp_info->phys_addr, temp,
                                    wp_info->len)) {
                        if (memcmp(temp, wp_info->old_data, wp_info->len)) {
                                kfree(temp);
                                return wp_info;
                        }
                }
                kfree(temp);
                temp = NULL;
        }

        return NULL;
}

void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
{
        vcpu->run->exit_reason = KVM_EXIT_DEBUG;
        vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
}

#define per_bp_event(code) \
                (code & (PER_EVENT_IFETCH | PER_EVENT_BRANCH))
#define per_write_wp_event(code) \
                (code & (PER_EVENT_STORE | PER_EVENT_STORE_REAL))
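
/*
 * sie_block->perc holds the PER event code in a single byte; shifting it
 * left by 24 lines it up with the PER_EVENT_* masks, which are defined
 * against the high byte of a 32-bit word.
 */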
static int debug_exit_required(struct kvm_vcpu *vcpu)
{
        u32 perc = (vcpu->arch.sie_block->perc << 24);
        struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
        struct kvm_hw_wp_info_arch *wp_info = NULL;
        struct kvm_hw_bp_info_arch *bp_info = NULL;
        unsigned long addr = vcpu->arch.sie_block->gpsw.addr;
        unsigned long peraddr = vcpu->arch.sie_block->peraddr;

        if (guestdbg_hw_bp_enabled(vcpu)) {
                if (per_write_wp_event(perc) &&
                    vcpu->arch.guestdbg.nr_hw_wp > 0) {
                        wp_info = any_wp_changed(vcpu);
                        if (wp_info) {
                                debug_exit->addr = wp_info->addr;
                                debug_exit->type = KVM_HW_WP_WRITE;
                                goto exit_required;
                        }
                }
                if (per_bp_event(perc) &&
                    vcpu->arch.guestdbg.nr_hw_bp > 0) {
                        bp_info = find_hw_bp(vcpu, addr);
                        /* remove duplicate events if PC==PER address */
                        if (bp_info && (addr != peraddr)) {
                                debug_exit->addr = addr;
                                debug_exit->type = KVM_HW_BP;
                                vcpu->arch.guestdbg.last_bp = addr;
                                goto exit_required;
                        }
                        /* breakpoint missed */
                        bp_info = find_hw_bp(vcpu, peraddr);
                        if (bp_info && vcpu->arch.guestdbg.last_bp != peraddr) {
                                debug_exit->addr = peraddr;
                                debug_exit->type = KVM_HW_BP;
                                goto exit_required;
                        }
                }
        }
        if (guestdbg_sstep_enabled(vcpu) && per_bp_event(perc)) {
                debug_exit->addr = addr;
                debug_exit->type = KVM_SINGLESTEP;
                goto exit_required;
        }

        return 0;
exit_required:
        return 1;
}

#define guest_per_enabled(vcpu) \
                (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER)
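
/*
 * The host programs a superset of the PER events the guest asked for (see
 * kvm_s390_patch_guest_per_regs()). Before a PER event is injected into the
 * guest, drop everything the guest did not itself request, so the guest only
 * sees events matching its own CR9-CR11 settings and PSW PER bit.
 */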
static void filter_guest_per_event(struct kvm_vcpu *vcpu)
{
        u32 perc = vcpu->arch.sie_block->perc << 24;
        u64 peraddr = vcpu->arch.sie_block->peraddr;
        u64 addr = vcpu->arch.sie_block->gpsw.addr;
        u64 cr9 = vcpu->arch.sie_block->gcr[9];
        u64 cr10 = vcpu->arch.sie_block->gcr[10];
        u64 cr11 = vcpu->arch.sie_block->gcr[11];
        /* keep only the events demanded by the guest */
        u32 guest_perc = perc & cr9 & PER_EVENT_MASK;

        if (!guest_per_enabled(vcpu))
                guest_perc = 0;

        /* filter "successful-branching" events */
        if (guest_perc & PER_EVENT_BRANCH &&
            cr9 & PER_CONTROL_BRANCH_ADDRESS &&
            !in_addr_range(addr, cr10, cr11))
                guest_perc &= ~PER_EVENT_BRANCH;

        /* filter "instruction-fetching" events */
        if (guest_perc & PER_EVENT_IFETCH &&
            !in_addr_range(peraddr, cr10, cr11))
                guest_perc &= ~PER_EVENT_IFETCH;

        /* All other PER events will be given to the guest */
        /* TODO: Check altered address / address space */

        vcpu->arch.sie_block->perc = guest_perc >> 24;

        if (!guest_perc)
                vcpu->arch.sie_block->iprcc &= ~PGM_PER;
}

#define pssec(vcpu) (vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH)
#define hssec(vcpu) (vcpu->arch.sie_block->gcr[13] & _ASCE_SPACE_SWITCH)
#define old_ssec(vcpu) ((vcpu->arch.sie_block->tecmc >> 31) & 0x1)
#define old_as_is_home(vcpu) !(vcpu->arch.sie_block->tecmc & 0xffff)
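
/*
 * Editor's note: pssec()/hssec() test the space-switch-event control bit in
 * the primary (CR1) and home (CR13) ASCEs, while old_ssec() and
 * old_as_is_home() decode the pre-switch address-space state from the
 * sie_block's tecmc field.
 */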

void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
{
        int new_as;

        if (debug_exit_required(vcpu))
                vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;

        filter_guest_per_event(vcpu);

        /*
         * Only RP, SAC, SACF, PT, PTI, PR, PC instructions can trigger
         * a space-switch event. PER events enforce space-switch events
         * for these instructions. So if no PER event for the guest is left,
         * we might have to filter the space-switch element out, too.
         */
        if (vcpu->arch.sie_block->iprcc == PGM_SPACE_SWITCH) {
                vcpu->arch.sie_block->iprcc = 0;
                new_as = psw_bits(vcpu->arch.sie_block->gpsw).as;

                /*
                 * If the AS changed from / to home, we had an RP, SAC or SACF
                 * instruction. Check primary and home space-switch-event
                 * controls. (theoretically home -> home produces no event)
                 */
                if (((new_as == PSW_AS_HOME) ^ old_as_is_home(vcpu)) &&
                    (pssec(vcpu) || hssec(vcpu)))
                        vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;

                /*
                 * PT, PTI, PR, PC instructions operate on the primary AS
                 * only. Check if the primary-space-switch-event control was
                 * or got set.
                 */
                if (new_as == PSW_AS_PRIMARY && !old_as_is_home(vcpu) &&
                    (pssec(vcpu) || old_ssec(vcpu)))
                        vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
        }
}