vgic-v2-emul.c

/*
 * Contains GICv2 specific emulation code, was in vgic.c before.
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"

#define GICC_ARCH_VERSION_V2	0x2

static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
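
/*
 * Each SGI has a per-target byte in irq_sgi_sources: bit n set means
 * vCPU n has raised this SGI for vcpu_id and it has not been delivered
 * yet.
 */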
static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
{
	return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
}
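
/*
 * First words of the distributor: GICD_CTLR holds the global enable
 * bit, GICD_TYPER advertises the number of vCPUs (minus one) and the
 * number of supported interrupts (in units of 32, minus one), and
 * GICD_IIDR identifies the implementer.
 */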
static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	u32 word_offset = offset & 3;

	switch (offset & ~3) {
	case 0:			/* GICD_CTLR */
		reg = vcpu->kvm->arch.vgic.enabled;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
		if (mmio->is_write) {
			vcpu->kvm->arch.vgic.enabled = reg & 1;
			vgic_update_state(vcpu->kvm);
			return true;
		}
		break;

	case 4:			/* GICD_TYPER */
		reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;

	case 8:			/* GICD_IIDR */
		reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;
	}

	return false;
}

static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
}

static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
}

static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
					   vcpu->vcpu_id);
}

static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
					     vcpu->vcpu_id);
}

static bool handle_mmio_set_active_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
					  vcpu->vcpu_id);
}

static bool handle_mmio_clear_active_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
					    vcpu->vcpu_id);
}

static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
					vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}

#define GICD_ITARGETSR_SIZE	32
#define GICD_CPUTARGETS_BITS	8
#define GICD_IRQS_PER_ITARGETSR	(GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
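
/*
 * Each 32-bit GICD_ITARGETSR covers four interrupts, one target byte
 * each. The emulation only honours a single target vCPU per SPI, kept
 * in irq_spi_cpu[].
 */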
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;
	u32 val = 0;

	irq -= VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
		val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);

	return val;
}

static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, c;
	unsigned long *bmap;
	u32 target;

	irq -= VGIC_NR_PRIVATE_IRQS;

	/*
	 * Pick the LSB in each byte. This ensures we target exactly
	 * one vcpu per IRQ. If the byte is null, assume we target
	 * CPU0.
	 */
	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
		int shift = i * GICD_CPUTARGETS_BITS;

		target = ffs((val >> shift) & 0xffU);
		target = target ? (target - 1) : 0;
		dist->irq_spi_cpu[irq + i] = target;
		kvm_for_each_vcpu(c, vcpu, kvm) {
			bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
			if (c == target)
				set_bit(irq + i, bmap);
			else
				clear_bit(irq + i, bmap);
		}
	}
}

static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset)
{
	u32 reg;

	/* We treat the banked interrupt targets as read-only */
	if (offset < 32) {
		u32 roreg;

		roreg = 1 << vcpu->vcpu_id;
		roreg |= roreg << 8;
		roreg |= roreg << 16;

		vgic_reg_access(mmio, &roreg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 *reg;

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	return vgic_handle_cfg_reg(reg, mmio, offset);
}

static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_dispatch_sgi(vcpu, reg);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
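/* Each byte of these registers is the source-CPU bitmap for one SGI. */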
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg = 0;

	/* Copy source SGIs from distributor side */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 sources = *vgic_get_sgi_sources(dist, vcpu_id, sgi);

		reg |= ((u32)sources) << (8 * (sgi - min_sgi));
	}

	mmio_data_write(mmio, ~0, reg);
	return false;
}

static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset, bool set)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg;
	bool updated = false;

	reg = mmio_data_read(mmio, ~0);

	/* Set or clear pending SGI sources on the distributor side */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 mask = reg >> (8 * (sgi - min_sgi));
		u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);

		if (set) {
			if ((*src & mask) != mask)
				updated = true;
			*src |= mask;
		} else {
			if (*src & mask)
				updated = true;
			*src &= ~mask;
		}
	}

	if (updated)
		vgic_update_state(vcpu->kvm);

	return updated;
}

static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}

static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}
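
/*
 * Dispatch table for the distributor address space. Handlers receive
 * the offset relative to their range base; for the per-IRQ registers,
 * bits_per_irq lets the common range-matching code scale the window
 * with the configured number of interrupts.
 */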
static const struct vgic_io_range vgic_dist_ranges[] = {
	{
		.base = GIC_DIST_CTRL,
		.len = 12,
		.bits_per_irq = 0,
		.handle_mmio = handle_mmio_misc,
	},
	{
		.base = GIC_DIST_IGROUP,
		.len = VGIC_MAX_IRQS / 8,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GIC_DIST_ENABLE_SET,
		.len = VGIC_MAX_IRQS / 8,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_set_enable_reg,
	},
	{
		.base = GIC_DIST_ENABLE_CLEAR,
		.len = VGIC_MAX_IRQS / 8,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_clear_enable_reg,
	},
	{
		.base = GIC_DIST_PENDING_SET,
		.len = VGIC_MAX_IRQS / 8,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_set_pending_reg,
	},
	{
		.base = GIC_DIST_PENDING_CLEAR,
		.len = VGIC_MAX_IRQS / 8,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_clear_pending_reg,
	},
	{
		.base = GIC_DIST_ACTIVE_SET,
		.len = VGIC_MAX_IRQS / 8,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_set_active_reg,
	},
	{
		.base = GIC_DIST_ACTIVE_CLEAR,
		.len = VGIC_MAX_IRQS / 8,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_clear_active_reg,
	},
	{
		.base = GIC_DIST_PRI,
		.len = VGIC_MAX_IRQS,
		.bits_per_irq = 8,
		.handle_mmio = handle_mmio_priority_reg,
	},
	{
		.base = GIC_DIST_TARGET,
		.len = VGIC_MAX_IRQS,
		.bits_per_irq = 8,
		.handle_mmio = handle_mmio_target_reg,
	},
	{
		.base = GIC_DIST_CONFIG,
		.len = VGIC_MAX_IRQS / 4,
		.bits_per_irq = 2,
		.handle_mmio = handle_mmio_cfg_reg,
	},
	{
		.base = GIC_DIST_SOFTINT,
		.len = 4,
		.handle_mmio = handle_mmio_sgi_reg,
	},
	{
		.base = GIC_DIST_SGI_PENDING_CLEAR,
		.len = VGIC_NR_SGIS,
		.handle_mmio = handle_mmio_sgi_clear,
	},
	{
		.base = GIC_DIST_SGI_PENDING_SET,
		.len = VGIC_NR_SGIS,
		.handle_mmio = handle_mmio_sgi_set,
	},
	{}
};
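
/*
 * Decode a write to GICD_SGIR: bits [3:0] select the SGI, bits [23:16]
 * hold the CPU target list and bits [25:24] the filter (0: use the
 * target list, 1: all but self, 2: self only).
 */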
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	u8 target_cpus;
	int sgi, mode, c, vcpu_id;

	vcpu_id = vcpu->vcpu_id;

	sgi = reg & 0xf;
	target_cpus = (reg >> 16) & 0xff;
	mode = (reg >> 24) & 3;

	switch (mode) {
	case 0:
		if (!target_cpus)
			return;
		break;

	case 1:
		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
		break;

	case 2:
		target_cpus = 1 << vcpu_id;
		break;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (target_cpus & 1) {
			/* Flag the SGI as pending */
			vgic_dist_irq_set_pending(vcpu, sgi);
			*vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
			kvm_debug("SGI%d from CPU%d to CPU%d\n",
				  sgi, vcpu_id, c);
		}

		target_cpus >>= 1;
	}
}
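
/*
 * Queue one list register entry per source CPU; the SGI stays pending
 * on the distributor until every source has been delivered.
 */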
static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long sources;
	int vcpu_id = vcpu->vcpu_id;
	int c;

	sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);

	for_each_set_bit(c, &sources, dist->nr_cpus) {
		if (vgic_queue_irq(vcpu, c, irq))
			clear_bit(c, &sources);
	}

	*vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;

	/*
	 * If the sources bitmap has been cleared it means that we
	 * could queue all the SGIs onto link registers (see the
	 * clear_bit above), and therefore we are done with them in
	 * our emulated gic and can get rid of them.
	 */
	if (!sources) {
		vgic_dist_irq_clear_pending(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}

/**
 * vgic_v2_map_resources - Configure global VGIC state before running any VCPUs
 * @kvm:	pointer to the kvm struct
 * @params:	parameters describing the vgic hardware
 *
 * Map the virtual CPU interface into the VM before running any VCPUs. We
 * can't do this at creation time, because user space must first set the
 * virtual CPU interface address in the guest physical address space.
 */
static int vgic_v2_map_resources(struct kvm *kvm,
				 const struct vgic_params *params)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret = 0;

	if (!irqchip_in_kernel(kvm))
		return 0;

	mutex_lock(&kvm->lock);

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
				 KVM_VGIC_V2_DIST_SIZE,
				 vgic_dist_ranges, -1, &dist->dist_iodev);

	/*
	 * Initialize the vgic if this hasn't already been done on demand by
	 * accessing the vgic state from userspace.
	 */
	ret = vgic_init(kvm);
	if (ret) {
		kvm_err("Unable to allocate maps\n");
		goto out_unregister;
	}

	ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
				    params->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
				    true);
	if (ret) {
		kvm_err("Unable to remap VGIC CPU to VCPU\n");
		goto out_unregister;
	}

	dist->ready = true;
	goto out;

out_unregister:
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev);

out:
	if (ret)
		kvm_vgic_destroy(kvm);
	mutex_unlock(&kvm->lock);
	return ret;
}

static void vgic_v2_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	*vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source;
}

static int vgic_v2_init_model(struct kvm *kvm)
{
	int i;

	for (i = VGIC_NR_PRIVATE_IRQS; i < kvm->arch.vgic.nr_irqs; i += 4)
		vgic_set_target_reg(kvm, 0, i);

	return 0;
}
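
/* Wire up the GICv2-specific callbacks used by the common vgic code. */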
void vgic_v2_init_emulation(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
	dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source;
	dist->vm_ops.init_model = vgic_v2_init_model;
	dist->vm_ops.map_resources = vgic_v2_map_resources;

	kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
}
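
/*
 * GICC_CTLR, GICC_PMR, GICC_BPR and GICC_ABPR are all backed by the
 * shadow vgic_vmcr state; reads and writes are folded onto the
 * corresponding field.
 */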
static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
				 struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	bool updated = false;
	struct vgic_vmcr vmcr;
	u32 *vmcr_field;
	u32 reg;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (offset & ~0x3) {
	case GIC_CPU_CTRL:
		vmcr_field = &vmcr.ctlr;
		break;
	case GIC_CPU_PRIMASK:
		vmcr_field = &vmcr.pmr;
		break;
	case GIC_CPU_BINPOINT:
		vmcr_field = &vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr_field = &vmcr.abpr;
		break;
	default:
		BUG();
	}

	if (!mmio->is_write) {
		reg = *vmcr_field;
		mmio_data_write(mmio, ~0, reg);
	} else {
		reg = mmio_data_read(mmio, ~0);
		if (reg != *vmcr_field) {
			*vmcr_field = reg;
			vgic_set_vmcr(vcpu, &vmcr);
			updated = true;
		}
	}
	return updated;
}

static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
}

static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	u32 reg;

	if (mmio->is_write)
		return false;

	/* GICC_IIDR */
	reg = (PRODUCT_ID_KVM << 20) |
	      (GICC_ARCH_VERSION_V2 << 16) |
	      (IMPLEMENTER_ARM << 0);
	mmio_data_write(mmio, ~0, reg);
	return false;
}

/*
 * CPU Interface Register accesses - these are not performed by the VM,
 * but by user space for saving and restoring the VGIC state.
 */
static const struct vgic_io_range vgic_cpu_ranges[] = {
	{
		.base = GIC_CPU_CTRL,
		.len = 12,
		.handle_mmio = handle_cpu_mmio_misc,
	},
	{
		.base = GIC_CPU_ALIAS_BINPOINT,
		.len = 4,
		.handle_mmio = handle_mmio_abpr,
	},
	{
		.base = GIC_CPU_ACTIVEPRIO,
		.len = 16,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GIC_CPU_IDENT,
		.len = 4,
		.handle_mmio = handle_cpu_mmio_ident,
	},
};
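
/*
 * Perform a single 4-byte register access on behalf of user space by
 * building a kvm_exit_mmio and feeding it through the same handler
 * tables the guest uses. All VCPUs must be stopped, and pending IRQs
 * still sitting in list registers are moved back to the distributor
 * first so that the state seen through this API is complete.
 */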
static int vgic_attr_regs_access(struct kvm_device *dev,
				 struct kvm_device_attr *attr,
				 u32 *reg, bool is_write)
{
	const struct vgic_io_range *r = NULL, *ranges;
	phys_addr_t offset;
	int ret, cpuid, c;
	struct kvm_vcpu *vcpu, *tmp_vcpu;
	struct vgic_dist *vgic;
	struct kvm_exit_mmio mmio;
	u32 data;

	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		KVM_DEV_ARM_VGIC_CPUID_SHIFT;

	mutex_lock(&dev->kvm->lock);

	ret = vgic_init(dev->kvm);
	if (ret)
		goto out;

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
		ret = -EINVAL;
		goto out;
	}

	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	vgic = &dev->kvm->arch.vgic;

	mmio.len = 4;
	mmio.is_write = is_write;
	mmio.data = &data;
	if (is_write)
		mmio_data_write(&mmio, ~0, *reg);
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		mmio.phys_addr = vgic->vgic_dist_base + offset;
		ranges = vgic_dist_ranges;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		mmio.phys_addr = vgic->vgic_cpu_base + offset;
		ranges = vgic_cpu_ranges;
		break;
	default:
		BUG();
	}
	r = vgic_find_range(ranges, 4, offset);

	if (unlikely(!r || !r->handle_mmio)) {
		ret = -ENXIO;
		goto out;
	}

	spin_lock(&vgic->lock);

	/*
	 * Ensure that no other VCPU is running by checking the vcpu->cpu
	 * field. If no other VCPUs are running we can safely access the VGIC
	 * state, because even if another VCPU is run after this point, that
	 * VCPU will not touch the vgic state, because it will block on
	 * getting the vgic->lock in kvm_vgic_sync_hwstate().
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
		if (unlikely(tmp_vcpu->cpu != -1)) {
			ret = -EBUSY;
			goto out_vgic_unlock;
		}
	}

	/*
	 * Move all pending IRQs from the LRs on all VCPUs so the pending
	 * state can be properly represented in the register state accessible
	 * through this API.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
		vgic_unqueue_irqs(tmp_vcpu);

	offset -= r->base;
	r->handle_mmio(vcpu, &mmio, offset);

	if (!is_write)
		*reg = mmio_data_read(&mmio, ~0);

	ret = 0;
out_vgic_unlock:
	spin_unlock(&vgic->lock);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}

static int vgic_v2_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}

static void vgic_v2_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

static int vgic_v2_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_set_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_attr_regs_access(dev, attr, &reg, true);
	}
	}

	return -ENXIO;
}

static int vgic_v2_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_get_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		ret = vgic_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	}

	return -ENXIO;
}

static int vgic_v2_has_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	phys_addr_t offset;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_dist_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_cpu_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
	}

	return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic-v2",
	.create = vgic_v2_create,
	.destroy = vgic_v2_destroy,
	.set_attr = vgic_v2_set_attr,
	.get_attr = vgic_v2_get_attr,
	.has_attr = vgic_v2_has_attr,
};