vgic-v3-emul.c

/*
 * GICv3 distributor and redistributor emulation
 *
 * GICv3 emulation is currently only supported on a GICv3 host (because
 * we rely on the hardware's CPU interface virtualization support), but
 * supports both hardware with or without the optional GICv2 backwards
 * compatibility features.
 *
 * Limitations of the emulation:
 * (RAZ/WI: read as zero, write ignore, RAO/WI: read as one, write ignore)
 * - We do not support LPIs (yet). TYPER.LPIS is reported as 0 and is RAZ/WI.
 * - We do not support the message based interrupts (MBIs) triggered by
 *   writes to the GICD_{SET,CLR}SPI_* registers. TYPER.MBIS is reported as 0.
 * - We do not support the (optional) backwards compatibility feature.
 *   GICD_CTLR.ARE resets to 1 and is RAO/WI. If the _host_ GIC supports
 *   the compatibility feature, you can use a GICv2 in the guest, though.
 * - We only support a single security state. GICD_CTLR.DS is 1 and is RAO/WI.
 * - Priorities are not emulated (same as the GICv2 emulation). Linux
 *   as a guest is fine with this, because it does not use priorities.
 * - We only support Group1 interrupts. Again Linux uses only those.
 *
 * Copyright (C) 2014 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>

#include <linux/irqchip/arm-gic-v3.h>
#include <kvm/arm_vgic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
static bool handle_mmio_rao_wi(struct kvm_vcpu *vcpu,
                               struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg = 0xffffffff;

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

        return false;
}

static bool handle_mmio_ctlr(struct kvm_vcpu *vcpu,
                             struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg = 0;

        /*
         * Force ARE and DS to 1, the guest cannot change this.
         * For the time being we only support Group1 interrupts.
         */
        if (vcpu->kvm->arch.vgic.enabled)
                reg = GICD_CTLR_ENABLE_SS_G1;
        reg |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        if (mmio->is_write) {
                vcpu->kvm->arch.vgic.enabled = !!(reg & GICD_CTLR_ENABLE_SS_G1);
                vgic_update_state(vcpu->kvm);
                return true;
        }
        return false;
}

/*
 * As this implementation does not provide compatibility
 * with GICv2 (ARE==1), we report zero CPUs in bits [5..7].
 * Also LPIs and MBIs are not supported, so we set the respective bits to 0.
 * Also we report at most 2**10=1024 interrupt IDs (to match 1024 SPIs).
 */
#define INTERRUPT_ID_BITS 10
static bool handle_mmio_typer(struct kvm_vcpu *vcpu,
                              struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg;
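        /*
         * GICD_TYPER layout for the fields we emulate: ITLinesNumber sits in
         * bits [4:0] and advertises 32 * (value + 1) interrupt IDs, IDbits
         * sits in bits [23:19] and holds (number of ID bits - 1).
         */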
        reg = (min(vcpu->kvm->arch.vgic.nr_irqs, 1024) >> 5) - 1;

        reg |= (INTERRUPT_ID_BITS - 1) << 19;

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

        return false;
}

static bool handle_mmio_iidr(struct kvm_vcpu *vcpu,
                             struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg;

        reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

        return false;
}

static bool handle_mmio_set_enable_reg_dist(struct kvm_vcpu *vcpu,
                                            struct kvm_exit_mmio *mmio,
                                            phys_addr_t offset)
{
        if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
                return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
                                              vcpu->vcpu_id,
                                              ACCESS_WRITE_SETBIT);

        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_clear_enable_reg_dist(struct kvm_vcpu *vcpu,
                                              struct kvm_exit_mmio *mmio,
                                              phys_addr_t offset)
{
        if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
                return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
                                              vcpu->vcpu_id,
                                              ACCESS_WRITE_CLEARBIT);

        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_set_pending_reg_dist(struct kvm_vcpu *vcpu,
                                             struct kvm_exit_mmio *mmio,
                                             phys_addr_t offset)
{
        if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
                return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
                                                   vcpu->vcpu_id);

        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_clear_pending_reg_dist(struct kvm_vcpu *vcpu,
                                               struct kvm_exit_mmio *mmio,
                                               phys_addr_t offset)
{
        if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
                return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
                                                     vcpu->vcpu_id);

        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_set_active_reg_dist(struct kvm_vcpu *vcpu,
                                            struct kvm_exit_mmio *mmio,
                                            phys_addr_t offset)
{
        if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
                return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
                                                  vcpu->vcpu_id);

        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_clear_active_reg_dist(struct kvm_vcpu *vcpu,
                                              struct kvm_exit_mmio *mmio,
                                              phys_addr_t offset)
{
        if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
                return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
                                                    vcpu->vcpu_id);

        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_priority_reg_dist(struct kvm_vcpu *vcpu,
                                          struct kvm_exit_mmio *mmio,
                                          phys_addr_t offset)
{
        u32 *reg;

        if (unlikely(offset < VGIC_NR_PRIVATE_IRQS)) {
                vgic_reg_access(mmio, NULL, offset,
                                ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
                return false;
        }

        reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
                                   vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        return false;
}

static bool handle_mmio_cfg_reg_dist(struct kvm_vcpu *vcpu,
                                     struct kvm_exit_mmio *mmio,
                                     phys_addr_t offset)
{
        u32 *reg;

        if (unlikely(offset < VGIC_NR_PRIVATE_IRQS / 4)) {
                vgic_reg_access(mmio, NULL, offset,
                                ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
                return false;
        }

        reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
                                  vcpu->vcpu_id, offset >> 1);

        return vgic_handle_cfg_reg(reg, mmio, offset);
}

/*
 * We use a compressed version of the MPIDR (all 32 bits in one 32-bit word)
 * when we store the target MPIDR written by the guest.
 */
static u32 compress_mpidr(unsigned long mpidr)
{
        u32 ret;

        ret = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        ret |= MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8;
        ret |= MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16;
        ret |= MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24;

        return ret;
}

static unsigned long uncompress_mpidr(u32 value)
{
        unsigned long mpidr;

        mpidr  = ((value >>  0) & 0xFF) << MPIDR_LEVEL_SHIFT(0);
        mpidr |= ((value >>  8) & 0xFF) << MPIDR_LEVEL_SHIFT(1);
        mpidr |= ((value >> 16) & 0xFF) << MPIDR_LEVEL_SHIFT(2);
        mpidr |= (u64)((value >> 24) & 0xFF) << MPIDR_LEVEL_SHIFT(3);

        return mpidr;
}
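
/*
 * Worked example: an MPIDR with Aff3 = 0x01, Aff2 = 0x02, Aff1 = 0x03 and
 * Aff0 = 0x04 reads 0x0100020304 in the architectural layout (Aff3 lives in
 * bits [39:32]); compress_mpidr() packs it into the 32-bit value 0x01020304,
 * and uncompress_mpidr() restores the original layout.
 */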

/*
 * Lookup the given MPIDR value to get the vcpu_id (if there is one)
 * and store that in the irq_spi_cpu[] array.
 * This limits the number of VCPUs to 255 for now, extending the data
 * type (or storing kvm_vcpu pointers) should lift the limit.
 * Store the original MPIDR value in an extra array to support read-as-written.
 * Unallocated MPIDRs are translated to a special value and caught
 * before any array accesses.
 */
static bool handle_mmio_route_reg(struct kvm_vcpu *vcpu,
                                  struct kvm_exit_mmio *mmio,
                                  phys_addr_t offset)
{
        struct kvm *kvm = vcpu->kvm;
        struct vgic_dist *dist = &kvm->arch.vgic;
        int spi;
        u32 reg;
        int vcpu_id;
        unsigned long *bmap, mpidr;

        /*
         * The upper 32 bits of each 64 bit register are zero,
         * as we don't support Aff3.
         */
        if ((offset & 4)) {
                vgic_reg_access(mmio, NULL, offset,
                                ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
                return false;
        }

        /* This region only covers SPIs, so no handling of private IRQs here. */
        spi = offset / 8;

        /* get the stored MPIDR for this IRQ */
        mpidr = uncompress_mpidr(dist->irq_spi_mpidr[spi]);
        reg = mpidr;

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);

        if (!mmio->is_write)
                return false;

        /*
         * Now clear the currently assigned vCPU from the map, making room
         * for the new one to be written below
         */
        vcpu = kvm_mpidr_to_vcpu(kvm, mpidr);
        if (likely(vcpu)) {
                vcpu_id = vcpu->vcpu_id;
                bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
                __clear_bit(spi, bmap);
        }

        dist->irq_spi_mpidr[spi] = compress_mpidr(reg);
        vcpu = kvm_mpidr_to_vcpu(kvm, reg & MPIDR_HWID_BITMASK);

        /*
         * The spec says that non-existent MPIDR values should not be
         * forwarded to any existent (v)CPU, but should be able to become
         * pending anyway. We simply keep the irq_spi_target[] array empty, so
         * the interrupt will never be injected.
         * irq_spi_cpu[irq] gets a magic value in this case.
         */
        if (likely(vcpu)) {
                vcpu_id = vcpu->vcpu_id;
                dist->irq_spi_cpu[spi] = vcpu_id;
                bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
                __set_bit(spi, bmap);
        } else {
                dist->irq_spi_cpu[spi] = VCPU_NOT_ALLOCATED;
        }

        vgic_update_state(kvm);

        return true;
}

/*
 * We should be careful about promising too much when a guest reads
 * this register. Don't claim to be like any hardware implementation,
 * but just report the GIC as version 3 - which is what a Linux guest
 * would check.
 */
static bool handle_mmio_idregs(struct kvm_vcpu *vcpu,
                               struct kvm_exit_mmio *mmio,
                               phys_addr_t offset)
{
        u32 reg = 0;

        switch (offset + GICD_IDREGS) {
        case GICD_PIDR2:
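                /* bits [7:4] of PIDR2 hold the ArchRev field; 0x3 means GICv3 */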
                reg = 0x3b;
                break;
        }

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

        return false;
}

static const struct vgic_io_range vgic_v3_dist_ranges[] = {
        {
                .base = GICD_CTLR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_ctlr,
        },
        {
                .base = GICD_TYPER,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_typer,
        },
        {
                .base = GICD_IIDR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_iidr,
        },
        {
                /* this register is optional, it is RAZ/WI if not implemented */
                .base = GICD_STATUSR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this write only register is WI when TYPER.MBIS=0 */
                .base = GICD_SETSPI_NSR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this write only register is WI when TYPER.MBIS=0 */
                .base = GICD_CLRSPI_NSR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when DS=1 */
                .base = GICD_SETSPI_SR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when DS=1 */
                .base = GICD_CLRSPI_SR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GICD_IGROUPR,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_rao_wi,
        },
        {
                .base = GICD_ISENABLER,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_set_enable_reg_dist,
        },
        {
                .base = GICD_ICENABLER,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_clear_enable_reg_dist,
        },
        {
                .base = GICD_ISPENDR,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_set_pending_reg_dist,
        },
        {
                .base = GICD_ICPENDR,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_clear_pending_reg_dist,
        },
        {
                .base = GICD_ISACTIVER,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_set_active_reg_dist,
        },
        {
                .base = GICD_ICACTIVER,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_clear_active_reg_dist,
        },
        {
                .base = GICD_IPRIORITYR,
                .len = 0x400,
                .bits_per_irq = 8,
                .handle_mmio = handle_mmio_priority_reg_dist,
        },
        {
                /* TARGETSRn is RES0 when ARE=1 */
                .base = GICD_ITARGETSR,
                .len = 0x400,
                .bits_per_irq = 8,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GICD_ICFGR,
                .len = 0x100,
                .bits_per_irq = 2,
                .handle_mmio = handle_mmio_cfg_reg_dist,
        },
        {
                /* this is RAZ/WI when DS=1 */
                .base = GICD_IGRPMODR,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when DS=1 */
                .base = GICD_NSACR,
                .len = 0x100,
                .bits_per_irq = 2,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when ARE=1 */
                .base = GICD_SGIR,
                .len = 0x04,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when ARE=1 */
                .base = GICD_CPENDSGIR,
                .len = 0x10,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when ARE=1 */
                .base = GICD_SPENDSGIR,
                .len = 0x10,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GICD_IROUTER + 0x100,
                .len = 0x1ee0,
                .bits_per_irq = 64,
                .handle_mmio = handle_mmio_route_reg,
        },
        {
                .base = GICD_IDREGS,
                .len = 0x30,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_idregs,
        },
        {},
};

static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu,
                                    struct kvm_exit_mmio *mmio,
                                    phys_addr_t offset)
{
        /* since we don't support LPIs, this register is zero for now */
        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu,
                                     struct kvm_exit_mmio *mmio,
                                     phys_addr_t offset)
{
        u32 reg;
        u64 mpidr;
        struct kvm_vcpu *redist_vcpu = mmio->private;
        int target_vcpu_id = redist_vcpu->vcpu_id;

        /* the upper 32 bits contain the affinity value */
        if ((offset & ~3) == 4) {
                mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu);
                reg = compress_mpidr(mpidr);

                vgic_reg_access(mmio, &reg, offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
                return false;
        }
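        /*
         * The lower word holds the processor number in bits [23:8]; the
         * Last bit flags the final redistributor in this region.
         */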
        reg = redist_vcpu->vcpu_id << 8;
        if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
                reg |= GICR_TYPER_LAST;
        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu,
                                              struct kvm_exit_mmio *mmio,
                                              phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;

        return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
                                      redist_vcpu->vcpu_id,
                                      ACCESS_WRITE_SETBIT);
}

static bool handle_mmio_clear_enable_reg_redist(struct kvm_vcpu *vcpu,
                                                struct kvm_exit_mmio *mmio,
                                                phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;

        return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
                                      redist_vcpu->vcpu_id,
                                      ACCESS_WRITE_CLEARBIT);
}

static bool handle_mmio_set_active_reg_redist(struct kvm_vcpu *vcpu,
                                              struct kvm_exit_mmio *mmio,
                                              phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;

        return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
                                          redist_vcpu->vcpu_id);
}

static bool handle_mmio_clear_active_reg_redist(struct kvm_vcpu *vcpu,
                                                struct kvm_exit_mmio *mmio,
                                                phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;

        return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
                                            redist_vcpu->vcpu_id);
}

static bool handle_mmio_set_pending_reg_redist(struct kvm_vcpu *vcpu,
                                               struct kvm_exit_mmio *mmio,
                                               phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;

        return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
                                           redist_vcpu->vcpu_id);
}

static bool handle_mmio_clear_pending_reg_redist(struct kvm_vcpu *vcpu,
                                                 struct kvm_exit_mmio *mmio,
                                                 phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;

        return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
                                             redist_vcpu->vcpu_id);
}

static bool handle_mmio_priority_reg_redist(struct kvm_vcpu *vcpu,
                                            struct kvm_exit_mmio *mmio,
                                            phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;
        u32 *reg;

        reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
                                   redist_vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        return false;
}

static bool handle_mmio_cfg_reg_redist(struct kvm_vcpu *vcpu,
                                       struct kvm_exit_mmio *mmio,
                                       phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;

        u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
                                       redist_vcpu->vcpu_id, offset >> 1);

        return vgic_handle_cfg_reg(reg, mmio, offset);
}
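
/*
 * Each redistributor occupies two contiguous 64K frames: the first (RD_base)
 * holds the control and LPI related registers, the second (SGI_base) holds
 * the per-CPU SGI/PPI registers, hence the SZ_64K offset below.
 */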
#define SGI_base(x) ((x) + SZ_64K)

static const struct vgic_io_range vgic_redist_ranges[] = {
        {
                .base = GICR_CTLR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_ctlr_redist,
        },
        {
                .base = GICR_TYPER,
                .len = 0x08,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_typer_redist,
        },
        {
                .base = GICR_IIDR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_iidr,
        },
        {
                .base = GICR_WAKER,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GICR_IDREGS,
                .len = 0x30,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_idregs,
        },
        {
                .base = SGI_base(GICR_IGROUPR0),
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_rao_wi,
        },
        {
                .base = SGI_base(GICR_ISENABLER0),
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_set_enable_reg_redist,
        },
        {
                .base = SGI_base(GICR_ICENABLER0),
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_clear_enable_reg_redist,
        },
        {
                .base = SGI_base(GICR_ISPENDR0),
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_set_pending_reg_redist,
        },
        {
                .base = SGI_base(GICR_ICPENDR0),
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_clear_pending_reg_redist,
        },
        {
                .base = SGI_base(GICR_ISACTIVER0),
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_set_active_reg_redist,
        },
        {
                .base = SGI_base(GICR_ICACTIVER0),
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_clear_active_reg_redist,
        },
        {
                .base = SGI_base(GICR_IPRIORITYR0),
                .len = 0x20,
                .bits_per_irq = 8,
                .handle_mmio = handle_mmio_priority_reg_redist,
        },
        {
                .base = SGI_base(GICR_ICFGR0),
                .len = 0x08,
                .bits_per_irq = 2,
                .handle_mmio = handle_mmio_cfg_reg_redist,
        },
        {
                .base = SGI_base(GICR_IGRPMODR0),
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = SGI_base(GICR_NSACR),
                .len = 0x04,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {},
};

static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
        if (vgic_queue_irq(vcpu, 0, irq)) {
                vgic_dist_irq_clear_pending(vcpu, irq);
                vgic_cpu_irq_clear(vcpu, irq);
                return true;
        }

        return false;
}

static int vgic_v3_map_resources(struct kvm *kvm,
                                 const struct vgic_params *params)
{
        int ret = 0;
        struct vgic_dist *dist = &kvm->arch.vgic;
        gpa_t rdbase = dist->vgic_redist_base;
        struct vgic_io_device *iodevs = NULL;
        int i;

        if (!irqchip_in_kernel(kvm))
                return 0;

        mutex_lock(&kvm->lock);

        if (vgic_ready(kvm))
                goto out;

        if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
            IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
                kvm_err("Need to set vgic distributor addresses first\n");
                ret = -ENXIO;
                goto out;
        }

        /*
         * For a VGICv3 we require the userland to explicitly initialize
         * the VGIC before we need to use it.
         */
        if (!vgic_initialized(kvm)) {
                ret = -EBUSY;
                goto out;
        }

        ret = vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
                                       GIC_V3_DIST_SIZE, vgic_v3_dist_ranges,
                                       -1, &dist->dist_iodev);
        if (ret)
                goto out;

        iodevs = kcalloc(dist->nr_cpus, sizeof(iodevs[0]), GFP_KERNEL);
        if (!iodevs) {
                ret = -ENOMEM;
                goto out_unregister;
        }

        for (i = 0; i < dist->nr_cpus; i++) {
                ret = vgic_register_kvm_io_dev(kvm, rdbase,
                                               SZ_128K, vgic_redist_ranges,
                                               i, &iodevs[i]);
                if (ret)
                        goto out_unregister;
                rdbase += GIC_V3_REDIST_SIZE;
        }

        dist->redist_iodevs = iodevs;
        dist->ready = true;
        goto out;

out_unregister:
        kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev);
        if (iodevs) {
                for (i = 0; i < dist->nr_cpus; i++) {
                        if (iodevs[i].dev.ops)
                                kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
                                                          &iodevs[i].dev);
                }
        }

out:
        if (ret)
                kvm_vgic_destroy(kvm);
        mutex_unlock(&kvm->lock);
        return ret;
}

static int vgic_v3_init_model(struct kvm *kvm)
{
        int i;
        u32 mpidr;
        struct vgic_dist *dist = &kvm->arch.vgic;
        int nr_spis = dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;

        dist->irq_spi_mpidr = kcalloc(nr_spis, sizeof(dist->irq_spi_mpidr[0]),
                                      GFP_KERNEL);

        if (!dist->irq_spi_mpidr)
                return -ENOMEM;

        /* Initialize the target VCPUs for each IRQ to VCPU 0 */
        mpidr = compress_mpidr(kvm_vcpu_get_mpidr_aff(kvm_get_vcpu(kvm, 0)));
        for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i++) {
                dist->irq_spi_cpu[i - VGIC_NR_PRIVATE_IRQS] = 0;
                dist->irq_spi_mpidr[i - VGIC_NR_PRIVATE_IRQS] = mpidr;
                vgic_bitmap_set_irq_val(dist->irq_spi_target, 0, i, 1);
        }

        return 0;
}

/* GICv3 does not keep track of SGI sources anymore. */
static void vgic_v3_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
}

void vgic_v3_init_emulation(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;

        dist->vm_ops.queue_sgi = vgic_v3_queue_sgi;
        dist->vm_ops.add_sgi_source = vgic_v3_add_sgi_source;
        dist->vm_ops.init_model = vgic_v3_init_model;
        dist->vm_ops.map_resources = vgic_v3_map_resources;

        kvm->arch.max_vcpus = KVM_MAX_VCPUS;
}

/*
 * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
 * generation register ICC_SGI1R_EL1) with a given VCPU.
 * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
 * return -1.
 */
static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
{
        unsigned long affinity;
        int level0;

        /*
         * Split the current VCPU's MPIDR into affinity level 0 and the
         * rest as this is what we have to compare against.
         */
        affinity = kvm_vcpu_get_mpidr_aff(vcpu);
        level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
        affinity &= ~MPIDR_LEVEL_MASK;

        /* bail out if the upper three levels don't match */
        if (sgi_aff != affinity)
                return -1;

        /* Is this VCPU's bit set in the mask ? */
        if (!(sgi_cpu_mask & BIT(level0)))
                return -1;

        return level0;
}

#define SGI_AFFINITY_LEVEL(reg, level) \
        ((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
        >> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
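
/*
 * SGI_AFFINITY_LEVEL(reg, 2), for example, extracts the Aff2 field from the
 * ICC_SGI1R_EL1 value in 'reg' and shifts it to the Aff2 position of an
 * MPIDR, so the three invocations below can simply be OR'ed together.
 */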

/**
 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
 * @vcpu: The VCPU requesting a SGI
 * @reg:  The value written into the ICC_SGI1R_EL1 register by that VCPU
 *
 * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
 * This will trap in sys_regs.c and call this function.
 * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
 * target processors as well as a bitmask of 16 Aff0 CPUs.
 * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
 * check for matching ones. If this bit is set, we signal all, but not the
 * calling VCPU.
 */
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu *c_vcpu;
        struct vgic_dist *dist = &kvm->arch.vgic;
        u16 target_cpus;
        u64 mpidr;
        int sgi, c;
        int vcpu_id = vcpu->vcpu_id;
        bool broadcast;
        int updated = 0;

        sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
        broadcast = reg & BIT(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
        target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
        mpidr = SGI_AFFINITY_LEVEL(reg, 3);
        mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
        mpidr |= SGI_AFFINITY_LEVEL(reg, 1);

        /*
         * We take the dist lock here, because we come from the sysregs
         * code path and not from the MMIO one (which already takes the lock).
         */
        spin_lock(&dist->lock);

        /*
         * We iterate over all VCPUs to find the MPIDRs matching the request.
         * If we have handled one CPU, we clear its bit to detect early
         * if we are already finished. This avoids iterating through all
         * VCPUs when most of the time we just signal a single VCPU.
         */
        kvm_for_each_vcpu(c, c_vcpu, kvm) {
                /* Exit early if we have dealt with all requested CPUs */
                if (!broadcast && target_cpus == 0)
                        break;

                /* Don't signal the calling VCPU */
                if (broadcast && c == vcpu_id)
                        continue;

                if (!broadcast) {
                        int level0;

                        level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
                        if (level0 == -1)
                                continue;

                        /* remove this matching VCPU from the mask */
                        target_cpus &= ~BIT(level0);
                }

                /* Flag the SGI as pending */
                vgic_dist_irq_set_pending(c_vcpu, sgi);
                updated = 1;
                kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
        }
        if (updated)
                vgic_update_state(vcpu->kvm);
        spin_unlock(&dist->lock);
        if (updated)
                vgic_kick_vcpus(vcpu->kvm);
}

static int vgic_v3_create(struct kvm_device *dev, u32 type)
{
        return kvm_vgic_create(dev->kvm, type);
}

static void vgic_v3_destroy(struct kvm_device *dev)
{
        kfree(dev);
}

static int vgic_v3_set_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_set_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return -ENXIO;
        }

        return -ENXIO;
}

static int vgic_v3_get_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_get_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return -ENXIO;
        }

        return -ENXIO;
}

static int vgic_v3_has_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V2_ADDR_TYPE_DIST:
                case KVM_VGIC_V2_ADDR_TYPE_CPU:
                        return -ENXIO;
                case KVM_VGIC_V3_ADDR_TYPE_DIST:
                case KVM_VGIC_V3_ADDR_TYPE_REDIST:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return -ENXIO;
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
                return 0;
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
                }
        }
        return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v3_ops = {
        .name = "kvm-arm-vgic-v3",
        .create = vgic_v3_create,
        .destroy = vgic_v3_destroy,
        .set_attr = vgic_v3_set_attr,
        .get_attr = vgic_v3_get_attr,
        .has_attr = vgic_v3_has_attr,
};