i8259.c

/*
 * 8259 interrupt controller emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2007 Intel Corporation
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *   Port from Qemu.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include "irq.h"

#include <linux/kvm_host.h>
#include "trace.h"

#define pr_pic_unimpl(fmt, ...) \
        pr_err_ratelimited("kvm: pic: " fmt, ## __VA_ARGS__)

static void pic_irq_request(struct kvm *kvm, int level);

static void pic_lock(struct kvm_pic *s)
        __acquires(&s->lock)
{
        spin_lock(&s->lock);
}

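/*
 * Drop the PIC lock and, if an output change was recorded while it was
 * held, kick the first vcpu that can accept PIC interrupts so it
 * re-evaluates pending events.
 */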
static void pic_unlock(struct kvm_pic *s)
        __releases(&s->lock)
{
        bool wakeup = s->wakeup_needed;
        struct kvm_vcpu *vcpu, *found = NULL;
        int i;

        s->wakeup_needed = false;

        spin_unlock(&s->lock);

        if (wakeup) {
                kvm_for_each_vcpu(i, vcpu, s->kvm) {
                        if (kvm_apic_accept_pic_intr(vcpu)) {
                                found = vcpu;
                                break;
                        }
                }

                if (!found)
                        return;

                kvm_make_request(KVM_REQ_EVENT, found);
                kvm_vcpu_kick(found);
        }
}

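/*
 * Clear bit 'irq' in the in-service register and notify any registered
 * ack notifiers for the corresponding GSI.
 */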
static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
{
        s->isr &= ~(1 << irq);
        if (s != &s->pics_state->pics[0])
                irq += 8;
        /*
         * We are dropping lock while calling ack notifiers since ack
         * notifier callbacks for assigned devices call into PIC recursively.
         * Other interrupt may be delivered to PIC while lock is dropped but
         * it should be safe since PIC state is already updated at this stage.
         */
        pic_unlock(s->pics_state);
        kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
        pic_lock(s->pics_state);
}

/*
 * set irq level. If an edge is detected, then the IRR is set to 1
 */
static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
{
        int mask, ret = 1;
        mask = 1 << irq;
        if (s->elcr & mask)     /* level triggered */
                if (level) {
                        ret = !(s->irr & mask);
                        s->irr |= mask;
                        s->last_irr |= mask;
                } else {
                        s->irr &= ~mask;
                        s->last_irr &= ~mask;
                }
        else    /* edge triggered */
                if (level) {
                        if ((s->last_irr & mask) == 0) {
                                ret = !(s->irr & mask);
                                s->irr |= mask;
                        }
                        s->last_irr |= mask;
                } else
                        s->last_irr &= ~mask;

        return (s->imr & mask) ? -1 : ret;
}

/*
 * return the highest priority found in mask (highest = smallest
 * number). Return 8 if no irq
 */
static inline int get_priority(struct kvm_kpic_state *s, int mask)
{
        int priority;
        if (mask == 0)
                return 8;
        priority = 0;
        while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0)
                priority++;
        return priority;
}

/*
 * return the pic wanted interrupt. return -1 if none
 */
static int pic_get_irq(struct kvm_kpic_state *s)
{
        int mask, cur_priority, priority;

        mask = s->irr & ~s->imr;
        priority = get_priority(s, mask);
        if (priority == 8)
                return -1;
        /*
         * compute current priority. If special fully nested mode on the
         * master, the IRQ coming from the slave is not taken into account
         * for the priority computation.
         */
        mask = s->isr;
        if (s->special_fully_nested_mode && s == &s->pics_state->pics[0])
                mask &= ~(1 << 2);
        cur_priority = get_priority(s, mask);
        if (priority < cur_priority)
                /*
                 * higher priority found: an irq should be generated
                 */
                return (priority + s->priority_add) & 7;
        else
                return -1;
}

/*
 * raise irq to CPU if necessary. must be called every time the active
 * irq may change
 */
static void pic_update_irq(struct kvm_pic *s)
{
        int irq2, irq;

        irq2 = pic_get_irq(&s->pics[1]);
        if (irq2 >= 0) {
                /*
                 * if irq request by slave pic, signal master PIC
                 */
                pic_set_irq1(&s->pics[0], 2, 1);
                pic_set_irq1(&s->pics[0], 2, 0);
        }
        irq = pic_get_irq(&s->pics[0]);
        pic_irq_request(s->kvm, irq >= 0);
}

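/*
 * Recompute the PIC output with the lock held; used by callers outside
 * this file after they change conditions that affect interrupt delivery.
 */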
void kvm_pic_update_irq(struct kvm_pic *s)
{
        pic_lock(s);
        pic_update_irq(s);
        pic_unlock(s);
}

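/*
 * Set the level on an input pin (0..15), folding in the per-source
 * line state.  The return value follows pic_set_irq1(): -1 if the pin
 * is masked, 0 if the interrupt was coalesced with one already
 * pending, 1 otherwise.
 */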
int kvm_pic_set_irq(struct kvm_pic *s, int irq, int irq_source_id, int level)
{
        int ret, irq_level;

        BUG_ON(irq < 0 || irq >= PIC_NUM_PINS);

        pic_lock(s);
        irq_level = __kvm_irq_line_state(&s->irq_states[irq],
                                         irq_source_id, level);
        ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, irq_level);
        pic_update_irq(s);
        trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr,
                              s->pics[irq >> 3].imr, ret == 0);
        pic_unlock(s);

        return ret;
}

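/*
 * Clear the contribution of one irq source id from every pin, e.g.
 * when that source is being destroyed.
 */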
void kvm_pic_clear_all(struct kvm_pic *s, int irq_source_id)
{
        int i;

        pic_lock(s);
        for (i = 0; i < PIC_NUM_PINS; i++)
                __clear_bit(irq_source_id, &s->irq_states[i]);
        pic_unlock(s);
}

/*
 * acknowledge interrupt 'irq'
 */
static inline void pic_intack(struct kvm_kpic_state *s, int irq)
{
        s->isr |= 1 << irq;
        /*
         * We don't clear a level sensitive interrupt here
         */
        if (!(s->elcr & (1 << irq)))
                s->irr &= ~(1 << irq);

        if (s->auto_eoi) {
                if (s->rotate_on_auto_eoi)
                        s->priority_add = (irq + 1) & 7;
                pic_clear_isr(s, irq);
        }
}

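/*
 * Emulate the INTA cycle: pick the highest-priority pending interrupt,
 * acknowledge it on the master (and on the slave if it came in through
 * IRQ2) and return the vector number to inject.  A spurious IRQ7 vector
 * is returned if nothing is actually pending.
 */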
int kvm_pic_read_irq(struct kvm *kvm)
{
        int irq, irq2, intno;
        struct kvm_pic *s = pic_irqchip(kvm);

        s->output = 0;

        pic_lock(s);
        irq = pic_get_irq(&s->pics[0]);
        if (irq >= 0) {
                pic_intack(&s->pics[0], irq);
                if (irq == 2) {
                        irq2 = pic_get_irq(&s->pics[1]);
                        if (irq2 >= 0)
                                pic_intack(&s->pics[1], irq2);
                        else
                                /*
                                 * spurious IRQ on slave controller
                                 */
                                irq2 = 7;
                        intno = s->pics[1].irq_base + irq2;
                        irq = irq2 + 8;
                } else
                        intno = s->pics[0].irq_base + irq;
        } else {
                /*
                 * spurious IRQ on host controller
                 */
                irq = 7;
                intno = s->pics[0].irq_base + irq;
        }
        pic_update_irq(s);
        pic_unlock(s);

        return intno;
}

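/*
 * Reset one of the two controllers as part of handling ICW1.  Edge
 * triggered interrupts that were latched in IRR are dropped; their ISR
 * bits are cleared (with ack notification) only if some vcpu actually
 * accepts PIC interrupts.
 */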
void kvm_pic_reset(struct kvm_kpic_state *s)
{
        int irq, i;
        struct kvm_vcpu *vcpu;
        u8 edge_irr = s->irr & ~s->elcr;
        bool found = false;

        s->last_irr = 0;
        s->irr &= s->elcr;
        s->imr = 0;
        s->priority_add = 0;
        s->special_mask = 0;
        s->read_reg_select = 0;
        if (!s->init4) {
                s->special_fully_nested_mode = 0;
                s->auto_eoi = 0;
        }
        s->init_state = 1;

        kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm)
                if (kvm_apic_accept_pic_intr(vcpu)) {
                        found = true;
                        break;
                }

        if (!found)
                return;

        for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
                if (edge_irr & (1 << irq))
                        pic_clear_isr(s, irq);
}

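/*
 * Handle a byte write to a command (0x20/0xa0) or data (0x21/0xa1)
 * port: ICW1 starts (re)initialization, OCW2/OCW3 are decoded from the
 * command port, and data port writes either continue the ICW sequence
 * or update the interrupt mask register.
 */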
static void pic_ioport_write(void *opaque, u32 addr, u32 val)
{
        struct kvm_kpic_state *s = opaque;
        int priority, cmd, irq;

        addr &= 1;
        if (addr == 0) {
                if (val & 0x10) {
                        s->init4 = val & 1;
                        if (val & 0x02)
                                pr_pic_unimpl("single mode not supported");
                        if (val & 0x08)
                                pr_pic_unimpl(
                                        "level sensitive irq not supported");
                        kvm_pic_reset(s);
                } else if (val & 0x08) {
                        if (val & 0x04)
                                s->poll = 1;
                        if (val & 0x02)
                                s->read_reg_select = val & 1;
                        if (val & 0x40)
                                s->special_mask = (val >> 5) & 1;
                } else {
                        cmd = val >> 5;
                        switch (cmd) {
                        case 0:
                        case 4:
                                s->rotate_on_auto_eoi = cmd >> 2;
                                break;
                        case 1: /* end of interrupt */
                        case 5:
                                priority = get_priority(s, s->isr);
                                if (priority != 8) {
                                        irq = (priority + s->priority_add) & 7;
                                        if (cmd == 5)
                                                s->priority_add = (irq + 1) & 7;
                                        pic_clear_isr(s, irq);
                                        pic_update_irq(s->pics_state);
                                }
                                break;
                        case 3:
                                irq = val & 7;
                                pic_clear_isr(s, irq);
                                pic_update_irq(s->pics_state);
                                break;
                        case 6:
                                s->priority_add = (val + 1) & 7;
                                pic_update_irq(s->pics_state);
                                break;
                        case 7:
                                irq = val & 7;
                                s->priority_add = (irq + 1) & 7;
                                pic_clear_isr(s, irq);
                                pic_update_irq(s->pics_state);
                                break;
                        default:
                                break;  /* no operation */
                        }
                }
        } else
                switch (s->init_state) {
                case 0: { /* normal mode */
                        u8 imr_diff = s->imr ^ val,
                                off = (s == &s->pics_state->pics[0]) ? 0 : 8;
                        s->imr = val;
                        for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
                                if (imr_diff & (1 << irq))
                                        kvm_fire_mask_notifiers(
                                                s->pics_state->kvm,
                                                SELECT_PIC(irq + off),
                                                irq + off,
                                                !!(s->imr & (1 << irq)));
                        pic_update_irq(s->pics_state);
                        break;
                }
                case 1:
                        s->irq_base = val & 0xf8;
                        s->init_state = 2;
                        break;
                case 2:
                        if (s->init4)
                                s->init_state = 3;
                        else
                                s->init_state = 0;
                        break;
                case 3:
                        s->special_fully_nested_mode = (val >> 4) & 1;
                        s->auto_eoi = (val >> 1) & 1;
                        s->init_state = 0;
                        break;
                }
}

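/*
 * Poll mode read (OCW3 with the P bit set): return and acknowledge the
 * highest-priority pending interrupt, or 0x07 if there is none.
 */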
static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
{
        int ret;

        ret = pic_get_irq(s);
        if (ret >= 0) {
                if (addr1 >> 7) {
                        s->pics_state->pics[0].isr &= ~(1 << 2);
                        s->pics_state->pics[0].irr &= ~(1 << 2);
                }
                s->irr &= ~(1 << ret);
                pic_clear_isr(s, ret);
                if (addr1 >> 7 || ret != 2)
                        pic_update_irq(s->pics_state);
        } else {
                ret = 0x07;
                pic_update_irq(s->pics_state);
        }

        return ret;
}

static u32 pic_ioport_read(void *opaque, u32 addr1)
{
        struct kvm_kpic_state *s = opaque;
        unsigned int addr;
        int ret;

        addr = addr1;
        addr &= 1;
        if (s->poll) {
                ret = pic_poll_read(s, addr1);
                s->poll = 0;
        } else
                if (addr == 0)
                        if (s->read_reg_select)
                                ret = s->isr;
                        else
                                ret = s->irr;
                else
                        ret = s->imr;
        return ret;
}

static void elcr_ioport_write(void *opaque, u32 addr, u32 val)
{
        struct kvm_kpic_state *s = opaque;
        s->elcr = val & s->elcr_mask;
}

static u32 elcr_ioport_read(void *opaque, u32 addr1)
{
        struct kvm_kpic_state *s = opaque;
        return s->elcr;
}

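/*
 * The emulated device claims the legacy PIC ports (0x20/0x21 master,
 * 0xa0/0xa1 slave) and the two ELCR (edge/level control) ports
 * 0x4d0/0x4d1.
 */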
static int picdev_in_range(gpa_t addr)
{
        switch (addr) {
        case 0x20:
        case 0x21:
        case 0xa0:
        case 0xa1:
        case 0x4d0:
        case 0x4d1:
                return 1;
        default:
                return 0;
        }
}

static int picdev_write(struct kvm_pic *s,
                        gpa_t addr, int len, const void *val)
{
        unsigned char data = *(unsigned char *)val;
        if (!picdev_in_range(addr))
                return -EOPNOTSUPP;

        if (len != 1) {
                pr_pic_unimpl("non byte write\n");
                return 0;
        }
        pic_lock(s);
        switch (addr) {
        case 0x20:
        case 0x21:
        case 0xa0:
        case 0xa1:
                pic_ioport_write(&s->pics[addr >> 7], addr, data);
                break;
        case 0x4d0:
        case 0x4d1:
                elcr_ioport_write(&s->pics[addr & 1], addr, data);
                break;
        }
        pic_unlock(s);
        return 0;
}

static int picdev_read(struct kvm_pic *s,
                       gpa_t addr, int len, void *val)
{
        unsigned char data = 0;
        if (!picdev_in_range(addr))
                return -EOPNOTSUPP;

        if (len != 1) {
                memset(val, 0, len);
                pr_pic_unimpl("non byte read\n");
                return 0;
        }
        pic_lock(s);
        switch (addr) {
        case 0x20:
        case 0x21:
        case 0xa0:
        case 0xa1:
                data = pic_ioport_read(&s->pics[addr >> 7], addr);
                break;
        case 0x4d0:
        case 0x4d1:
                data = elcr_ioport_read(&s->pics[addr & 1], addr);
                break;
        }
        *(unsigned char *)val = data;
        pic_unlock(s);
        return 0;
}

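/*
 * Thin kvm_io_device wrappers: each of the three registered devices
 * (master ports, slave ports, ELCR ports) recovers the shared
 * struct kvm_pic with container_of() and forwards to the common
 * read/write handlers above.
 */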
static int picdev_master_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                               gpa_t addr, int len, const void *val)
{
        return picdev_write(container_of(dev, struct kvm_pic, dev_master),
                            addr, len, val);
}

static int picdev_master_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                              gpa_t addr, int len, void *val)
{
        return picdev_read(container_of(dev, struct kvm_pic, dev_master),
                           addr, len, val);
}

static int picdev_slave_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                              gpa_t addr, int len, const void *val)
{
        return picdev_write(container_of(dev, struct kvm_pic, dev_slave),
                            addr, len, val);
}

static int picdev_slave_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                             gpa_t addr, int len, void *val)
{
        return picdev_read(container_of(dev, struct kvm_pic, dev_slave),
                           addr, len, val);
}

static int picdev_eclr_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                             gpa_t addr, int len, const void *val)
{
        return picdev_write(container_of(dev, struct kvm_pic, dev_eclr),
                            addr, len, val);
}

static int picdev_eclr_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                            gpa_t addr, int len, void *val)
{
        return picdev_read(container_of(dev, struct kvm_pic, dev_eclr),
                           addr, len, val);
}

/*
 * callback when PIC0 irq status changed
 */
static void pic_irq_request(struct kvm *kvm, int level)
{
        struct kvm_pic *s = pic_irqchip(kvm);

        if (!s->output)
                s->wakeup_needed = true;
        s->output = level;
}

static const struct kvm_io_device_ops picdev_master_ops = {
        .read  = picdev_master_read,
        .write = picdev_master_write,
};

static const struct kvm_io_device_ops picdev_slave_ops = {
        .read  = picdev_slave_read,
        .write = picdev_slave_write,
};

static const struct kvm_io_device_ops picdev_eclr_ops = {
        .read  = picdev_eclr_read,
        .write = picdev_eclr_write,
};

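/*
 * Allocate and register the PIC model: one struct kvm_pic holding both
 * controllers, exposed on the PIO bus as three devices (master, slave
 * and ELCR port pairs).  On any registration failure everything
 * registered so far is torn down and NULL is returned.
 */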
struct kvm_pic *kvm_create_pic(struct kvm *kvm)
{
        struct kvm_pic *s;
        int ret;

        s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL);
        if (!s)
                return NULL;
        spin_lock_init(&s->lock);
        s->kvm = kvm;
        s->pics[0].elcr_mask = 0xf8;
        s->pics[1].elcr_mask = 0xde;
        s->pics[0].pics_state = s;
        s->pics[1].pics_state = s;

        /*
         * Initialize PIO device
         */
        kvm_iodevice_init(&s->dev_master, &picdev_master_ops);
        kvm_iodevice_init(&s->dev_slave, &picdev_slave_ops);
        kvm_iodevice_init(&s->dev_eclr, &picdev_eclr_ops);
        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x20, 2,
                                      &s->dev_master);
        if (ret < 0)
                goto fail_unlock;

        ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0xa0, 2, &s->dev_slave);
        if (ret < 0)
                goto fail_unreg_2;

        ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x4d0, 2, &s->dev_eclr);
        if (ret < 0)
                goto fail_unreg_1;

        mutex_unlock(&kvm->slots_lock);

        return s;

fail_unreg_1:
        kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &s->dev_slave);

fail_unreg_2:
        kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &s->dev_master);

fail_unlock:
        mutex_unlock(&kvm->slots_lock);

        kfree(s);

        return NULL;
}

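/*
 * Tear down the PIC: unregister the three PIO devices and free the
 * shared state.
 */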
void kvm_destroy_pic(struct kvm_pic *vpic)
{
        kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
        kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
        kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr);
        kfree(vpic);
}