/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

static int privcmd_vma_range_is_mapped(
	struct vm_area_struct *vma,
	unsigned long addr,
	unsigned long nr_pages);

static long privcmd_ioctl_hypercall(void __user *udata)
{
	struct privcmd_hypercall hypercall;
	long ret;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	xen_preemptible_hcall_begin();
	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);
	xen_preemptible_hcall_end();

	return ret;
}
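
/*
 * Illustrative usage sketch, not part of the original file: a privileged
 * user-space caller drives IOCTL_PRIVCMD_HYPERCALL roughly as below.
 * The device path, header name and hypercall number are assumptions made
 * for the example; real tooling normally goes through libxencall/libxc
 * rather than issuing the ioctl by hand.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <xen/sys/privcmd.h>		// assumed userspace header path
 *
 *	struct privcmd_hypercall hc = {
 *		.op  = some_hypercall_nr,	// assumed, supplied by caller
 *		.arg = { arg0, arg1, 0, 0, 0 },
 *	};
 *	int fd = open("/dev/xen/privcmd", O_RDWR);
 *	long rc = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &hc);
 *	// On success rc is the hypercall's own return value; on ioctl
 *	// failure rc is -1 with errno set.
 */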

static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}
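
/*
 * Sizing note (illustrative, not from the original source): gather_array()
 * packs as many fixed-size elements as fit into each freshly allocated
 * page and never splits an element across a page boundary.  With a 4 KiB
 * PAGE_SIZE and size == sizeof(xen_pfn_t) == 8 (64-bit build), each page
 * holds 512 entries, so e.g. a 2000-frame MMAPBATCH request is gathered
 * into four pages on the list.
 */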

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;
			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}

/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
				struct list_head *pos,
				int (*fn)(void *data, int nr, void *state),
				void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;

	while (nelem) {
		int nr = (PAGE_SIZE/size);
		struct page *page;
		if (nr > nelem)
			nr = nelem;
		pos = pos->next;
		page = list_entry(pos, struct page, lru);
		pagedata = page_address(page);
		ret = (*fn)(pagedata, nr, state);
		if (ret)
			break;
		nelem -= nr;
	}

	return ret;
}

struct mmap_gfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};

static int mmap_gfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_gfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_gfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}

static long privcmd_ioctl_mmap(void __user *udata)
{
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_gfn_state state;

	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
			goto out_up;
		vma->vm_private_data = PRIV_VMA_LOCKED;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_gfn_range, &state);

out_up:
	up_write(&mm->mmap_sem);

out:
	free_page_list(&pagelist);

	return rc;
}

struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int index;
	/* A tristate:
	 *      0 for no errors
	 *      1 if at least one error has happened (and no
	 *          -ENOENT errors have happened)
	 *      -ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	int version;

	/* User-space gfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_gfn;
	/* User-space int array to store errors in the second pass for V2. */
	int __user *user_err;
};

/* auto translated dom0 note: if domU being created is PV, then gfn is
 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{
	xen_pfn_t *gfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page **cur_pages = NULL;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_pages = &pages[st->index];

	BUG_ON(nr < 0);
	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
					 (int *)gfnp, st->vma->vm_page_prot,
					 st->domain, cur_pages);

	/* Adjust the global_error? */
	if (ret != nr) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	st->va += XEN_PAGE_SIZE * nr;
	st->index += nr / XEN_PFN_PER_PAGE;

	return 0;
}

static int mmap_return_error(int err, struct mmap_batch_state *st)
{
	int ret;

	if (st->version == 1) {
		if (err) {
			xen_pfn_t gfn;

			ret = get_user(gfn, st->user_gfn);
			if (ret < 0)
				return ret;
			/*
			 * V1 encodes the error codes in the 32bit top
			 * nibble of the gfn (with its known
			 * limitations vis-a-vis 64 bit callers).
			 */
			gfn |= (err == -ENOENT) ?
				PRIVCMD_MMAPBATCH_PAGED_ERROR :
				PRIVCMD_MMAPBATCH_MFN_ERROR;
			return __put_user(gfn, st->user_gfn++);
		} else
			st->user_gfn++;
	} else { /* st->version == 2 */
		if (err)
			return __put_user(err, st->user_err++);
		else
			st->user_err++;
	}

	return 0;
}
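
/*
 * Retry sketch (illustrative, not part of the original file): a V1 caller
 * scans the gfn array it passed to IOCTL_PRIVCMD_MMAPBATCH and re-queues
 * only the frames that were merely paged out.  The mask names are the ones
 * used above (defined in xen/privcmd.h); the surrounding variables are
 * assumptions made for the example.
 *
 *	for (i = 0; i < num; i++) {
 *		if ((arr[i] & PRIVCMD_MMAPBATCH_MFN_ERROR) ==
 *		    PRIVCMD_MMAPBATCH_PAGED_ERROR)
 *			retry[nr_retry++] = arr[i] & ~PRIVCMD_MMAPBATCH_MFN_ERROR;
 *	}
 */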

static int mmap_return_errors(void *data, int nr, void *state)
{
	struct mmap_batch_state *st = state;
	int *errs = data;
	int i;
	int ret;

	for (i = 0; i < nr; i++) {
		ret = mmap_return_error(errs[i], st);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = alloc_xenballooned_pages(numpgs, pages);
	if (rc != 0) {
		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
			numpgs, rc);
		kfree(pages);
		return -ENOMEM;
	}
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_private_data = pages;

	return 0;
}

static const struct vm_operations_struct privcmd_vm_ops;

static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
{
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	if (version == 2) {
		/* Zero error array now to only copy back actual errors. */
		if (clear_user(m.err, sizeof(int) * m.num)) {
			ret = -EFAULT;
			goto out;
		}
	}

	down_write(&mm->mmap_sem);

	vma = find_vma(mm, m.addr);
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Caller must either:
	 *
	 * Map the whole VMA range, which will also allocate all the
	 * pages required for the auto_translated_physmap case.
	 *
	 * Or
	 *
	 * Map unmapped holes left from a previous map attempt (e.g.,
	 * because those foreign frames were previously paged out).
	 */
	if (vma->vm_private_data == NULL) {
		if (m.addr != vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = alloc_empty_pages(vma, nr_pages);
			if (ret < 0)
				goto out_unlock;
		} else
			vma->vm_private_data = PRIV_VMA_LOCKED;
	} else {
		if (m.addr < vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	state.domain        = m.dom;
	state.vma           = vma;
	state.va            = m.addr;
	state.index         = 0;
	state.global_error  = 0;
	state.version       = version;

	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
				    &pagelist, mmap_batch_fn, &state));

	up_write(&mm->mmap_sem);

	if (state.global_error) {
		/* Write back errors in second pass. */
		state.user_gfn = (xen_pfn_t *)m.arr;
		state.user_err = m.err;
		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
					   &pagelist, mmap_return_errors, &state);
	} else
		ret = 0;

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	free_page_list(&pagelist);
	return ret;

out_unlock:
	up_write(&mm->mmap_sem);
	goto out;
}
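
/*
 * Illustrative V2 call sequence, not part of the original file: user space
 * first mmap()s a range backed by this driver, then asks for foreign frames
 * to be mapped into it.  Paths, variable names and error handling are
 * assumptions made for the sketch; real tooling uses libxenforeignmemory
 * or libxc rather than raw ioctls.
 *
 *	int fd = open("/dev/xen/privcmd", O_RDWR);
 *	void *addr = mmap(NULL, num * getpagesize(), PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *	struct privcmd_mmapbatch_v2 m = {
 *		.num  = num,			// number of frames to map
 *		.dom  = domid,			// foreign domain owning them
 *		.addr = (unsigned long)addr,	// must cover the whole VMA
 *		.arr  = gfns,			// frames to map (xen_pfn_t[])
 *		.err  = errs,			// per-frame error codes (int[])
 *	};
 *	ret = ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &m);
 *	// A -1/ENOENT result means some frames were paged out; those slots
 *	// report -ENOENT in errs[] and can be retried against the same VMA.
 */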

static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOSYS;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(udata, 2);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
	int rc;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
	if (rc == 0)
		free_xenballooned_pages(numpgs, pages);
	else
		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
			numpgs, rc);
	kfree(pages);
}

static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vma, vma->vm_start, vma->vm_end,
	       vmf->pgoff, vmf->virtual_address);

	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}

/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can then be retried until they succeed.
 */
static int is_mapped_fn(pte_t *pte, struct page *pmd_page,
			unsigned long addr, void *data)
{
	return pte_none(*pte) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
	struct vm_area_struct *vma,
	unsigned long addr,
	unsigned long nr_pages)
{
	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
				   is_mapped_fn, NULL) != 0;
}

const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		pr_err("Could not register Xen privcmd device\n");
		return err;
	}

	return 0;
}

static void __exit privcmd_exit(void)
{
	misc_deregister(&privcmd_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);