paravirt_patch_64.c

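/*
 * Native (non-paravirt) instruction templates used to patch 64-bit
 * paravirt call sites at boot time.
 */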
#include <asm/paravirt.h>
#include <asm/asm-offsets.h>
#include <linux/stringify.h>
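
/*
 * DEF_NATIVE() emits the quoted native instructions between
 * start_<ops>_<name> and end_<ops>_<name> markers so they can be
 * copied over the corresponding paravirt call sites below.
 */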
DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
DEF_NATIVE(pv_cpu_ops, clts, "clts");
DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");

DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "swapgs; sti; sysexit");
DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl");
DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");

DEF_NATIVE(, mov32, "mov %edi, %eax");
DEF_NATIVE(, mov64, "mov %rdi, %rax");

#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
#endif
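
/*
 * The identity ops simply return their first argument; patch them with a
 * single register-to-register move.
 */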
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
        return paravirt_patch_insns(insnbuf, len,
                                    start__mov32, end__mov32);
}

unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
        return paravirt_patch_insns(insnbuf, len,
                                    start__mov64, end__mov64);
}

extern bool pv_is_native_spin_unlock(void);
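
/*
 * Patch a paravirt call site of the given type with its native instruction
 * template; any type without a template here is handled by
 * paravirt_patch_default().
 */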
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len)
{
        const unsigned char *start, *end;
        unsigned ret;
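
/* Generate a switch case that selects the native template for ops.x. */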
#define PATCH_SITE(ops, x)                                      \
        case PARAVIRT_PATCH(ops.x):                             \
                start = start_##ops##_##x;                      \
                end = end_##ops##_##x;                          \
                goto patch_site

        switch (type) {
                PATCH_SITE(pv_irq_ops, restore_fl);
                PATCH_SITE(pv_irq_ops, save_fl);
                PATCH_SITE(pv_irq_ops, irq_enable);
                PATCH_SITE(pv_irq_ops, irq_disable);
                PATCH_SITE(pv_cpu_ops, usergs_sysret32);
                PATCH_SITE(pv_cpu_ops, usergs_sysret64);
                PATCH_SITE(pv_cpu_ops, swapgs);
                PATCH_SITE(pv_mmu_ops, read_cr2);
                PATCH_SITE(pv_mmu_ops, read_cr3);
                PATCH_SITE(pv_mmu_ops, write_cr3);
                PATCH_SITE(pv_cpu_ops, clts);
                PATCH_SITE(pv_cpu_ops, wbinvd);
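/* The unlock can only be patched when the native queued unlock is in use. */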
#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
                case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
                        if (pv_is_native_spin_unlock()) {
                                start = start_pv_lock_ops_queued_spin_unlock;
                                end   = end_pv_lock_ops_queued_spin_unlock;
                                goto patch_site;
                        }
#endif
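
        /* No native template (or a non-native unlock): use the generic patcher. */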
        default:
                ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
                break;
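
/* Copy the selected native template into the instruction buffer. */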
patch_site:
                ret = paravirt_patch_insns(ibuf, len, start, end);
                break;
        }
#undef PATCH_SITE
        return ret;
}