/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>

/*
 * As we only have the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address).
 */
#define HYP_PAGE_OFFSET_SHIFT	VA_BITS
#define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
#define HYP_PAGE_OFFSET		(PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
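
/*
 * Worked example (illustrative only; the numbers below assume
 * VA_BITS == 39, where PAGE_OFFSET == 0xffffffc000000000):
 *
 *   HYP_PAGE_OFFSET_MASK == (1UL << 39) - 1    == 0x0000007fffffffff
 *   HYP_PAGE_OFFSET      == PAGE_OFFSET & MASK == 0x0000004000000000
 *
 * so the kernel VA 0xffffffc000080000 is seen at HYP as
 * 0x0000004000080000: same low bits, the all-ones top bits cleared.
 */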

/*
 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the last
 * possible page, where no kernel mapping will ever exist.
 */
#define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)
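
/*
 * Illustrative value (again assuming VA_BITS == 39 with 4K pages):
 * TRAMPOLINE_VA == 0x0000007fffffffff & ~0xfff == 0x0000007ffffff000,
 * i.e. the very last page of the HYP VA range.
 */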

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
 * levels in addition to the PGD and potentially the PUD which are
 * pre-allocated (we pre-allocate the fake PGD and the PUD when the
 * stage-2 tables use one level of tables less than the kernel).
 */
#ifdef CONFIG_ARM64_64K_PAGES
#define KVM_MMU_CACHE_MIN_PAGES	1
#else
#define KVM_MMU_CACHE_MIN_PAGES	2
#endif
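
/*
 * In other words (illustrative, not configuration-exhaustive): with 4K
 * pages a stage-2 mapping may need both a PMD table and a PTE table
 * allocated on demand below the pre-allocated top level, hence 2; with
 * 64K pages only a PTE table can be missing, hence 1.
 */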

#ifdef __ASSEMBLY__

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 */
.macro kern_hyp_va	reg
	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
.endm
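
/*
 * Example use (illustrative; the symbol is hypothetical and the values
 * assume VA_BITS == 39):
 *
 *	ldr	x0, =some_kernel_symbol	// x0 = 0xffffffc000080000
 *	kern_hyp_va x0			// x0 = 0x0000004000080000
 */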

#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#define KERN_TO_HYP(kva)	((unsigned long)(kva) - PAGE_OFFSET + HYP_PAGE_OFFSET)
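
/*
 * For linear-map addresses this is equivalent to the mask applied by
 * kern_hyp_va above. Illustrative arithmetic (VA_BITS == 39):
 *
 *   KERN_TO_HYP(0xffffffc000080000)
 *	== 0xffffffc000080000 - 0xffffffc000000000 + 0x0000004000000000
 *	== 0x0000004000080000
 */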

/*
 * We currently only support a 40-bit IPA (a 1TB guest physical
 * address space).
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_set_pte(ptep, pte)	set_pte(ptep, pte)
#define kvm_set_pmd(pmdp, pmd)	set_pmd(pmdp, pmd)

static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd(pmd_t *pmd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}

static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	pte_val(*pte) |= PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
	pmd_val(*pmd) |= PMD_S2_RDWR;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pte_val(*pte) = (pte_val(*pte) & ~PTE_S2_RDWR) | PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	pmd_val(*pmd) = (pmd_val(*pmd) & ~PMD_S2_RDWR) | PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return (pmd_val(*pmd) & PMD_S2_RDWR) == PMD_S2_RDONLY;
}

#define kvm_pgd_addr_end(addr, end)	pgd_addr_end(addr, end)
#define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
#define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)

/*
 * In the case where PGDIR_SHIFT is larger than KVM_PHYS_SHIFT, a single
 * pgd entry can address the entire IPA input range. Note that in this
 * case, the pgd is actually not used by the MMU for Stage-2 translations,
 * but is merely a fake pgd used as a data structure for the kernel
 * pgtable macros to work.
 */
#if PGDIR_SHIFT > KVM_PHYS_SHIFT
#define PTRS_PER_S2_PGD_SHIFT	0
#else
#define PTRS_PER_S2_PGD_SHIFT	(KVM_PHYS_SHIFT - PGDIR_SHIFT)
#endif
#define PTRS_PER_S2_PGD		(1 << PTRS_PER_S2_PGD_SHIFT)
#define S2_PGD_ORDER		get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))

#define kvm_pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
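
/*
 * Illustrative values for two common 4K-page configurations (assuming
 * KVM_PHYS_SHIFT == 40 as above):
 *
 *   VA_BITS == 39 (3 levels): PGDIR_SHIFT == 30, so
 *	PTRS_PER_S2_PGD == 1 << (40 - 30) == 1024 entries (8K, order 1)
 *   VA_BITS == 48 (4 levels): PGDIR_SHIFT == 39, so
 *	PTRS_PER_S2_PGD == 1 << (40 - 39) == 2 entries
 */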

/*
 * If we are concatenating first level stage-2 page tables, we would have
 * at most 16 pointers in the fake PGD, because that's what the
 * architecture allows. In this case, (4 - CONFIG_PGTABLE_LEVELS)
 * represents the first level for the host, and we add 1 to go to the next
 * level (which uses concatenation) for the stage-2 tables.
 */
#if PTRS_PER_S2_PGD <= 16
#define KVM_PREALLOC_LEVEL	(4 - CONFIG_PGTABLE_LEVELS + 1)
#else
#define KVM_PREALLOC_LEVEL	(0)
#endif
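
/*
 * Continuing the illustration above:
 *
 *   4K pages, 4 levels: PTRS_PER_S2_PGD == 2 <= 16, so
 *	KVM_PREALLOC_LEVEL == 4 - 4 + 1 == 1: the PGD is fake and the
 *	hardware walk starts at the (concatenated) PUD level.
 *   4K pages, 3 levels: PTRS_PER_S2_PGD == 1024 > 16, so
 *	KVM_PREALLOC_LEVEL == 0: the "PGD" is the real first level.
 */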

static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
	pgd_t *pgd = kvm->arch.pgd;
	pud_t *pud;

	/* No fake levels: the pgd is what the hardware walks */
	if (KVM_PREALLOC_LEVEL == 0)
		return pgd;

	/* One fake level: the hardware walk starts at the pud */
	pud = pud_offset(pgd, 0);
	if (KVM_PREALLOC_LEVEL == 1)
		return pud;

	/* Two fake levels: the hardware walk starts at the pmd */
	BUG_ON(KVM_PREALLOC_LEVEL != 2);
	return pmd_offset(pud, 0);
}

static inline unsigned int kvm_get_hwpgd_size(void)
{
	/*
	 * Each entry of a pre-allocated fake level points to a full,
	 * physically contiguous (concatenated) table page.
	 */
	if (KVM_PREALLOC_LEVEL > 0)
		return PTRS_PER_S2_PGD * PAGE_SIZE;
	return PTRS_PER_S2_PGD * sizeof(pgd_t);
}
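
/*
 * Illustrative sizes for the two 4K-page cases above: with 4 levels,
 * 2 * PAGE_SIZE == 8K (two concatenated PUD pages); with 3 levels,
 * 1024 * sizeof(pgd_t) == 8K (an order-1 allocation of concatenated
 * first-level tables).
 */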

static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}
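
/*
 * The stage-2 code takes a page reference for every entry it installs in
 * a table page (and drops one for every entry it clears), so a count of
 * 1 means the table is empty. Tables at or above KVM_PREALLOC_LEVEL are
 * part of the hwpgd allocation, so the macros below never report them
 * empty, lest they be individually freed.
 */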
#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define kvm_pmd_table_empty(kvm, pmdp) (0)
#else
#define kvm_pmd_table_empty(kvm, pmdp) \
	(kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2))
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define kvm_pud_table_empty(kvm, pudp) (0)
#else
#define kvm_pud_table_empty(kvm, pudp) \
	(kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1))
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* Both SCTLR_EL1.M (bit 0) and SCTLR_EL1.C (bit 2) must be set */
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	void *va = page_address(pfn_to_page(pfn));

	/*
	 * Only flush the D-cache to PoC if the guest can access this
	 * memory with its caches disabled (otherwise ipa_uncached is
	 * dead code and the flush is unnecessary).
	 */
	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc(va, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap();
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}
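
/*
 * Illustrative index computation (hypothetical physical layout, assuming
 * VA_BITS == 39): if the HYP init code sits at PA 0x8080000000, then
 * idmap_idx == 0x8080000000 >> 39 == 1, so entry 1 of the merged table
 * points to the boot HYP map while entry 0 covers the runtime mappings.
 */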

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */