/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>

#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>
#ifdef CONFIG_PID_IN_CONTEXTIDR
static inline void contextidr_thread_switch(struct task_struct *next)
{
	asm(
	"	msr	contextidr_el1, %0\n"
	"	isb"
	:
	: "r" (task_pid_nr(next)));
}
#else
static inline void contextidr_thread_switch(struct task_struct *next)
{
}
#endif
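
/*
 * With CONFIG_PID_IN_CONTEXTIDR enabled, the incoming task's PID is
 * published in CONTEXTIDR_EL1 on every switch, so external debug and
 * trace tools (e.g. a CoreSight trace unit, which can match on
 * CONTEXTIDR) can attribute activity to tasks. Illustrative use only;
 * in this tree the call is made from __switch_to():
 *
 *	contextidr_thread_switch(next);		// before running 'next'
 */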
/*
 * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbr = page_to_phys(empty_zero_page);

	asm(
	"	msr	ttbr0_el1, %0			// set TTBR0\n"
	"	isb"
	:
	: "r" (ttbr));
}
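
/*
 * empty_zero_page is a page of zeroes, so every descriptor fetched from
 * the table TTBR0 now points at has bit 0 clear, i.e. is invalid: any
 * user-half access takes a translation fault instead of hitting a stale
 * mapping. A sketch of the usual pattern (an assumption for
 * illustration, not a fixed contract of this header):
 *
 *	cpu_set_reserved_ttbr0();	// new TTBR0 walks now fault
 *	flush_tlb_all();		// drop translations already cached
 */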
/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern u64 idmap_t0sz;

static inline bool __cpu_uses_extended_idmap(void)
{
	return (!IS_ENABLED(CONFIG_ARM64_VA_BITS_48) &&
		unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)));
}
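
/*
 * T0SZ encodes the TTBR0 input address size as 64 minus the number of
 * VA bits (assuming the usual pgtable-hwdef.h definition,
 * TCR_T0SZ(x) = (64 - (x)) << TCR_T0SZ_OFFSET). Worked example: a
 * VA_BITS=39 kernel normally runs with T0SZ = 64 - 39 = 25; if RAM sits
 * above 1UL << 39, the identity map needs more input address bits, so
 * idmap_t0sz is lowered (down to 16 for a 48-bit physical address) and
 * the helper above returns true.
 */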
/*
 * Set TCR.T0SZ to its default value (based on VA_BITS)
 */
static inline void cpu_set_default_tcr_t0sz(void)
{
	unsigned long tcr;

	if (!__cpu_uses_extended_idmap())
		return;

	asm volatile (
	"	mrs	%0, tcr_el1	;"
	"	bfi	%0, %1, %2, %3	;"
	"	msr	tcr_el1, %0	;"
	"	isb"
	: "=&r" (tcr)
	: "r"(TCR_T0SZ(VA_BITS)), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
}
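
/*
 * The bfi above inserts TCR_T0SZ(VA_BITS) into tcr at bit position
 * TCR_T0SZ_OFFSET with width TCR_TxSZ_WIDTH. Roughly the C equivalent,
 * as a sketch (assuming TCR_T0SZ_OFFSET is 0, per pgtable-hwdef.h):
 *
 *	tcr &= ~GENMASK(TCR_TxSZ_WIDTH - 1, 0);	// clear the old T0SZ field
 *	tcr |= TCR_T0SZ(VA_BITS);		// insert the default value
 */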
/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could work around this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
#define destroy_context(mm)		do { } while(0)

void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);

#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })
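
/*
 * context.id keeps a generation count above the hardware ASID bits, and
 * zero is never handed out, so the 0 stored by init_new_context() simply
 * forces the first switch_mm() of a new mm through the slow path in
 * check_and_switch_context(), where a live ASID is allocated.
 */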
/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
/*
 * This is the actual mm switch as far as the scheduler
 * is concerned. No registers are touched. We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();

	if (prev == next)
		return;

	/*
	 * init_mm.pgd does not contain any user mappings and it is always
	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
	 */
	if (next == &init_mm) {
		cpu_set_reserved_ttbr0();
		return;
	}

	check_and_switch_context(next, cpu);
}
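
/*
 * Putting the checks above together (illustrative, not exhaustive):
 * switching between two threads of one process gives prev == next, so
 * TTBR0 and the ASID are already correct and nothing is written;
 * switching to init_mm just parks TTBR0 on the zero page; every other
 * switch goes through the ASID allocator, which installs the next mm's
 * translation table and may trigger a generation rollover.
 */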
#define deactivate_mm(tsk,mm)	do { } while (0)
#define activate_mm(prev,next)	switch_mm(prev, next, NULL)

#endif /* __ASM_MMU_CONTEXT_H */