mmu_context.h

#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Routines to manage the allocation of task context numbers.  Task context
 * numbers are used to reduce or eliminate the need to perform TLB flushes
 * due to context switches.  Context numbers are implemented using ia-64
 * region ids.  Since the IA-64 TLB does not consider the region number when
 * performing a TLB lookup, we need to assign a unique region id to each
 * region in a process.  We use the least significant three bits in a
 * region id for this purpose.
 */

#define IA64_REGION_ID_KERNEL   0 /* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)      (((ctx) << 3) | (addr >> 61))
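
/*
 * Illustrative example (not part of the original source): with context
 * number 5 and a region-7 address such as 0xe000000000000000,
 * ia64_rid(5, addr) evaluates to (5 << 3) | 7 == 0x2f, i.e. the three
 * region-number bits end up in the low bits of the region id.
 */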

# include <asm/page.h>
# ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
#include <asm-generic/mm_hooks.h>

struct ia64_ctx {
        spinlock_t lock;
        unsigned int next;      /* next context number to use */
        unsigned int limit;     /* available free range */
        unsigned int max_ctx;   /* max. context value supported by all CPUs */
                                /* call wrap_mmu_context when next >= max */
        unsigned long *bitmap;  /* bitmap size is max_ctx+1 */
        unsigned long *flushmap;/* pending rid to be flushed */
};

extern struct ia64_ctx ia64_ctx;
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);

extern void mmu_context_init (void);
extern void wrap_mmu_context (struct mm_struct *mm);
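
/*
 * enter_lazy_tlb() is a no-op on ia64: no per-CPU lazy-TLB state needs to
 * be set up when a kernel thread temporarily borrows an address space.
 */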
static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * When the context counter wraps around all TLBs need to be flushed because
 * an old context number might have been reused.  This is signalled by the
 * ia64_need_tlb_flush per-CPU variable, which is checked in the routine
 * below.  Called by activate_mm().  <efocht@ess.nec.de>
 */
static inline void
delayed_tlb_flush (void)
{
        extern void local_flush_tlb_all (void);
        unsigned long flags;

        if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
                spin_lock_irqsave(&ia64_ctx.lock, flags);
                if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
                        local_flush_tlb_all();
                        __ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
                }
                spin_unlock_irqrestore(&ia64_ctx.lock, flags);
        }
}
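
/*
 * Note: delayed_tlb_flush() above tests ia64_need_tlb_flush twice -- once
 * lock-free for the fast path and again under ia64_ctx.lock -- so the
 * common "no flush pending" case takes no lock.
 *
 * get_mmu_context() returns MM's context number, allocating one on first
 * use: "next" hands out numbers sequentially from the free range ending at
 * "limit"; when that range is exhausted the bitmap is searched for the
 * next free range, and when the counter reaches max_ctx, wrap_mmu_context()
 * (defined elsewhere) starts a new round, which is what eventually sets the
 * per-CPU ia64_need_tlb_flush flags consumed above.
 */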
static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
        unsigned long flags;
        nv_mm_context_t context = mm->context;

        if (likely(context))
                goto out;

        spin_lock_irqsave(&ia64_ctx.lock, flags);
        /* re-check, now that we've got the lock: */
        context = mm->context;
        if (context == 0) {
                cpumask_clear(mm_cpumask(mm));
                if (ia64_ctx.next >= ia64_ctx.limit) {
                        ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
                                        ia64_ctx.max_ctx, ia64_ctx.next);
                        ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
                                        ia64_ctx.max_ctx, ia64_ctx.next);
                        if (ia64_ctx.next >= ia64_ctx.max_ctx)
                                wrap_mmu_context(mm);
                }
                mm->context = context = ia64_ctx.next++;
                __set_bit(context, ia64_ctx.bitmap);
        }
        spin_unlock_irqrestore(&ia64_ctx.lock, flags);
out:
        /*
         * Ensure we're not starting to use "context" before any old
         * uses of it are gone from our TLB.
         */
        delayed_tlb_flush();

        return context;
}

/*
 * Initialize context number to some sane value.  MM is guaranteed to be a
 * brand-new address-space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
        mm->context = 0;
        return 0;
}
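
/*
 * There is no per-mm MMU state beyond mm->context, so tearing down an
 * address space requires no work here.
 */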
static inline void
destroy_context (struct mm_struct *mm)
{
        /* Nothing to do. */
}
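
/*
 * Load region registers rr0-rr4 for the given context.  Each register
 * packs, from bit 0 upward: the VHPT enable bit, the preferred page size
 * (PAGE_SHIFT) in bits 2-7, and the region id starting at bit 8.
 * Illustrative example (not part of the original source), assuming 16KB
 * pages (PAGE_SHIFT == 14) and context 5: rid = 5 << 3 = 40, so
 * rr0 = (40 << 8) | (14 << 2) | 1 == 0x2839, and rr1..rr4 follow at
 * increments of one rid, i.e. +0x100 per register.
 */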
static inline void
reload_context (nv_mm_context_t context)
{
        unsigned long rid;
        unsigned long rid_incr = 0;
        unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;

        old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
        rid = context << 3;     /* make space for encoding the region number */
        rid_incr = 1 << 8;

        /* encode the region id, preferred page size, and VHPT enable bit: */
        rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
        rr1 = rr0 + 1*rid_incr;
        rr2 = rr0 + 2*rid_incr;
        rr3 = rr0 + 3*rid_incr;
        rr4 = rr0 + 4*rid_incr;
#ifdef CONFIG_HUGETLB_PAGE
        rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);

#  if RGN_HPAGE != 4
#    error "reload_context assumes RGN_HPAGE is 4"
#  endif
#endif

        ia64_set_rr0_to_rr4(rr0, rr1, rr2, rr3, rr4);
        ia64_srlz_i();                  /* srlz.i implies srlz.d */
}

/*
 * Must be called with preemption off
 */
static inline void
activate_context (struct mm_struct *mm)
{
        nv_mm_context_t context;

        do {
                context = get_mmu_context(mm);
                if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
                        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
                reload_context(context);
                /*
                 * in the unlikely event of a TLB-flush by another thread,
                 * redo the load.
                 */
        } while (unlikely(context != mm->context));
}
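
/*
 * Nothing to do when an address space is deactivated; the region registers
 * are simply rewritten when the next address space is activated.
 */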
#define deactivate_mm(tsk,mm)   do { } while (0)

/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
        /*
         * We may get interrupts here, but that's OK because interrupt
         * handlers cannot touch user-space.
         */
        ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
        activate_context(next);
}
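
/*
 * A context switch on ia64 needs nothing beyond what activate_mm() already
 * does (load the page-table base and the region ids), so switch_mm() is
 * simply an alias for it; the previous mm and task arguments are ignored.
 */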
#define switch_mm(prev_mm,next_mm,next_task)    activate_mm(prev_mm, next_mm)

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */