/*
 * srmmu.c: SRMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
 */

#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/kdebug.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io-unit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/bitext.h>
#include <asm/vaddrs.h>
#include <asm/cache.h>
#include <asm/traps.h>
#include <asm/oplib.h>
#include <asm/mbus.h>
#include <asm/page.h>
#include <asm/asi.h>
#include <asm/msi.h>
#include <asm/smp.h>
#include <asm/io.h>

/* Now the cpu specific definitions. */
#include <asm/turbosparc.h>
#include <asm/tsunami.h>
#include <asm/viking.h>
#include <asm/swift.h>
#include <asm/leon.h>
#include <asm/mxcc.h>
#include <asm/ross.h>

#include "mm_32.h"

enum mbus_module srmmu_modtype;
static unsigned int hwbug_bitmask;
int vac_cache_size;
int vac_line_size;

extern struct resource sparc_iomap;

extern unsigned long last_valid_pfn;

static pgd_t *srmmu_swapper_pg_dir;

const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
EXPORT_SYMBOL(sparc32_cachetlb_ops);

#ifdef CONFIG_SMP
const struct sparc32_cachetlb_ops *local_ops;

#define FLUSH_BEGIN(mm)
#define FLUSH_END
#else
#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
#define FLUSH_END	}
#endif

int flush_page_for_dma_global = 1;

char *srmmu_name;

ctxd_t *srmmu_ctx_table_phys;
static ctxd_t *srmmu_context_table;

int viking_mxcc_present;
static DEFINE_SPINLOCK(srmmu_context_spinlock);

static int is_hypersparc;

static int srmmu_cache_pagetables;

/* these will be initialized in srmmu_nocache_calcsize() */
static unsigned long srmmu_nocache_size;
static unsigned long srmmu_nocache_end;

/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)

/* The context table is a nocache user with the biggest alignment needs. */
#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)

void *srmmu_nocache_pool;
static struct bit_map srmmu_nocache_map;

static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }

/* XXX should we hyper_flush_whole_icache here - Anton */
static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
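
/*
 * A software pmd_t holds PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE hardware
 * page-table pointers, so setting a pmd means filling each of those PTP
 * slots with successive pieces of the same PTE page.
 */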
void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = __nocache_pa((unsigned long) ptep) >> 4;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
{
	void *pte;

	pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
	return (pte_t *) pte +
	    ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

/*
 * size: bytes to allocate in the nocache area.
 * align: bytes, number to align at.
 * Returns the virtual address of the allocated area.
 */
static void *__srmmu_get_nocache(int size, int align)
{
	int offset;
	unsigned long addr;

	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk(KERN_ERR "Size 0x%x too small for nocache request\n",
		       size);
		size = SRMMU_NOCACHE_BITMAP_SHIFT;
	}
	if (size & (SRMMU_NOCACHE_BITMAP_SHIFT - 1)) {
		printk(KERN_ERR "Size 0x%x unaligned in nocache request\n",
		       size);
		size += SRMMU_NOCACHE_BITMAP_SHIFT - 1;
	}
	BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);

	offset = bit_map_string_get(&srmmu_nocache_map,
				    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
				    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
	if (offset == -1) {
		printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n",
		       size, (int) srmmu_nocache_size,
		       srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
		return NULL;
	}

	addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT);
	return (void *)addr;
}

void *srmmu_get_nocache(int size, int align)
{
	void *tmp;

	tmp = __srmmu_get_nocache(size, align);

	if (tmp)
		memset(tmp, 0, size);

	return tmp;
}
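
/*
 * Release a nocache allocation.  The address must lie inside the nocache
 * window and size must be the power-of-two size used for the allocation,
 * otherwise we BUG().
 */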
void srmmu_free_nocache(void *addr, int size)
{
	unsigned long vaddr;
	int offset;

	vaddr = (unsigned long)addr;
	if (vaddr < SRMMU_NOCACHE_VADDR) {
		printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
		       vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
		BUG();
	}
	if (vaddr + size > srmmu_nocache_end) {
		printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
		       vaddr, srmmu_nocache_end);
		BUG();
	}
	if (!is_power_of_2(size)) {
		printk("Size 0x%x is not a power of 2\n", size);
		BUG();
	}
	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x is too small\n", size);
		BUG();
	}
	if (vaddr & (size - 1)) {
		printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
		BUG();
	}

	offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
	size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	bit_map_clear(&srmmu_nocache_map, offset, size);
}

static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
						 unsigned long end);

/* Return how much physical memory we have. */
static unsigned long __init probe_memory(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; sp_banks[i].num_bytes; i++)
		total += sp_banks[i].num_bytes;

	return total;
}

/*
 * Reserve nocache dynamically proportionally to the amount of
 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
 */
static void __init srmmu_nocache_calcsize(void)
{
	unsigned long sysmemavail = probe_memory() / 1024;
	int srmmu_nocache_npages;

	srmmu_nocache_npages =
		sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;

	/* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
	// if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
	if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;

	/* anything above 1280 blows up */
	if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;

	srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
}
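
/*
 * Carve the nocache pool out of bootmem, set up its allocation bitmap,
 * and map the pool into the kernel page tables -- uncached unless page
 * tables are cacheable on this chip (srmmu_cache_pagetables).
 */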
static void __init srmmu_nocache_init(void)
{
	void *srmmu_nocache_bitmap;
	unsigned int bitmap_bits;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long paddr, vaddr;
	unsigned long pteval;

	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
					     SRMMU_NOCACHE_ALIGN_MAX, 0UL);
	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

	srmmu_nocache_bitmap =
		__alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
				SMP_CACHE_BYTES, 0UL);
	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

	srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
	init_mm.pgd = srmmu_swapper_pg_dir;

	srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);

	paddr = __pa((unsigned long)srmmu_nocache_pool);
	vaddr = SRMMU_NOCACHE_VADDR;

	while (vaddr < srmmu_nocache_end) {
		pgd = pgd_offset_k(vaddr);
		pmd = pmd_offset(__nocache_fix(pgd), vaddr);
		pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);

		pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);

		if (srmmu_cache_pagetables)
			pteval |= SRMMU_CACHE;

		set_pte(__nocache_fix(pte), __pte(pteval));

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	flush_cache_all();
	flush_tlb_all();
}
pgd_t *get_pgd_fast(void)
{
	pgd_t *pgd = NULL;

	pgd = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	if (pgd) {
		pgd_t *init = pgd_offset_k(0);
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return pgd;
}

/*
 * Hardware needs alignment to 256 only, but we align to whole page size
 * to reduce fragmentation problems due to the buddy principle.
 * XXX Provide actual fragmentation statistics in /proc.
 *
 * Alignments up to the page size are the same for physical and virtual
 * addresses of the nocache area.
 */
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long pte;
	struct page *page;

	if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
		return NULL;
	page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	return page;
}

void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	unsigned long p;

	pgtable_page_dtor(pte);
	p = (unsigned long)page_address(pte);	/* Cached address (for test) */
	if (p == 0)
		BUG();
	p = page_to_pfn(pte) << PAGE_SHIFT;	/* Physical address */

	/* free non cached virtual address */
	srmmu_free_nocache(__nocache_va(p), PTE_SIZE);
}

/* context handling - a dynamically sized pool is used */
#define NO_CONTEXT	-1

struct ctx_list {
	struct ctx_list *next;
	struct ctx_list *prev;
	unsigned int ctx_number;
	struct mm_struct *ctx_mm;
};

static struct ctx_list *ctx_list_pool;
static struct ctx_list ctx_free;
static struct ctx_list ctx_used;

/* At boot time we determine the number of contexts */
static int num_contexts;

static inline void remove_from_ctx_list(struct ctx_list *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
}

static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
	entry->next = head;
	(entry->prev = head->prev)->next = entry;
	head->prev = entry;
}
#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
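
/*
 * Hand out a free MMU context if one is available; otherwise steal one
 * from the used list (skipping old_mm), flushing the victim's cache and
 * TLB entries before reassigning it to the new mm.
 */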
static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if (ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if (ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	if (ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}

static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}

static void __init sparc_context_init(int numctx)
{
	int ctx;
	unsigned long size;

	size = numctx * sizeof(struct ctx_list);
	ctx_list_pool = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);

	for (ctx = 0; ctx < numctx; ctx++) {
		struct ctx_list *clist;

		clist = (ctx_list_pool + ctx);
		clist->ctx_number = ctx;
		clist->ctx_mm = NULL;
	}
	ctx_free.next = ctx_free.prev = &ctx_free;
	ctx_used.next = ctx_used.prev = &ctx_used;
	for (ctx = 0; ctx < numctx; ctx++)
		add_to_free_ctxlist(ctx_list_pool + ctx);
}
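
/*
 * Switch the MMU to the context of the incoming mm, allocating a
 * hardware context for it first if it does not have one yet.
 */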
void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
	       struct task_struct *tsk)
{
	unsigned long flags;

	if (mm->context == NO_CONTEXT) {
		spin_lock_irqsave(&srmmu_context_spinlock, flags);
		alloc_context(old_mm, mm);
		spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	}

	if (sparc_cpu_model == sparc_leon)
		leon_switch_mm();

	if (is_hypersparc)
		hyper_flush_whole_icache();

	srmmu_set_context(mm->context);
}

/* Low level IO area allocation on the SRMMU. */
static inline void srmmu_mapioaddr(unsigned long physaddr,
				   unsigned long virt_addr, int bus_type)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = pgd_offset_k(virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/* I need to test whether this is consistent over all
	 * sun4m's.  The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	tmp |= SRMMU_PRIV;
	__flush_page_to_ram(virt_addr);
	set_pte(ptep, __pte(tmp));
}

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_mapioaddr(xpa, xva, bus);
		xva += PAGE_SIZE;
		xpa += PAGE_SIZE;
	}
	flush_tlb_all();
}

static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	__pte_clear(ptep);
}

void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_unmapioaddr(virt_addr);
		virt_addr += PAGE_SIZE;
	}
	flush_tlb_all();
}
/* tsunami.S */
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);

/* swift.S */
extern void swift_flush_cache_all(void);
extern void swift_flush_cache_mm(struct mm_struct *mm);
extern void swift_flush_cache_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end);
extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);
extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);

#if 0 /* P3: deadwood to debug precise flushes on Swift. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cctx, ctx1;

	page &= PAGE_MASK;
	if ((ctx1 = vma->vm_mm->context) != -1) {
		cctx = srmmu_get_context();
		/* Is context # ever different from current context? P3 */
		if (cctx != ctx1) {
			printk("flush ctx %02x curr %02x\n", ctx1, cctx);
			srmmu_set_context(ctx1);
			swift_flush_page(page);
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					"r" (page), "i" (ASI_M_FLUSH_PROBE));
			srmmu_set_context(cctx);
		} else {
			/* Rm. prot. bits from virt. c. */
			/* swift_flush_cache_all(); */
			/* swift_flush_cache_page(vma, page); */
			swift_flush_page(page);

			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				"r" (page), "i" (ASI_M_FLUSH_PROBE));
			/* same as above: srmmu_flush_tlb_page() */
		}
	}
}
#endif

/*
 * The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration.  On the whole, these
 * chips seem to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */

/* viking.S */
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long page);

/* hypersparc.S */
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);
/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */

static void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
							unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
						  unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*pgdp)) {
			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(pgdp, pmdp);
		}
		pmdp = pmd_offset(pgdp, start);
		if (srmmu_pmd_none(*pmdp)) {
			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, PTE_SIZE);
			pmd_set(pmdp, ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

/* These flush types are not available on all chips... */
static inline unsigned long srmmu_probe(unsigned long vaddr)
{
	unsigned long retval;

	if (sparc_cpu_model != sparc_leon) {

		vaddr &= PAGE_MASK;
		__asm__ __volatile__("lda [%1] %2, %0\n\t" :
				     "=r" (retval) :
				     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
	} else {
		retval = leon_swprobe(vaddr, NULL);
	}
	return retval;
}
/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do.  Yuck... this is much better.
 */
static void __init srmmu_inherit_prom_mappings(unsigned long start,
					       unsigned long end)
{
	unsigned long probed;
	unsigned long addr;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what;	/* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */

	while (start <= end) {
		if (start == 0)
			break;	/* probably wrap around */
		if (start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		probed = srmmu_probe(start);
		if (!probed) {
			/* continue probing until we find an entry */
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is. */
		what = 0;
		addr = start - PAGE_SIZE;

		if (!(start & ~(SRMMU_REAL_PMD_MASK))) {
			if (srmmu_probe(addr + SRMMU_REAL_PMD_SIZE) == probed)
				what = 1;
		}

		if (!(start & ~(SRMMU_PGDIR_MASK))) {
			if (srmmu_probe(addr + SRMMU_PGDIR_SIZE) == probed)
				what = 2;
		}

		pgdp = pgd_offset_k(start);
		if (what == 2) {
			*(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
						   SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (what == 1) {
			/* We bend the rule where all 16 PTPs in a pmd_t point
			 * inside the same PTE page, and we leak a perfectly
			 * good hardware PTE piece. Alternatives seem worse.
			 */
			unsigned int x;	/* Index of HW PMD in soft cluster */
			unsigned long *val;
			x = (start >> PMD_SHIFT) & 15;
			val = &pmdp->pmdv[x];
			*(unsigned long *)__nocache_fix(val) = probed;
			start += SRMMU_REAL_PMD_SIZE;
			continue;
		}
		ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
		*(pte_t *)__nocache_fix(ptep) = __pte(probed);
		start += PAGE_SIZE;
	}
}
#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long big_pte;

	big_pte = KERNEL_PTE(phys_base >> 4);
	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}

/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
	/* Map "low" memory only */
	const unsigned long min_vaddr = PAGE_OFFSET;
	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;

	if (vstart < min_vaddr || vstart >= max_vaddr)
		return vstart;

	if (vend > max_vaddr || vend < min_vaddr)
		vend = max_vaddr;

	while (vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
	}
	return vstart;
}

static void __init map_kernel(void)
{
	int i;

	if (phys_base > 0) {
		do_large_mapping(PAGE_OFFSET, phys_base);
	}

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
	}
}

void (*poke_srmmu)(void) = NULL;
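
/*
 * Main SRMMU paging setup: size and map the nocache pool, take over the
 * PROM's mappings, map physical memory with 16MB pages, build the context
 * table and point the MMU at it, then hand the page counts to the zone
 * allocator.
 */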
void __init srmmu_paging_init(void)
{
	int i;
	phandle cpunode;
	char node_str[128];
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pages_avail;

	init_mm.context = (unsigned long) NO_CONTEXT;
	sparc_iomap.start = SUN4M_IOBASE_VADDR;	/* 16MB of IOSPACE on all sun4m's. */

	if (sparc_cpu_model == sun4d)
		num_contexts = 65536; /* We know it is Viking */
	else {
		/* Find the number of contexts on the srmmu. */
		cpunode = prom_getchild(prom_root_node);
		num_contexts = 0;
		while (cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if (!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);

	srmmu_nocache_calcsize();
	srmmu_nocache_init();
	srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
	map_kernel();

	/* ctx table has to be physically aligned to its size */
	srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);

	for (i = 0; i < num_contexts; i++)
		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

	flush_cache_all();
	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
#ifdef CONFIG_SMP
	/* Stop from hanging here... */
	local_ops->tlb_all();
#else
	flush_tlb_all();
#endif
	poke_srmmu();

	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);

	srmmu_allocate_ptable_skeleton(
		__fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);

	pgd = pgd_offset_k(PKMAP_BASE);
	pmd = pmd_offset(pgd, PKMAP_BASE);
	pte = pte_offset_kernel(pmd, PKMAP_BASE);
	pkmap_page_table = pte;

	flush_cache_all();
	flush_tlb_all();

	sparc_context_init(num_contexts);

	kmap_init();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = max_low_pfn - pfn_base;

		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		npages = highend_pfn - max_low_pfn;
		zones_size[ZONE_HIGHMEM] = npages;
		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();

		free_area_init_node(0, zones_size, pfn_base, zholes_size);
	}
}
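
/* Dump SRMMU details for /proc via the seq_file interface. */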
void mmu_info(struct seq_file *m)
{
	seq_printf(m,
		   "MMU type\t: %s\n"
		   "contexts\t: %d\n"
		   "nocache total\t: %ld\n"
		   "nocache used\t: %d\n",
		   srmmu_name,
		   num_contexts,
		   srmmu_nocache_size,
		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;
	return 0;
}

void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;

	if (mm->context != NO_CONTEXT) {
		flush_cache_mm(mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
		flush_tlb_mm(mm);
		spin_lock_irqsave(&srmmu_context_spinlock, flags);
		free_context(mm->context);
		spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
		mm->context = NO_CONTEXT;
	}
}

/* Init various srmmu chip types. */
static void __init srmmu_is_bad(void)
{
	prom_printf("Could not determine SRMMU chip type.\n");
	prom_halt();
}

static void __init init_vac_layout(void)
{
	phandle nd;
	int cache_lines;
	char node_str[128];
#ifdef CONFIG_SMP
	int cpu = 0;
	unsigned long max_size = 0;
	unsigned long min_line_size = 0x10000000;
#endif

	nd = prom_getchild(prom_root_node);
	while ((nd = prom_getsibling(nd)) != 0) {
		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
		if (!strcmp(node_str, "cpu")) {
			vac_line_size = prom_getint(nd, "cache-line-size");
			if (vac_line_size == -1) {
				prom_printf("can't determine cache-line-size, halting.\n");
				prom_halt();
			}
			cache_lines = prom_getint(nd, "cache-nlines");
			if (cache_lines == -1) {
				prom_printf("can't determine cache-nlines, halting.\n");
				prom_halt();
			}

			vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
			if (vac_cache_size > max_size)
				max_size = vac_cache_size;
			if (vac_line_size < min_line_size)
				min_line_size = vac_line_size;
			//FIXME: cpus not contiguous!!
			cpu++;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				break;
#else
			break;
#endif
		}
	}
	if (nd == 0) {
		prom_printf("No CPU nodes found, halting.\n");
		prom_halt();
	}
#ifdef CONFIG_SMP
	vac_cache_size = max_size;
	vac_line_size = min_line_size;
#endif
	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
	       (int)vac_cache_size, (int)vac_line_size);
}
static void poke_hypersparc(void)
{
	volatile unsigned long clear;
	unsigned long mreg = srmmu_get_mmureg();

	hyper_flush_unconditional_combined();

	mreg &= ~(HYPERSPARC_CWENABLE);
	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
	mreg |= (HYPERSPARC_CMODE);

	srmmu_set_mmureg(mreg);

#if 0 /* XXX I think this is bad news... -DaveM */
	hyper_clear_all_tags();
#endif

	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
	hyper_flush_whole_icache();
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();
}

static const struct sparc32_cachetlb_ops hypersparc_ops = {
	.cache_all	= hypersparc_flush_cache_all,
	.cache_mm	= hypersparc_flush_cache_mm,
	.cache_page	= hypersparc_flush_cache_page,
	.cache_range	= hypersparc_flush_cache_range,
	.tlb_all	= hypersparc_flush_tlb_all,
	.tlb_mm		= hypersparc_flush_tlb_mm,
	.tlb_page	= hypersparc_flush_tlb_page,
	.tlb_range	= hypersparc_flush_tlb_range,
	.page_to_ram	= hypersparc_flush_page_to_ram,
	.sig_insns	= hypersparc_flush_sig_insns,
	.page_for_dma	= hypersparc_flush_page_for_dma,
};

static void __init init_hypersparc(void)
{
	srmmu_name = "ROSS HyperSparc";
	srmmu_modtype = HyperSparc;

	init_vac_layout();

	is_hypersparc = 1;
	sparc32_cachetlb_ops = &hypersparc_ops;

	poke_srmmu = poke_hypersparc;

	hypersparc_setup_blockops();
}

static void poke_swift(void)
{
	unsigned long mreg;

	/* Clear any crap from the cache or else... */
	swift_flush_cache_all();

	/* Enable I & D caches */
	mreg = srmmu_get_mmureg();
	mreg |= (SWIFT_IE | SWIFT_DE);
	/*
	 * The Swift branch folding logic is completely broken.  At
	 * trap time, if things are just right, it can mistakenly
	 * think that a trap is coming from kernel mode when in fact
	 * it is coming from user mode (it mis-executes the branch in
	 * the trap code).  So you see things like crashme completely
	 * hosing your machine which is completely unacceptable.  Turn
	 * this shit off... nice job Fujitsu.
	 */
	mreg &= ~(SWIFT_BF);
	srmmu_set_mmureg(mreg);
}

static const struct sparc32_cachetlb_ops swift_ops = {
	.cache_all	= swift_flush_cache_all,
	.cache_mm	= swift_flush_cache_mm,
	.cache_page	= swift_flush_cache_page,
	.cache_range	= swift_flush_cache_range,
	.tlb_all	= swift_flush_tlb_all,
	.tlb_mm		= swift_flush_tlb_mm,
	.tlb_page	= swift_flush_tlb_page,
	.tlb_range	= swift_flush_tlb_range,
	.page_to_ram	= swift_flush_page_to_ram,
	.sig_insns	= swift_flush_sig_insns,
	.page_for_dma	= swift_flush_page_for_dma,
};

#define SWIFT_MASKID_ADDR  0x10003018
static void __init init_swift(void)
{
	unsigned long swift_rev;

	__asm__ __volatile__("lda [%1] %2, %0\n\t"
			     "srl %0, 0x18, %0\n\t" :
			     "=r" (swift_rev) :
			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
	srmmu_name = "Fujitsu Swift";
	switch (swift_rev) {
	case 0x11:
	case 0x20:
	case 0x23:
	case 0x30:
		srmmu_modtype = Swift_lots_o_bugs;
		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		/*
		 * Gee george, I wonder why Sun is so hush hush about
		 * this hardware bug... really braindamage stuff going
		 * on here.  However I think we can find a way to avoid
		 * all of the workaround overhead under Linux.  Basically,
		 * any page fault can cause kernel pages to become user
		 * accessible (the mmu gets confused and clears some of
		 * the ACC bits in kernel ptes).  Aha, sounds pretty
		 * horrible eh?  But wait, after extensive testing it appears
		 * that if you use pgd_t level large kernel pte's (like the
		 * 4MB pages on the Pentium) the bug does not get tripped
		 * at all.  This avoids almost all of the major overhead.
		 * Welcome to a world where your vendor tells you to,
		 * "apply this kernel patch" instead of "sorry for the
		 * broken hardware, send it back and we'll give you
		 * properly functioning parts"
		 */
		break;
	case 0x25:
	case 0x31:
		srmmu_modtype = Swift_bad_c;
		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/*
		 * You see Sun allude to this hardware bug but never
		 * admit things directly, they'll say things like,
		 * "the Swift chip cache problems" or similar.
		 */
		break;
	default:
		srmmu_modtype = Swift_ok;
		break;
	}

	sparc32_cachetlb_ops = &swift_ops;
	flush_page_for_dma_global = 0;

	/*
	 * Are you now convinced that the Swift is one of the
	 * biggest VLSI abortions of all time?  Bravo Fujitsu!
	 * Fujitsu, the !#?!%$'d up processor people.  I bet if
	 * you examined the microcode of the Swift you'd find
	 * XXX's all over the place.
	 */
	poke_srmmu = poke_swift;
}
static void turbosparc_flush_cache_all(void)
{
	flush_user_windows();
	turbosparc_idflash_clear();
}

static void turbosparc_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	if (vma->vm_flags & VM_EXEC)
		turbosparc_flush_icache();
	turbosparc_flush_dcache();
	FLUSH_END
}

/* TurboSparc is copy-back, if we turn it on, but this does not work. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
#ifdef TURBOSPARC_WRITEBACK
	volatile unsigned long clear;

	if (srmmu_probe(page))
		turbosparc_flush_page_cache(page);
	clear = srmmu_get_fstatus();
#endif
}

static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void turbosparc_flush_page_for_dma(unsigned long page)
{
	turbosparc_flush_dcache();
}

static void turbosparc_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}
static void poke_turbosparc(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long ccreg;

	/* Clear any crap from the cache or else... */
	turbosparc_flush_cache_all();
	/* Temporarily disable I & D caches */
	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE);
	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
	srmmu_set_mmureg(mreg);

	ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
	ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
			/* Write-back D-cache, emulate VLSI
			 * abortion number three, not number one */
#else
	/* For now let's play safe, optimize later */
	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
			/* Do DVMA snooping in Dcache, Write-thru D-cache */
	ccreg &= ~(TURBOSPARC_uS2);
			/* Emulate VLSI abortion number three, not number one */
#endif

	switch (ccreg & 7) {
	case 0: /* No SE cache */
	case 7: /* Test mode */
		break;
	default:
		ccreg |= (TURBOSPARC_SCENABLE);
	}
	turbosparc_set_ccreg(ccreg);

	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */

	srmmu_set_mmureg(mreg);
}

static const struct sparc32_cachetlb_ops turbosparc_ops = {
	.cache_all	= turbosparc_flush_cache_all,
	.cache_mm	= turbosparc_flush_cache_mm,
	.cache_page	= turbosparc_flush_cache_page,
	.cache_range	= turbosparc_flush_cache_range,
	.tlb_all	= turbosparc_flush_tlb_all,
	.tlb_mm		= turbosparc_flush_tlb_mm,
	.tlb_page	= turbosparc_flush_tlb_page,
	.tlb_range	= turbosparc_flush_tlb_range,
	.page_to_ram	= turbosparc_flush_page_to_ram,
	.sig_insns	= turbosparc_flush_sig_insns,
	.page_for_dma	= turbosparc_flush_page_for_dma,
};

static void __init init_turbosparc(void)
{
	srmmu_name = "Fujitsu TurboSparc";
	srmmu_modtype = TurboSparc;
	sparc32_cachetlb_ops = &turbosparc_ops;
	poke_srmmu = poke_turbosparc;
}

static void poke_tsunami(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	tsunami_flush_icache();
	tsunami_flush_dcache();
	mreg &= ~TSUNAMI_ITD;
	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
	srmmu_set_mmureg(mreg);
}

static const struct sparc32_cachetlb_ops tsunami_ops = {
	.cache_all	= tsunami_flush_cache_all,
	.cache_mm	= tsunami_flush_cache_mm,
	.cache_page	= tsunami_flush_cache_page,
	.cache_range	= tsunami_flush_cache_range,
	.tlb_all	= tsunami_flush_tlb_all,
	.tlb_mm		= tsunami_flush_tlb_mm,
	.tlb_page	= tsunami_flush_tlb_page,
	.tlb_range	= tsunami_flush_tlb_range,
	.page_to_ram	= tsunami_flush_page_to_ram,
	.sig_insns	= tsunami_flush_sig_insns,
	.page_for_dma	= tsunami_flush_page_for_dma,
};

static void __init init_tsunami(void)
{
	/*
	 * Tsunami's pretty sane, Sun and TI actually got it
	 * somewhat right this time.  Fujitsu should have
	 * taken some lessons from them.
	 */

	srmmu_name = "TI Tsunami";
	srmmu_modtype = Tsunami;
	sparc32_cachetlb_ops = &tsunami_ops;
	poke_srmmu = poke_tsunami;

	tsunami_setup_blockops();
}
static void poke_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	static int smp_catch;

	if (viking_mxcc_present) {
		unsigned long mxcc_control = mxcc_get_creg();

		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
		mxcc_control &= ~(MXCC_CTL_RRC);
		mxcc_set_creg(mxcc_control);

		/*
		 * We don't need memory parity checks.
		 * XXX This is a mess, have to dig out later. ecd.
		viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
		 */

		/* We do cache ptables on MXCC. */
		mreg |= VIKING_TCENABLE;
	} else {
		unsigned long bpreg;

		mreg &= ~(VIKING_TCENABLE);
		if (smp_catch++) {
			/* Must disable mixed-cmd mode here for other cpu's. */
			bpreg = viking_get_bpreg();
			bpreg &= ~(VIKING_ACTION_MIX);
			viking_set_bpreg(bpreg);

			/* Just in case PROM does something funny. */
			msi_set_sync();
		}
	}

	mreg |= VIKING_SPENABLE;
	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
	mreg |= VIKING_SBENABLE;
	mreg &= ~(VIKING_ACENABLE);
	srmmu_set_mmureg(mreg);
}

static struct sparc32_cachetlb_ops viking_ops = {
	.cache_all	= viking_flush_cache_all,
	.cache_mm	= viking_flush_cache_mm,
	.cache_page	= viking_flush_cache_page,
	.cache_range	= viking_flush_cache_range,
	.tlb_all	= viking_flush_tlb_all,
	.tlb_mm		= viking_flush_tlb_mm,
	.tlb_page	= viking_flush_tlb_page,
	.tlb_range	= viking_flush_tlb_range,
	.page_to_ram	= viking_flush_page_to_ram,
	.sig_insns	= viking_flush_sig_insns,
	.page_for_dma	= viking_flush_page_for_dma,
};

#ifdef CONFIG_SMP
/* On sun4d the cpu broadcasts local TLB flushes, so we can just
 * perform the local TLB flush and all the other cpus will see it.
 * But, unfortunately, there is a bug in the sun4d XBUS backplane
 * that requires that we add some synchronization to these flushes.
 *
 * The bug is that the fifo which keeps track of all the pending TLB
 * broadcasts in the system is an entry or two too small, so if we
 * have too many going at once we'll overflow that fifo and lose a TLB
 * flush resulting in corruption.
 *
 * Our workaround is to take a global spinlock around the TLB flushes,
 * which guarantees we won't ever have too many pending.  It's a big
 * hammer, but a semaphore like system to make sure we only have N TLB
 * flushes going at once will require SMP locking anyways so there's
 * no real value in trying any harder than this.
 */
static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
	.cache_all	= viking_flush_cache_all,
	.cache_mm	= viking_flush_cache_mm,
	.cache_page	= viking_flush_cache_page,
	.cache_range	= viking_flush_cache_range,
	.tlb_all	= sun4dsmp_flush_tlb_all,
	.tlb_mm		= sun4dsmp_flush_tlb_mm,
	.tlb_page	= sun4dsmp_flush_tlb_page,
	.tlb_range	= sun4dsmp_flush_tlb_range,
	.page_to_ram	= viking_flush_page_to_ram,
	.sig_insns	= viking_flush_sig_insns,
	.page_for_dma	= viking_flush_page_for_dma,
};
#endif
static void __init init_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	/* Ahhh, the viking.  SRMMU VLSI abortion number two... */
	if (mreg & VIKING_MMODE) {
		srmmu_name = "TI Viking";
		viking_mxcc_present = 0;
		msi_set_sync();

		/*
		 * We need this to make sure old viking takes no hits
		 * on its cache for dma snoops to workaround the
		 * "load from non-cacheable memory" interrupt bug.
		 * This is only necessary because of the new way in
		 * which we use the IOMMU.
		 */
		viking_ops.page_for_dma = viking_flush_page;
#ifdef CONFIG_SMP
		viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
#endif
		flush_page_for_dma_global = 0;
	} else {
		srmmu_name = "TI Viking/MXCC";
		viking_mxcc_present = 1;
		srmmu_cache_pagetables = 1;
	}

	sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
		&viking_ops;
#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d)
		sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
			&viking_sun4d_smp_ops;
#endif

	poke_srmmu = poke_viking;
}
  1302. /* Probe for the srmmu chip version. */
  1303. static void __init get_srmmu_type(void)
  1304. {
  1305. unsigned long mreg, psr;
  1306. unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
  1307. srmmu_modtype = SRMMU_INVAL_MOD;
  1308. hwbug_bitmask = 0;
  1309. mreg = srmmu_get_mmureg(); psr = get_psr();
  1310. mod_typ = (mreg & 0xf0000000) >> 28;
  1311. mod_rev = (mreg & 0x0f000000) >> 24;
  1312. psr_typ = (psr >> 28) & 0xf;
  1313. psr_vers = (psr >> 24) & 0xf;

	/* First, check for sparc-leon. */
	if (sparc_cpu_model == sparc_leon) {
		init_leon();
		return;
	}

	/* Second, check for HyperSparc or Cypress. */
	if (mod_typ == 1) {
		switch (mod_rev) {
		case 7:
			/* UP or MP Hypersparc */
			init_hypersparc();
			break;
		case 0:
		case 2:
		case 10:
		case 11:
		case 12:
		case 13:
		case 14:
		case 15:
		default:
			prom_printf("Sparc-Linux Cypress support no longer exists.\n");
			prom_halt();
			break;
		}
		return;
	}

	/* Now Fujitsu TurboSparc.  It might be running in Swift
	 * emulation mode, so we will check for that again in the
	 * Swift probe below...
	 */
	if (psr_typ == 0 && psr_vers == 5) {
		init_turbosparc();
		return;
	}

	/* Next check for Fujitsu Swift. */
	if (psr_typ == 0 && psr_vers == 4) {
		phandle cpunode;
		char node_str[128];

		/* Check whether this is really a TurboSparc emulating Swift... */
		cpunode = prom_getchild(prom_root_node);
		while ((cpunode = prom_getsibling(cpunode)) != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
				    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
					init_turbosparc();
					return;
				}
				break;
			}
		}

		init_swift();
		return;
	}

	/* Now the Viking family of srmmu. */
	if (psr_typ == 4 &&
	   ((psr_vers == 0) ||
	    ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
		init_viking();
		return;
	}

	/* Finally the Tsunami. */
	if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
		init_tsunami();
		return;
	}

	/* Oh well */
	srmmu_is_bad();
}

#ifdef CONFIG_SMP
/* Local cross-calls. */
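/*
 * Each smp_flush_* wrapper below cross-calls the chip-local routine on the
 * other cpus via xc0()/xc1()/xc2()/xc3() and then runs the same routine
 * locally, so every cpu performs the flush.
 */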
static void smp_flush_page_for_dma(unsigned long page)
{
	xc1((smpfunc_t) local_ops->page_for_dma, page);
	local_ops->page_for_dma(page);
}

static void smp_flush_cache_all(void)
{
	xc0((smpfunc_t) local_ops->cache_all);
	local_ops->cache_all();
}

static void smp_flush_tlb_all(void)
{
	xc0((smpfunc_t) local_ops->tlb_all);
	local_ops->tlb_all();
}
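
/*
 * The mm- and vma-scoped wrappers only cross-call cpus that actually have
 * the mm loaded: copy mm_cpumask(mm), drop ourselves, and skip the broadcast
 * entirely when no other cpu is using the address space.
 */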
static void smp_flush_cache_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
		local_ops->cache_mm(mm);
	}
}

static void smp_flush_tlb_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask)) {
			xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
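			/*
			 * If this cpu is the only remaining user of the mm,
			 * the cross-call above has just flushed everyone
			 * else, so shrink mm_cpumask() back to this cpu.
			 */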
			if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
				cpumask_copy(mm_cpumask(mm),
					     cpumask_of(smp_processor_id()));
		}
		local_ops->tlb_mm(mm);
	}
}

static void smp_flush_cache_range(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) local_ops->cache_range,
			    (unsigned long) vma, start, end);
		local_ops->cache_range(vma, start, end);
	}
}

static void smp_flush_tlb_range(struct vm_area_struct *vma,
				unsigned long start,
				unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) local_ops->tlb_range,
			    (unsigned long) vma, start, end);
		local_ops->tlb_range(vma, start, end);
	}
}

static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) local_ops->cache_page,
			    (unsigned long) vma, page);
		local_ops->cache_page(vma, page);
	}
}

static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) local_ops->tlb_page,
			    (unsigned long) vma, page);
		local_ops->tlb_page(vma, page);
	}
}

static void smp_flush_page_to_ram(unsigned long page)
{
	/* Current theory is that those who call this are the ones
	 * who have just dirtied their cache with the page's contents
	 * in kernel space, therefore we only need to run this on the
	 * local cpu.
	 *
	 * XXX This experiment failed, research further... -DaveM
	 */
#if 1
	xc1((smpfunc_t) local_ops->page_to_ram, page);
#endif
	local_ops->page_to_ram(page);
}

static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	cpumask_t cpu_mask;

	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
	if (!cpumask_empty(&cpu_mask))
		xc2((smpfunc_t) local_ops->sig_insns,
		    (unsigned long) mm, insn_addr);
	local_ops->sig_insns(mm, insn_addr);
}

static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
	.cache_all = smp_flush_cache_all,
	.cache_mm = smp_flush_cache_mm,
	.cache_page = smp_flush_cache_page,
	.cache_range = smp_flush_cache_range,
	.tlb_all = smp_flush_tlb_all,
	.tlb_mm = smp_flush_tlb_mm,
	.tlb_page = smp_flush_tlb_page,
	.tlb_range = smp_flush_tlb_range,
	.page_to_ram = smp_flush_page_to_ram,
	.sig_insns = smp_flush_sig_insns,
	.page_for_dma = smp_flush_page_for_dma,
};
#endif

/* Load up routines and constants for sun4m and sun4d mmu */
void __init load_mmu(void)
{
	/* Functions */
	get_srmmu_type();

#ifdef CONFIG_SMP
	/* El switcheroo... */
	local_ops = sparc32_cachetlb_ops;
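
	/*
	 * sun4d broadcasts TLB flushes in hardware (see the sun4d comment
	 * above), and LEON is handled the same way here, so the chip-local
	 * tlb routines are kept instead of the cross-call wrappers.
	 */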
	if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
		smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
		smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
		smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
		smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
	}

	if (poke_srmmu == poke_viking) {
		/* Avoid unnecessary cross calls. */
		smp_cachetlb_ops.cache_all = local_ops->cache_all;
		smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
		smp_cachetlb_ops.cache_range = local_ops->cache_range;
		smp_cachetlb_ops.cache_page = local_ops->cache_page;
		smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
		smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
		smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
	}

	/* It really is const after this point. */
	sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
		&smp_cachetlb_ops;
#endif

	if (sparc_cpu_model == sun4d)
		ld_mmu_iounit();
	else
		ld_mmu_iommu();
#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d)
		sun4d_init_smp();
	else if (sparc_cpu_model == sparc_leon)
		leon_init_smp();
	else
		sun4m_init_smp();
#endif
}