/*
 * ultra.S: Don't expand these all over the place...
 *
 * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
 */
#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
#include <asm/cacheflush.h>
#include <asm/hypervisor.h>
#include <asm/cpudata.h>
        /* Basically, most of the Spitfire vs. Cheetah madness
         * has to do with the fact that Cheetah does not support
         * IMMU flushes out of the secondary context.  Someone needs
         * to throw a south lake tahoe party for the folks in
         * Microelectronics who refused to fix this shit.
         */
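
        /* Throughout this file, TLB demap operations are stores to
         * ASI_DMMU_DEMAP/ASI_IMMU_DEMAP with the operation encoded in the
         * store address (the UltraSPARC-I/II demap format): bit 6 selects
         * demap-context vs. demap-page, and bits 5:4 select which context
         * register to use (00 primary, 01 secondary, 10 nucleus).  So 0x50
         * below means "demap context, secondary", 0x40 means "demap
         * context, primary", and "vaddr | 0x20" means "demap page,
         * nucleus".  Cheetah additionally accepts 0x80, "demap all".
         */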
        /* This file is meant to be read efficiently by the CPU, not humans.
         * Try not to fuck this up for anybody...
         */
        .text
        .align  32
        .globl  __flush_tlb_mm
__flush_tlb_mm:         /* 19 insns */
        /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
        ldxa    [%o1] ASI_DMMU, %g2
        cmp     %g2, %o0
        bne,pn  %icc, __spitfire_flush_tlb_mm_slow
        mov     0x50, %g3
        stxa    %g0, [%g3] ASI_DMMU_DEMAP
        stxa    %g0, [%g3] ASI_IMMU_DEMAP
        sethi   %hi(KERNBASE), %g3
        flush   %g3
        retl
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
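
        /* In the two page-flush routines below, bit 0 of each virtual
         * address is a flag set by the TLB batching code when the mapping
         * is executable: if set, the I-TLB entry is demapped as well as
         * the D-TLB entry.  The flag is cleared (andn) before the address
         * is used, and "| 0x10" selects a demap-page operation on the
         * secondary context.
         */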
        .align  32
        .globl  __flush_tlb_page
__flush_tlb_page:       /* 22 insns */
        /* %o0 = context, %o1 = vaddr */
        rdpr    %pstate, %g7
        andn    %g7, PSTATE_IE, %g2
        wrpr    %g2, %pstate
        mov     SECONDARY_CONTEXT, %o4
        ldxa    [%o4] ASI_DMMU, %g2
        stxa    %o0, [%o4] ASI_DMMU
        andcc   %o1, 1, %g0
        andn    %o1, 1, %o3
        be,pn   %icc, 1f
        or      %o3, 0x10, %o3
        stxa    %g0, [%o3] ASI_IMMU_DEMAP
1:      stxa    %g0, [%o3] ASI_DMMU_DEMAP
        membar  #Sync
        stxa    %g2, [%o4] ASI_DMMU
        sethi   %hi(KERNBASE), %o4
        flush   %o4
        retl
        wrpr    %g7, 0x0, %pstate
        nop
        nop
        nop
        nop
        .align  32
        .globl  __flush_tlb_pending
__flush_tlb_pending:    /* 27 insns */
        /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
        rdpr    %pstate, %g7
        sllx    %o1, 3, %o1
        andn    %g7, PSTATE_IE, %g2
        wrpr    %g2, %pstate
        mov     SECONDARY_CONTEXT, %o4
        ldxa    [%o4] ASI_DMMU, %g2
        stxa    %o0, [%o4] ASI_DMMU
1:      sub     %o1, (1 << 3), %o1
        ldx     [%o2 + %o1], %o3
        andcc   %o3, 1, %g0
        andn    %o3, 1, %o3
        be,pn   %icc, 2f
        or      %o3, 0x10, %o3
        stxa    %g0, [%o3] ASI_IMMU_DEMAP
2:      stxa    %g0, [%o3] ASI_DMMU_DEMAP
        membar  #Sync
        brnz,pt %o1, 1b
        nop
        stxa    %g2, [%o4] ASI_DMMU
        sethi   %hi(KERNBASE), %o4
        flush   %o4
        retl
        wrpr    %g7, 0x0, %pstate
        nop
        nop
        nop
        nop
        .align  32
        .globl  __flush_tlb_kernel_range
__flush_tlb_kernel_range:       /* 31 insns */
        /* %o0=start, %o1=end */
        cmp     %o0, %o1
        be,pn   %xcc, 2f
        sub     %o1, %o0, %o3
        srlx    %o3, 18, %o4
        brnz,pn %o4, __spitfire_flush_tlb_kernel_range_slow
        sethi   %hi(PAGE_SIZE), %o4
        sub     %o3, %o4, %o3
        or      %o0, 0x20, %o0          ! Nucleus
1:      stxa    %g0, [%o0 + %o3] ASI_DMMU_DEMAP
        stxa    %g0, [%o0 + %o3] ASI_IMMU_DEMAP
        membar  #Sync
        brnz,pt %o3, 1b
        sub     %o3, %o4, %o3
2:      sethi   %hi(KERNBASE), %o3
        flush   %o3
        retl
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
__spitfire_flush_tlb_kernel_range_slow:
        mov     63 * 8, %o4
1:      ldxa    [%o4] ASI_ITLB_DATA_ACCESS, %o3
        andcc   %o3, 0x40, %g0          /* _PAGE_L_4U */
        bne,pn  %xcc, 2f
        mov     TLB_TAG_ACCESS, %o3
        stxa    %g0, [%o3] ASI_IMMU
        stxa    %g0, [%o4] ASI_ITLB_DATA_ACCESS
        membar  #Sync
2:      ldxa    [%o4] ASI_DTLB_DATA_ACCESS, %o3
        andcc   %o3, 0x40, %g0
        bne,pn  %xcc, 2f
        mov     TLB_TAG_ACCESS, %o3
        stxa    %g0, [%o3] ASI_DMMU
        stxa    %g0, [%o4] ASI_DTLB_DATA_ACCESS
        membar  #Sync
2:      sub     %o4, 8, %o4
        brgez,pt %o4, 1b
        nop
        retl
        nop
__spitfire_flush_tlb_mm_slow:
        rdpr    %pstate, %g1
        wrpr    %g1, PSTATE_IE, %pstate
        stxa    %o0, [%o1] ASI_DMMU
        stxa    %g0, [%g3] ASI_DMMU_DEMAP
        stxa    %g0, [%g3] ASI_IMMU_DEMAP
        flush   %g6
        stxa    %g2, [%o1] ASI_DMMU
        sethi   %hi(KERNBASE), %o1
        flush   %o1
        retl
        wrpr    %g1, 0, %pstate
/*
 * The following code flushes one page_size worth.
 */
        .section .kprobes.text, "ax"
        .align  32
        .globl  __flush_icache_page
__flush_icache_page:    /* %o0 = phys_page */
        srlx    %o0, PAGE_SHIFT, %o0
        sethi   %hi(PAGE_OFFSET), %g1
        sllx    %o0, PAGE_SHIFT, %o0
        sethi   %hi(PAGE_SIZE), %g2
        ldx     [%g1 + %lo(PAGE_OFFSET)], %g1
        add     %o0, %g1, %o0
1:      subcc   %g2, 32, %g2
        bne,pt  %icc, 1b
        flush   %o0 + %g2
        retl
        nop
#ifdef DCACHE_ALIASING_POSSIBLE

#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif

#define DTAG_MASK 0x3

        /* This routine is Spitfire specific so the hardcoded
         * D-cache size and line-size are OK.
         */
        .align  64
        .globl  __flush_dcache_page
__flush_dcache_page:    /* %o0=kaddr, %o1=flush_icache */
        sethi   %hi(PAGE_OFFSET), %g1
        ldx     [%g1 + %lo(PAGE_OFFSET)], %g1
        sub     %o0, %g1, %o0                   ! physical address
        srlx    %o0, 11, %o0                    ! make D-cache TAG
        sethi   %hi(1 << 14), %o2               ! D-cache size
        sub     %o2, (1 << 5), %o2              ! D-cache line size
1:      ldxa    [%o2] ASI_DCACHE_TAG, %o3       ! load D-cache TAG
        andcc   %o3, DTAG_MASK, %g0             ! Valid?
        be,pn   %xcc, 2f                        ! Nope, branch
        andn    %o3, DTAG_MASK, %o3             ! Clear valid bits
        cmp     %o3, %o0                        ! TAG match?
        bne,pt  %xcc, 2f                        ! Nope, branch
        nop
        stxa    %g0, [%o2] ASI_DCACHE_TAG       ! Invalidate TAG
        membar  #Sync
2:      brnz,pt %o2, 1b
        sub     %o2, (1 << 5), %o2              ! D-cache line size

        /* The I-cache does not snoop local stores so we
         * better flush that too when necessary.
         */
        brnz,pt %o1, __flush_icache_page
        sllx    %o0, 11, %o0
        retl
        nop
#endif /* DCACHE_ALIASING_POSSIBLE */
        .previous
        /* Cheetah specific versions, patched at boot time. */
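        /* These are copied over the generic routines above at boot by
         * cheetah_patch_cachetlbops (below) using tlb_patch_one, so each
         * replacement must fit in the slot it overwrites; the "NN insns"
         * annotations record those lengths, and the trailing nops pad the
         * shorter routines out to the same size.
         */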
__cheetah_flush_tlb_mm: /* 19 insns */
        rdpr    %pstate, %g7
        andn    %g7, PSTATE_IE, %g2
        wrpr    %g2, 0x0, %pstate
        wrpr    %g0, 1, %tl
        mov     PRIMARY_CONTEXT, %o2
        mov     0x40, %g3
        ldxa    [%o2] ASI_DMMU, %g2
        srlx    %g2, CTX_PGSZ1_NUC_SHIFT, %o1
        sllx    %o1, CTX_PGSZ1_NUC_SHIFT, %o1
        or      %o0, %o1, %o0   /* Preserve nucleus page size fields */
        stxa    %o0, [%o2] ASI_DMMU
        stxa    %g0, [%g3] ASI_DMMU_DEMAP
        stxa    %g0, [%g3] ASI_IMMU_DEMAP
        stxa    %g2, [%o2] ASI_DMMU
        sethi   %hi(KERNBASE), %o2
        flush   %o2
        wrpr    %g0, 0, %tl
        retl
        wrpr    %g7, 0x0, %pstate
__cheetah_flush_tlb_page:       /* 22 insns */
        /* %o0 = context, %o1 = vaddr */
        rdpr    %pstate, %g7
        andn    %g7, PSTATE_IE, %g2
        wrpr    %g2, 0x0, %pstate
        wrpr    %g0, 1, %tl
        mov     PRIMARY_CONTEXT, %o4
        ldxa    [%o4] ASI_DMMU, %g2
        srlx    %g2, CTX_PGSZ1_NUC_SHIFT, %o3
        sllx    %o3, CTX_PGSZ1_NUC_SHIFT, %o3
        or      %o0, %o3, %o0   /* Preserve nucleus page size fields */
        stxa    %o0, [%o4] ASI_DMMU
        andcc   %o1, 1, %g0
        be,pn   %icc, 1f
        andn    %o1, 1, %o3
        stxa    %g0, [%o3] ASI_IMMU_DEMAP
1:      stxa    %g0, [%o3] ASI_DMMU_DEMAP
        membar  #Sync
        stxa    %g2, [%o4] ASI_DMMU
        sethi   %hi(KERNBASE), %o4
        flush   %o4
        wrpr    %g0, 0, %tl
        retl
        wrpr    %g7, 0x0, %pstate
__cheetah_flush_tlb_pending:    /* 27 insns */
        /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
        rdpr    %pstate, %g7
        sllx    %o1, 3, %o1
        andn    %g7, PSTATE_IE, %g2
        wrpr    %g2, 0x0, %pstate
        wrpr    %g0, 1, %tl
        mov     PRIMARY_CONTEXT, %o4
        ldxa    [%o4] ASI_DMMU, %g2
        srlx    %g2, CTX_PGSZ1_NUC_SHIFT, %o3
        sllx    %o3, CTX_PGSZ1_NUC_SHIFT, %o3
        or      %o0, %o3, %o0   /* Preserve nucleus page size fields */
        stxa    %o0, [%o4] ASI_DMMU
1:      sub     %o1, (1 << 3), %o1
        ldx     [%o2 + %o1], %o3
        andcc   %o3, 1, %g0
        be,pn   %icc, 2f
        andn    %o3, 1, %o3
        stxa    %g0, [%o3] ASI_IMMU_DEMAP
2:      stxa    %g0, [%o3] ASI_DMMU_DEMAP
        membar  #Sync
        brnz,pt %o1, 1b
        nop
        stxa    %g2, [%o4] ASI_DMMU
        sethi   %hi(KERNBASE), %o4
        flush   %o4
        wrpr    %g0, 0, %tl
        retl
        wrpr    %g7, 0x0, %pstate
__cheetah_flush_tlb_kernel_range:       /* 31 insns */
        /* %o0=start, %o1=end */
        cmp     %o0, %o1
        be,pn   %xcc, 2f
        sub     %o1, %o0, %o3
        srlx    %o3, 18, %o4
        brnz,pn %o4, 3f
        sethi   %hi(PAGE_SIZE), %o4
        sub     %o3, %o4, %o3
        or      %o0, 0x20, %o0          ! Nucleus
1:      stxa    %g0, [%o0 + %o3] ASI_DMMU_DEMAP
        stxa    %g0, [%o0 + %o3] ASI_IMMU_DEMAP
        membar  #Sync
        brnz,pt %o3, 1b
        sub     %o3, %o4, %o3
2:      sethi   %hi(KERNBASE), %o3
        flush   %o3
        retl
        nop
3:      mov     0x80, %o4
        stxa    %g0, [%o4] ASI_DMMU_DEMAP
        membar  #Sync
        stxa    %g0, [%o4] ASI_IMMU_DEMAP
        membar  #Sync
        retl
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
#ifdef DCACHE_ALIASING_POSSIBLE
__cheetah_flush_dcache_page:    /* 11 insns */
        sethi   %hi(PAGE_OFFSET), %g1
        ldx     [%g1 + %lo(PAGE_OFFSET)], %g1
        sub     %o0, %g1, %o0
        sethi   %hi(PAGE_SIZE), %o4
1:      subcc   %o4, (1 << 5), %o4
        stxa    %g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
        membar  #Sync
        bne,pt  %icc, 1b
        nop
        retl    /* I-cache flush never needed on Cheetah, see callers. */
        nop
#endif /* DCACHE_ALIASING_POSSIBLE */
        /* Hypervisor specific versions, patched at boot time. */
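        /* sun4v calling convention: the fast-trap function number goes in
         * %o5 and arguments in %o0-%o3; "ta HV_FAST_TRAP" enters the
         * hypervisor, which returns a status code in %o0 (zero on
         * success).  Unmapping a single address has its own trap,
         * "ta HV_MMU_UNMAP_ADDR_TRAP", taking vaddr/context/flags in
         * %o0/%o1/%o2.  On error, the attempted trap number is loaded for
         * reporting and control bails to the error routines.
         */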
__hypervisor_tlb_tl0_error:
        save    %sp, -192, %sp
        mov     %i0, %o0
        call    hypervisor_tlbop_error
        mov     %i1, %o1
        ret
        restore

__hypervisor_flush_tlb_mm:      /* 19 insns */
        mov     %o0, %o2                /* ARG2: mmu context */
        mov     0, %o0                  /* ARG0: CPU lists unimplemented */
        mov     0, %o1                  /* ARG1: CPU lists unimplemented */
        mov     HV_MMU_ALL, %o3         /* ARG3: flags */
        mov     HV_FAST_MMU_DEMAP_CTX, %o5
        ta      HV_FAST_TRAP
        brnz,pn %o0, 1f
        mov     HV_FAST_MMU_DEMAP_CTX, %o1
        retl
        nop
1:      sethi   %hi(__hypervisor_tlb_tl0_error), %o5
        jmpl    %o5 + %lo(__hypervisor_tlb_tl0_error), %g0
        nop
        nop
        nop
        nop
        nop
        nop
        nop
__hypervisor_flush_tlb_page:    /* 22 insns */
        /* %o0 = context, %o1 = vaddr */
        mov     %o0, %g2
        mov     %o1, %o0                /* ARG0: vaddr + IMMU-bit */
        mov     %g2, %o1                /* ARG1: mmu context */
        mov     HV_MMU_ALL, %o2         /* ARG2: flags */
        srlx    %o0, PAGE_SHIFT, %o0
        sllx    %o0, PAGE_SHIFT, %o0
        ta      HV_MMU_UNMAP_ADDR_TRAP
        brnz,pn %o0, 1f
        mov     HV_MMU_UNMAP_ADDR_TRAP, %o1
        retl
        nop
1:      sethi   %hi(__hypervisor_tlb_tl0_error), %o2
        jmpl    %o2 + %lo(__hypervisor_tlb_tl0_error), %g0
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
__hypervisor_flush_tlb_pending: /* 27 insns */
        /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
        sllx    %o1, 3, %g1
        mov     %o2, %g2
        mov     %o0, %g3
1:      sub     %g1, (1 << 3), %g1
        ldx     [%g2 + %g1], %o0        /* ARG0: vaddr + IMMU-bit */
        mov     %g3, %o1                /* ARG1: mmu context */
        mov     HV_MMU_ALL, %o2         /* ARG2: flags */
        srlx    %o0, PAGE_SHIFT, %o0
        sllx    %o0, PAGE_SHIFT, %o0
        ta      HV_MMU_UNMAP_ADDR_TRAP
        brnz,pn %o0, 1f
        mov     HV_MMU_UNMAP_ADDR_TRAP, %o1
        brnz,pt %g1, 1b
        nop
        retl
        nop
1:      sethi   %hi(__hypervisor_tlb_tl0_error), %o2
        jmpl    %o2 + %lo(__hypervisor_tlb_tl0_error), %g0
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
__hypervisor_flush_tlb_kernel_range:    /* 31 insns */
        /* %o0=start, %o1=end */
        cmp     %o0, %o1
        be,pn   %xcc, 2f
        sub     %o1, %o0, %g2
        srlx    %g2, 18, %g3
        brnz,pn %g3, 4f
        mov     %o0, %g1
        sethi   %hi(PAGE_SIZE), %g3
        sub     %g2, %g3, %g2
1:      add     %g1, %g2, %o0           /* ARG0: virtual address */
        mov     0, %o1                  /* ARG1: mmu context */
        mov     HV_MMU_ALL, %o2         /* ARG2: flags */
        ta      HV_MMU_UNMAP_ADDR_TRAP
        brnz,pn %o0, 3f
        mov     HV_MMU_UNMAP_ADDR_TRAP, %o1
        brnz,pt %g2, 1b
        sub     %g2, %g3, %g2
2:      retl
        nop
3:      sethi   %hi(__hypervisor_tlb_tl0_error), %o2
        jmpl    %o2 + %lo(__hypervisor_tlb_tl0_error), %g0
        nop
4:      mov     0, %o0                  /* ARG0: CPU lists unimplemented */
        mov     0, %o1                  /* ARG1: CPU lists unimplemented */
        mov     0, %o2                  /* ARG2: mmu context == nucleus */
        mov     HV_MMU_ALL, %o3         /* ARG3: flags */
        mov     HV_FAST_MMU_DEMAP_CTX, %o5
        ta      HV_FAST_TRAP
        brnz,pn %o0, 3b
        mov     HV_FAST_MMU_DEMAP_CTX, %o1
        retl
        nop
#ifdef DCACHE_ALIASING_POSSIBLE
        /* XXX Niagara and friends have an 8K cache, so no aliasing is
         * XXX possible, but nothing explicit in the Hypervisor API
         * XXX guarantees this.
         */
__hypervisor_flush_dcache_page: /* 2 insns */
        retl
        nop
#endif
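
        /* tlb_patch_one: copy %o2 instruction words from the replacement
         * routine at %o1 over the generic routine at %o0, flushing each
         * patched word from the I-cache before moving on.
         */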
tlb_patch_one:
1:      lduw    [%o1], %g1
        stw     %g1, [%o0]
        flush   %o0
        subcc   %o2, 1, %o2
        add     %o1, 4, %o1
        bne,pt  %icc, 1b
        add     %o0, 4, %o0
        retl
        nop
#ifdef CONFIG_SMP
        /* These are all called by the slaves of a cross call, at
         * trap level 1, with interrupts fully disabled.
         *
         * Register usage:
         *   %g5        mm->context     (all tlb flushes)
         *   %g1        address arg 1   (tlb page and range flushes)
         *   %g7        address arg 2   (tlb range flush only)
         *
         *   %g6        scratch 1
         *   %g2        scratch 2
         *   %g3        scratch 3
         *   %g4        scratch 4
         */
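        /* Unlike the routines above, these run as trap handlers rather
         * than called subroutines: they finish with "retry", which
         * resumes the interrupted instruction, instead of returning
         * through %o7 with "retl".
         */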
        .align  32
        .globl  xcall_flush_tlb_mm
xcall_flush_tlb_mm:     /* 24 insns */
        mov     PRIMARY_CONTEXT, %g2
        ldxa    [%g2] ASI_DMMU, %g3
        srlx    %g3, CTX_PGSZ1_NUC_SHIFT, %g4
        sllx    %g4, CTX_PGSZ1_NUC_SHIFT, %g4
        or      %g5, %g4, %g5   /* Preserve nucleus page size fields */
        stxa    %g5, [%g2] ASI_DMMU
        mov     0x40, %g4
        stxa    %g0, [%g4] ASI_DMMU_DEMAP
        stxa    %g0, [%g4] ASI_IMMU_DEMAP
        stxa    %g3, [%g2] ASI_DMMU
        retry
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        .globl  xcall_flush_tlb_page
xcall_flush_tlb_page:   /* 20 insns */
        /* %g5=context, %g1=vaddr */
        mov     PRIMARY_CONTEXT, %g4
        ldxa    [%g4] ASI_DMMU, %g2
        srlx    %g2, CTX_PGSZ1_NUC_SHIFT, %g4
        sllx    %g4, CTX_PGSZ1_NUC_SHIFT, %g4
        or      %g5, %g4, %g5
        mov     PRIMARY_CONTEXT, %g4
        stxa    %g5, [%g4] ASI_DMMU
        andcc   %g1, 0x1, %g0
        be,pn   %icc, 2f
        andn    %g1, 0x1, %g5
        stxa    %g0, [%g5] ASI_IMMU_DEMAP
2:      stxa    %g0, [%g5] ASI_DMMU_DEMAP
        membar  #Sync
        stxa    %g2, [%g4] ASI_DMMU
        retry
        nop
        nop
        nop
        nop
        nop
        .globl  xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:   /* 44 insns */
        sethi   %hi(PAGE_SIZE - 1), %g2
        or      %g2, %lo(PAGE_SIZE - 1), %g2
        andn    %g1, %g2, %g1
        andn    %g7, %g2, %g7
        sub     %g7, %g1, %g3
        srlx    %g3, 18, %g2
        brnz,pn %g2, 2f
        sethi   %hi(PAGE_SIZE), %g2
        sub     %g3, %g2, %g3
        or      %g1, 0x20, %g1          ! Nucleus
1:      stxa    %g0, [%g1 + %g3] ASI_DMMU_DEMAP
        stxa    %g0, [%g1 + %g3] ASI_IMMU_DEMAP
        membar  #Sync
        brnz,pt %g3, 1b
        sub     %g3, %g2, %g3
        retry
2:      mov     63 * 8, %g1
1:      ldxa    [%g1] ASI_ITLB_DATA_ACCESS, %g2
        andcc   %g2, 0x40, %g0          /* _PAGE_L_4U */
        bne,pn  %xcc, 2f
        mov     TLB_TAG_ACCESS, %g2
        stxa    %g0, [%g2] ASI_IMMU
        stxa    %g0, [%g1] ASI_ITLB_DATA_ACCESS
        membar  #Sync
2:      ldxa    [%g1] ASI_DTLB_DATA_ACCESS, %g2
        andcc   %g2, 0x40, %g0
        bne,pn  %xcc, 2f
        mov     TLB_TAG_ACCESS, %g2
        stxa    %g0, [%g2] ASI_DMMU
        stxa    %g0, [%g1] ASI_DTLB_DATA_ACCESS
        membar  #Sync
2:      sub     %g1, 8, %g1
        brgez,pt %g1, 1b
        nop
        retry
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        /* This runs in a very controlled environment, so we do
         * not need to worry about BH races etc.
         */
        .globl  xcall_sync_tick
xcall_sync_tick:
661:    rdpr    %pstate, %g2
        wrpr    %g2, PSTATE_IG | PSTATE_AG, %pstate
        .section .sun4v_2insn_patch, "ax"
        .word   661b
        nop
        nop
        .previous

        rdpr    %pil, %g2
        wrpr    %g0, PIL_NORMAL_MAX, %pil
        sethi   %hi(109f), %g7
        b,pt    %xcc, etrap_irq
109:    or      %g7, %lo(109b), %g7
#ifdef CONFIG_TRACE_IRQFLAGS
        call    trace_hardirqs_off
        nop
#endif
        call    smp_synchronize_tick_client
        nop
        b       rtrap_xcall
        ldx     [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
        .globl  xcall_fetch_glob_regs
xcall_fetch_glob_regs:
        sethi   %hi(global_cpu_snapshot), %g1
        or      %g1, %lo(global_cpu_snapshot), %g1
        __GET_CPUID(%g2)
        sllx    %g2, 6, %g3
        add     %g1, %g3, %g1
        rdpr    %tstate, %g7
        stx     %g7, [%g1 + GR_SNAP_TSTATE]
        rdpr    %tpc, %g7
        stx     %g7, [%g1 + GR_SNAP_TPC]
        rdpr    %tnpc, %g7
        stx     %g7, [%g1 + GR_SNAP_TNPC]
        stx     %o7, [%g1 + GR_SNAP_O7]
        stx     %i7, [%g1 + GR_SNAP_I7]
        /* Don't try this at home kids... */
        rdpr    %cwp, %g3
        sub     %g3, 1, %g7
        wrpr    %g7, %cwp
        mov     %i7, %g7
        wrpr    %g3, %cwp
        stx     %g7, [%g1 + GR_SNAP_RPC]
        sethi   %hi(trap_block), %g7
        or      %g7, %lo(trap_block), %g7
        sllx    %g2, TRAP_BLOCK_SZ_SHIFT, %g2
        add     %g7, %g2, %g7
        ldx     [%g7 + TRAP_PER_CPU_THREAD], %g3
        stx     %g3, [%g1 + GR_SNAP_THREAD]
        retry
        .globl  xcall_fetch_glob_pmu
xcall_fetch_glob_pmu:
        sethi   %hi(global_cpu_snapshot), %g1
        or      %g1, %lo(global_cpu_snapshot), %g1
        __GET_CPUID(%g2)
        sllx    %g2, 6, %g3
        add     %g1, %g3, %g1
        rd      %pic, %g7
        stx     %g7, [%g1 + (4 * 8)]
        rd      %pcr, %g7
        stx     %g7, [%g1 + (0 * 8)]
        retry
        .globl  xcall_fetch_glob_pmu_n4
xcall_fetch_glob_pmu_n4:
        sethi   %hi(global_cpu_snapshot), %g1
        or      %g1, %lo(global_cpu_snapshot), %g1
        __GET_CPUID(%g2)
        sllx    %g2, 6, %g3
        add     %g1, %g3, %g1

        ldxa    [%g0] ASI_PIC, %g7
        stx     %g7, [%g1 + (4 * 8)]
        mov     0x08, %g3
        ldxa    [%g3] ASI_PIC, %g7
        stx     %g7, [%g1 + (5 * 8)]
        mov     0x10, %g3
        ldxa    [%g3] ASI_PIC, %g7
        stx     %g7, [%g1 + (6 * 8)]
        mov     0x18, %g3
        ldxa    [%g3] ASI_PIC, %g7
        stx     %g7, [%g1 + (7 * 8)]

        mov     %o0, %g2
        mov     %o1, %g3
        mov     %o5, %g7

        mov     HV_FAST_VT_GET_PERFREG, %o5
        mov     3, %o0
        ta      HV_FAST_TRAP
        stx     %o1, [%g1 + (3 * 8)]
        mov     HV_FAST_VT_GET_PERFREG, %o5
        mov     2, %o0
        ta      HV_FAST_TRAP
        stx     %o1, [%g1 + (2 * 8)]
        mov     HV_FAST_VT_GET_PERFREG, %o5
        mov     1, %o0
        ta      HV_FAST_TRAP
        stx     %o1, [%g1 + (1 * 8)]
        mov     HV_FAST_VT_GET_PERFREG, %o5
        mov     0, %o0
        ta      HV_FAST_TRAP
        stx     %o1, [%g1 + (0 * 8)]

        mov     %g2, %o0
        mov     %g3, %o1
        mov     %g7, %o5

        retry
__cheetah_xcall_flush_tlb_kernel_range: /* 44 insns */
        sethi   %hi(PAGE_SIZE - 1), %g2
        or      %g2, %lo(PAGE_SIZE - 1), %g2
        andn    %g1, %g2, %g1
        andn    %g7, %g2, %g7
        sub     %g7, %g1, %g3
        srlx    %g3, 18, %g2
        brnz,pn %g2, 2f
        sethi   %hi(PAGE_SIZE), %g2
        sub     %g3, %g2, %g3
        or      %g1, 0x20, %g1          ! Nucleus
1:      stxa    %g0, [%g1 + %g3] ASI_DMMU_DEMAP
        stxa    %g0, [%g1 + %g3] ASI_IMMU_DEMAP
        membar  #Sync
        brnz,pt %g3, 1b
        sub     %g3, %g2, %g3
        retry
2:      mov     0x80, %g2
        stxa    %g0, [%g2] ASI_DMMU_DEMAP
        membar  #Sync
        stxa    %g0, [%g2] ASI_IMMU_DEMAP
        membar  #Sync
        retry
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
#ifdef DCACHE_ALIASING_POSSIBLE
        .align  32
        .globl  xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
        sethi   %hi(PAGE_SIZE), %g3
1:      subcc   %g3, (1 << 5), %g3
        stxa    %g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
        membar  #Sync
        bne,pt  %icc, 1b
        nop
        retry
        nop
#endif /* DCACHE_ALIASING_POSSIBLE */
        .globl  xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
                                     %g7 == kernel page virtual address
                                     %g5 == (page->mapping != NULL)  */
#ifdef DCACHE_ALIASING_POSSIBLE
        srlx    %g1, (13 - 2), %g1      ! Form tag comparator
        sethi   %hi(L1DCACHE_SIZE), %g3 ! D$ size == 16K
        sub     %g3, (1 << 5), %g3      ! D$ linesize == 32
1:      ldxa    [%g3] ASI_DCACHE_TAG, %g2
        andcc   %g2, 0x3, %g0
        be,pn   %xcc, 2f
        andn    %g2, 0x3, %g2
        cmp     %g2, %g1
        bne,pt  %xcc, 2f
        nop
        stxa    %g0, [%g3] ASI_DCACHE_TAG
        membar  #Sync
2:      cmp     %g3, 0
        bne,pt  %xcc, 1b
        sub     %g3, (1 << 5), %g3

        brz,pn  %g5, 2f
#endif /* DCACHE_ALIASING_POSSIBLE */
        sethi   %hi(PAGE_SIZE), %g3

1:      flush   %g7
        subcc   %g3, (1 << 5), %g3
        bne,pt  %icc, 1b
        add     %g7, (1 << 5), %g7

2:      retry
        nop
        nop
        /* %g5: error
         * %g6: tlb op
         */
__hypervisor_tlb_xcall_error:
        mov     %g5, %g4
        mov     %g6, %g5
        ba,pt   %xcc, etrap
        rd      %pc, %g7
        mov     %l4, %o0
        call    hypervisor_tlbop_error_xcall
        mov     %l5, %o1
        ba,a,pt %xcc, rtrap
        .globl  __hypervisor_xcall_flush_tlb_mm
__hypervisor_xcall_flush_tlb_mm: /* 24 insns */
        /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
        mov     %o0, %g2
        mov     %o1, %g3
        mov     %o2, %g4
        mov     %o3, %g1
        mov     %o5, %g7
        clr     %o0             /* ARG0: CPU lists unimplemented */
        clr     %o1             /* ARG1: CPU lists unimplemented */
        mov     %g5, %o2        /* ARG2: mmu context */
        mov     HV_MMU_ALL, %o3 /* ARG3: flags */
        mov     HV_FAST_MMU_DEMAP_CTX, %o5
        ta      HV_FAST_TRAP
        mov     HV_FAST_MMU_DEMAP_CTX, %g6
        brnz,pn %o0, 1f
        mov     %o0, %g5
        mov     %g2, %o0
        mov     %g3, %o1
        mov     %g4, %o2
        mov     %g1, %o3
        mov     %g7, %o5
        membar  #Sync
        retry
1:      sethi   %hi(__hypervisor_tlb_xcall_error), %g4
        jmpl    %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
        nop
        .globl  __hypervisor_xcall_flush_tlb_page
__hypervisor_xcall_flush_tlb_page: /* 20 insns */
        /* %g5=ctx, %g1=vaddr */
        mov     %o0, %g2
        mov     %o1, %g3
        mov     %o2, %g4
        mov     %g1, %o0                /* ARG0: virtual address */
        mov     %g5, %o1                /* ARG1: mmu context */
        mov     HV_MMU_ALL, %o2         /* ARG2: flags */
        srlx    %o0, PAGE_SHIFT, %o0
        sllx    %o0, PAGE_SHIFT, %o0
        ta      HV_MMU_UNMAP_ADDR_TRAP
        mov     HV_MMU_UNMAP_ADDR_TRAP, %g6
        brnz,a,pn %o0, 1f
        mov     %o0, %g5
        mov     %g2, %o0
        mov     %g3, %o1
        mov     %g4, %o2
        membar  #Sync
        retry
1:      sethi   %hi(__hypervisor_tlb_xcall_error), %g4
        jmpl    %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
        nop
        .globl  __hypervisor_xcall_flush_tlb_kernel_range
__hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */
        /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
        sethi   %hi(PAGE_SIZE - 1), %g2
        or      %g2, %lo(PAGE_SIZE - 1), %g2
        andn    %g1, %g2, %g1
        andn    %g7, %g2, %g7
        sub     %g7, %g1, %g3
        srlx    %g3, 18, %g7
        add     %g2, 1, %g2
        sub     %g3, %g2, %g3
        mov     %o0, %g2
        mov     %o1, %g4
        brnz,pn %g7, 2f
        mov     %o2, %g7
1:      add     %g1, %g3, %o0           /* ARG0: virtual address */
        mov     0, %o1                  /* ARG1: mmu context */
        mov     HV_MMU_ALL, %o2         /* ARG2: flags */
        ta      HV_MMU_UNMAP_ADDR_TRAP
        mov     HV_MMU_UNMAP_ADDR_TRAP, %g6
        brnz,pn %o0, 1f
        mov     %o0, %g5
        sethi   %hi(PAGE_SIZE), %o2
        brnz,pt %g3, 1b
        sub     %g3, %o2, %g3
5:      mov     %g2, %o0
        mov     %g4, %o1
        mov     %g7, %o2
        membar  #Sync
        retry
1:      sethi   %hi(__hypervisor_tlb_xcall_error), %g4
        jmpl    %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
        nop
2:      mov     %o3, %g1
        mov     %o5, %g3
        mov     0, %o0                  /* ARG0: CPU lists unimplemented */
        mov     0, %o1                  /* ARG1: CPU lists unimplemented */
        mov     0, %o2                  /* ARG2: mmu context == nucleus */
        mov     HV_MMU_ALL, %o3         /* ARG3: flags */
        mov     HV_FAST_MMU_DEMAP_CTX, %o5
        ta      HV_FAST_TRAP
        mov     %g1, %o3
        brz,pt  %o0, 5b
        mov     %g3, %o5
        mov     HV_FAST_MMU_DEMAP_CTX, %g6
        ba,pt   %xcc, 1b
        clr     %g5
        /* These just get rescheduled to PIL vectors. */
        .globl  xcall_call_function
xcall_call_function:
        wr      %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
        retry

        .globl  xcall_call_function_single
xcall_call_function_single:
        wr      %g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
        retry

        .globl  xcall_receive_signal
xcall_receive_signal:
        wr      %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
        retry

        .globl  xcall_capture
xcall_capture:
        wr      %g0, (1 << PIL_SMP_CAPTURE), %set_softint
        retry

#ifdef CONFIG_KGDB
        .globl  xcall_kgdb_capture
xcall_kgdb_capture:
        wr      %g0, (1 << PIL_KGDB_CAPTURE), %set_softint
        retry
#endif

#endif /* CONFIG_SMP */
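
        /* For the patch sequences below, the count passed to tlb_patch_one
         * in %o2 must match the "NN insns" annotation on the corresponding
         * replacement routine above, so that each routine is copied over
         * its generic counterpart in full.
         */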
        .globl  cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
        save    %sp, -128, %sp

        sethi   %hi(__flush_tlb_mm), %o0
        or      %o0, %lo(__flush_tlb_mm), %o0
        sethi   %hi(__cheetah_flush_tlb_mm), %o1
        or      %o1, %lo(__cheetah_flush_tlb_mm), %o1
        call    tlb_patch_one
        mov     19, %o2

        sethi   %hi(__flush_tlb_page), %o0
        or      %o0, %lo(__flush_tlb_page), %o0
        sethi   %hi(__cheetah_flush_tlb_page), %o1
        or      %o1, %lo(__cheetah_flush_tlb_page), %o1
        call    tlb_patch_one
        mov     22, %o2

        sethi   %hi(__flush_tlb_pending), %o0
        or      %o0, %lo(__flush_tlb_pending), %o0
        sethi   %hi(__cheetah_flush_tlb_pending), %o1
        or      %o1, %lo(__cheetah_flush_tlb_pending), %o1
        call    tlb_patch_one
        mov     27, %o2

        sethi   %hi(__flush_tlb_kernel_range), %o0
        or      %o0, %lo(__flush_tlb_kernel_range), %o0
        sethi   %hi(__cheetah_flush_tlb_kernel_range), %o1
        or      %o1, %lo(__cheetah_flush_tlb_kernel_range), %o1
        call    tlb_patch_one
        mov     31, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
        sethi   %hi(__flush_dcache_page), %o0
        or      %o0, %lo(__flush_dcache_page), %o0
        sethi   %hi(__cheetah_flush_dcache_page), %o1
        or      %o1, %lo(__cheetah_flush_dcache_page), %o1
        call    tlb_patch_one
        mov     11, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

#ifdef CONFIG_SMP
        sethi   %hi(xcall_flush_tlb_kernel_range), %o0
        or      %o0, %lo(xcall_flush_tlb_kernel_range), %o0
        sethi   %hi(__cheetah_xcall_flush_tlb_kernel_range), %o1
        or      %o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1
        call    tlb_patch_one
        mov     44, %o2
#endif /* CONFIG_SMP */

        ret
        restore
        .globl  hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
        save    %sp, -128, %sp

        sethi   %hi(__flush_tlb_mm), %o0
        or      %o0, %lo(__flush_tlb_mm), %o0
        sethi   %hi(__hypervisor_flush_tlb_mm), %o1
        or      %o1, %lo(__hypervisor_flush_tlb_mm), %o1
        call    tlb_patch_one
        mov     19, %o2

        sethi   %hi(__flush_tlb_page), %o0
        or      %o0, %lo(__flush_tlb_page), %o0
        sethi   %hi(__hypervisor_flush_tlb_page), %o1
        or      %o1, %lo(__hypervisor_flush_tlb_page), %o1
        call    tlb_patch_one
        mov     22, %o2

        sethi   %hi(__flush_tlb_pending), %o0
        or      %o0, %lo(__flush_tlb_pending), %o0
        sethi   %hi(__hypervisor_flush_tlb_pending), %o1
        or      %o1, %lo(__hypervisor_flush_tlb_pending), %o1
        call    tlb_patch_one
        mov     27, %o2

        sethi   %hi(__flush_tlb_kernel_range), %o0
        or      %o0, %lo(__flush_tlb_kernel_range), %o0
        sethi   %hi(__hypervisor_flush_tlb_kernel_range), %o1
        or      %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
        call    tlb_patch_one
        mov     31, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
        sethi   %hi(__flush_dcache_page), %o0
        or      %o0, %lo(__flush_dcache_page), %o0
        sethi   %hi(__hypervisor_flush_dcache_page), %o1
        or      %o1, %lo(__hypervisor_flush_dcache_page), %o1
        call    tlb_patch_one
        mov     2, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

#ifdef CONFIG_SMP
        sethi   %hi(xcall_flush_tlb_mm), %o0
        or      %o0, %lo(xcall_flush_tlb_mm), %o0
        sethi   %hi(__hypervisor_xcall_flush_tlb_mm), %o1
        or      %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
        call    tlb_patch_one
        mov     24, %o2

        sethi   %hi(xcall_flush_tlb_page), %o0
        or      %o0, %lo(xcall_flush_tlb_page), %o0
        sethi   %hi(__hypervisor_xcall_flush_tlb_page), %o1
        or      %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
        call    tlb_patch_one
        mov     20, %o2

        sethi   %hi(xcall_flush_tlb_kernel_range), %o0
        or      %o0, %lo(xcall_flush_tlb_kernel_range), %o0
        sethi   %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
        or      %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
        call    tlb_patch_one
        mov     44, %o2
#endif /* CONFIG_SMP */

        ret
        restore